File 5587d7b7-evtchn-use-a-per-event-channel-lock-for-sending-events.patch of Package xen.7653
# Commit de6acb78bf0e137cbe5b72cee4a35ca018d759cc
# Date 2015-06-22 11:39:03 +0200
# Author David Vrabel <david.vrabel@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
evtchn: use a per-event channel lock for sending events

When sending an event, use a new per-event channel lock to safely
validate the event channel state.

This new lock must be held when changing event channel state.  Note
that the event channel lock must also be held when changing state from
ECS_FREE or it will race with a concurrent get_free_port() call.

To avoid having to take the remote event channel locks when sending to
an interdomain event channel, the local and remote channel locks are
both held when binding or closing an interdomain event channel.

This significantly increases the number of events that can be sent
from multiple VCPUs.

But struct evtchn increases in size, reducing the number that fit into
a single page to 64 (instead of 128).

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -141,6 +141,7 @@ static struct evtchn *alloc_evtchn_bucke
             return NULL;
         }
         chn[i].port = port + i;
+        spin_lock_init(&chn[i].lock);
     }
     return chn;
 }
@@ -232,11 +233,15 @@ static long evtchn_alloc_unbound(evtchn_
     if ( rc )
         goto out;
 
+    spin_lock(&chn->lock);
+
     chn->state = ECS_UNBOUND;
     if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
         chn->u.unbound.remote_domid = current->domain->domain_id;
     evtchn_port_init(d, chn);
 
+    spin_unlock(&chn->lock);
+
     alloc->port = port;
 
  out:
@@ -247,6 +252,28 @@ static long evtchn_alloc_unbound(evtchn_
 }
 
 
+static void double_evtchn_lock(struct evtchn *lchn, struct evtchn *rchn)
+{
+    if ( lchn < rchn )
+    {
+        spin_lock(&lchn->lock);
+        spin_lock(&rchn->lock);
+    }
+    else
+    {
+        if ( lchn != rchn )
+            spin_lock(&rchn->lock);
+        spin_lock(&lchn->lock);
+    }
+}
+
+static void double_evtchn_unlock(struct evtchn *lchn, struct evtchn *rchn)
+{
+    spin_unlock(&lchn->lock);
+    if ( lchn != rchn )
+        spin_unlock(&rchn->lock);
+}
+
 static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
 {
     struct evtchn *lchn, *rchn;
@@ -289,6 +316,8 @@ static long evtchn_bind_interdomain(evtc
     if ( rc )
         goto out;
 
+    double_evtchn_lock(lchn, rchn);
+
     lchn->u.interdomain.remote_dom  = rd;
     lchn->u.interdomain.remote_port = rport;
     lchn->state                     = ECS_INTERDOMAIN;
@@ -304,6 +333,8 @@ static long evtchn_bind_interdomain(evtc
      */
     evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
 
+    double_evtchn_unlock(lchn, rchn);
+
     bind->local_port = lport;
 
  out:
@@ -344,11 +375,16 @@ static long evtchn_bind_virq(evtchn_bind
         ERROR_EXIT(port);
 
     chn = evtchn_from_port(d, port);
+
+    spin_lock(&chn->lock);
+
     chn->state          = ECS_VIRQ;
     chn->notify_vcpu_id = vcpu;
     chn->u.virq         = virq;
     evtchn_port_init(d, chn);
 
+    spin_unlock(&chn->lock);
+
     v->virq_to_evtchn[virq] = bind->port = port;
 
  out:
@@ -375,10 +411,15 @@ static long evtchn_bind_ipi(evtchn_bind_
         ERROR_EXIT(port);
 
     chn = evtchn_from_port(d, port);
+
+    spin_lock(&chn->lock);
+
     chn->state          = ECS_IPI;
     chn->notify_vcpu_id = vcpu;
     evtchn_port_init(d, chn);
 
+    spin_unlock(&chn->lock);
+
     bind->port = port;
 
  out:
@@ -453,11 +494,15 @@ static long evtchn_bind_pirq(evtchn_bind
         goto out;
     }
 
+    spin_lock(&chn->lock);
+
     chn->state  = ECS_PIRQ;
     chn->u.pirq.irq = pirq;
     link_pirq_port(port, chn, v);
     evtchn_port_init(d, chn);
 
+    spin_unlock(&chn->lock);
+
     bind->port = port;
 
     arch_evtchn_bind_pirq(d, pirq);
@@ -575,15 +620,24 @@ static long evtchn_close(struct domain *
         BUG_ON(chn2->state != ECS_INTERDOMAIN);
         BUG_ON(chn2->u.interdomain.remote_dom != d1);
 
+        double_evtchn_lock(chn1, chn2);
+
+        free_evtchn(d1, chn1);
+
         chn2->state = ECS_UNBOUND;
         chn2->u.unbound.remote_domid = d1->domain_id;
-        break;
+
+        double_evtchn_unlock(chn1, chn2);
+
+        goto out;
 
     default:
         BUG();
     }
 
+    spin_lock(&chn1->lock);
     free_evtchn(d1, chn1);
+    spin_unlock(&chn1->lock);
 
  out:
     if ( d2 != NULL )
@@ -605,21 +659,18 @@ int evtchn_send(struct domain *d, unsign
     struct vcpu   *rvcpu;
     int            rport, ret = 0;
 
-    spin_lock(&ld->event_lock);
-
-    if ( unlikely(!port_is_valid(ld, lport)) )
-    {
-        spin_unlock(&ld->event_lock);
+    if ( !port_is_valid(ld, lport) )
         return -EINVAL;
-    }
 
     lchn = evtchn_from_port(ld, lport);
 
+    spin_lock(&lchn->lock);
+
     /* Guest cannot send via a Xen-attached event channel. */
     if ( unlikely(consumer_is_xen(lchn)) )
     {
-        spin_unlock(&ld->event_lock);
-        return -EINVAL;
+        ret = -EINVAL;
+        goto out;
     }
 
     ret = xsm_evtchn_send(XSM_HOOK, ld, lchn);
@@ -649,7 +700,7 @@ int evtchn_send(struct domain *d, unsign
     }
 
 out:
-    spin_unlock(&ld->event_lock);
+    spin_unlock(&lchn->lock);
 
     return ret;
 }
@@ -1160,11 +1211,15 @@ int alloc_unbound_xen_event_channel(
     if ( rc )
         goto out;
 
+    spin_lock(&chn->lock);
+
     chn->state = ECS_UNBOUND;
     chn->xen_consumer = get_xen_consumer(notification_fn);
     chn->notify_vcpu_id = local_vcpu->vcpu_id;
     chn->u.unbound.remote_domid = remote_domid;
 
+    spin_unlock(&chn->lock);
+
  out:
     spin_unlock(&d->event_lock);
@@ -1188,11 +1243,11 @@ void notify_via_xen_event_channel(struct
     struct domain *rd;
     int            rport;
 
-    spin_lock(&ld->event_lock);
-
     ASSERT(port_is_valid(ld, lport));
     lchn = evtchn_from_port(ld, lport);
+
+    spin_lock(&lchn->lock);
+
     if ( likely(lchn->state == ECS_INTERDOMAIN) )
     {
         ASSERT(consumer_is_xen(lchn));
@@ -1202,7 +1257,7 @@ void notify_via_xen_event_channel(struct
         evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
     }
 
-    spin_unlock(&ld->event_lock);
+    spin_unlock(&lchn->lock);
 }
 
 void evtchn_check_pollers(struct domain *d, unsigned int port)
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -80,6 +80,7 @@ extern domid_t hardware_domid;
 
 struct evtchn
 {
+    spinlock_t lock;
 #define ECS_FREE         0 /* Channel is available for use.                  */
 #define ECS_RESERVED     1 /* Channel is reserved.                           */
 #define ECS_UNBOUND      2 /* Channel is waiting to bind to a remote domain. */
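The deadlock-avoidance idea behind double_evtchn_lock() in the patch above is to impose a single global ordering on lock acquisition: of the two per-channel locks, the one at the lower address is always taken first. The following standalone sketch illustrates that same pattern outside of Xen; it is a minimal illustration only, using POSIX mutexes as a stand-in for the hypervisor's spinlocks, and the channel structure and names here are hypothetical, not part of the patch.

/* Minimal sketch of address-ordered double locking, modelled on
 * double_evtchn_lock()/double_evtchn_unlock() from the patch above.
 * Assumption: pthread mutexes stand in for Xen spinlocks. */
#include <pthread.h>
#include <stdio.h>

struct channel {
    pthread_mutex_t lock;
    int state;
};

/* Always acquire the lower-addressed lock first, so two threads
 * locking the same pair in opposite roles cannot deadlock. */
static void double_channel_lock(struct channel *a, struct channel *b)
{
    if (a < b) {
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    } else {
        if (a != b)
            pthread_mutex_lock(&b->lock);
        pthread_mutex_lock(&a->lock);
    }
}

static void double_channel_unlock(struct channel *a, struct channel *b)
{
    pthread_mutex_unlock(&a->lock);
    if (a != b)
        pthread_mutex_unlock(&b->lock);
}

int main(void)
{
    struct channel local  = { PTHREAD_MUTEX_INITIALIZER, 0 };
    struct channel remote = { PTHREAD_MUTEX_INITIALIZER, 0 };

    /* Both channel states are changed only while both locks are held,
     * mirroring how the patch binds/closes interdomain channels. */
    double_channel_lock(&local, &remote);
    local.state  = 1;
    remote.state = 1;
    double_channel_unlock(&local, &remote);

    printf("states: %d %d\n", local.state, remote.state);
    return 0;
}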