Further cleanup after r285607.

Remove the useless release semantics from some stores to it_need.  For
the stores where release is needed, add a comment explaining why.
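
To illustrate the "useless" case: the stores in question are made with
the thread lock held, so the lock release that follows already orders
them (a minimal sketch of the pattern, not the committed diff; compare
intr_event_remove_handler() below):

	thread_lock(it->it_thread);
	/*
	 * A plain store suffices here: thread_unlock() has release
	 * semantics, which is what atomic_store_rel_int() would
	 * otherwise have provided.
	 */
	it->it_need = 1;
	thread_unlock(it->it_thread);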

The fence after the atomic_cmpset() op on it_need should be acquire
only; release is not needed (see above).  The combination of
atomic_cmpset() + fence_acq() is better expressed there as
atomic_cmpset_acq().
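
As a sketch, the two forms below are equivalent; the second is what the
ithread loops now use (assuming the FreeBSD atomic(9) primitives, which
provide both the _acq variants and the standalone fences):

	/* Separate primitive plus fence. */
	while (atomic_cmpset_int(&ithd->it_need, 1, 0) != 0) {
		atomic_thread_fence_acq();
		ithread_execute_handlers(p, ie);
	}

	/* Acquire variant of the cmpset, same ordering. */
	while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
		ithread_execute_handlers(p, ie);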

Use atomic_cmpset() for the swi handler's ih_need read and clear.
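
In other words, the separate acquire load and release store of ih_need
collapse into one atomic read-and-clear (taken from the
intr_event_execute_handlers() change below):

	/* Before: two operations on ih_need. */
	if (atomic_load_acq_int(&ih->ih_need) == 0)
		continue;
	else
		atomic_store_rel_int(&ih->ih_need, 0);

	/*
	 * After: ih_need is only ever 0 or 1, so a failed cmpset
	 * means there is no request and no retry is needed.
	 */
	if (atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
		continue;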

Discussed with:	alc, bde
Reviewed by:	bde
Comments wording provided by:	bde
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Konstantin Belousov 2015-07-18 19:59:29 +00:00
parent 7059fa6ff8
commit 283dfee925

@@ -830,7 +830,7 @@ intr_event_remove_handler(void *cookie)
 		 * again and remove this handler if it has already passed
 		 * it on the list.
 		 */
-		atomic_store_rel_int(&ie->ie_thread->it_need, 1);
+		ie->ie_thread->it_need = 1;
 	} else
 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
 	thread_unlock(ie->ie_thread->it_thread);
@@ -897,6 +897,10 @@ intr_event_schedule_thread(struct intr_event *ie)
 	 * Set it_need to tell the thread to keep running if it is already
 	 * running.  Then, lock the thread and see if we actually need to
 	 * put it on the runqueue.
+	 *
+	 * Use store_rel to arrange that the store to ih_need in
+	 * swi_sched() is before the store to it_need and prepare for
+	 * transfer of this order to loads in the ithread.
 	 */
 	atomic_store_rel_int(&it->it_need, 1);
 	thread_lock(td);
@@ -976,7 +980,7 @@ intr_event_remove_handler(void *cookie)
 		 * again and remove this handler if it has already passed
 		 * it on the list.
 		 */
-		atomic_store_rel_int(&it->it_need, 1);
+		it->it_need = 1;
 	} else
 		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
 	thread_unlock(it->it_thread);
@@ -1048,6 +1052,10 @@ intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
 	 * Set it_need to tell the thread to keep running if it is already
 	 * running.  Then, lock the thread and see if we actually need to
 	 * put it on the runqueue.
+	 *
+	 * Use store_rel to arrange that the store to ih_need in
+	 * swi_sched() is before the store to it_need and prepare for
+	 * transfer of this order to loads in the ithread.
 	 */
 	atomic_store_rel_int(&it->it_need, 1);
 	thread_lock(td);
@@ -1133,7 +1141,7 @@ swi_sched(void *cookie, int flags)
 	 * running it will execute this handler on the next pass.  Otherwise,
 	 * it will execute it the next time it runs.
 	 */
-	atomic_store_rel_int(&ih->ih_need, 1);
+	ih->ih_need = 1;
 
 	if (!(flags & SWI_DELAY)) {
 		PCPU_INC(cnt.v_soft);
@@ -1224,11 +1232,15 @@ intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
 		 * handlers that have their need flag set.  Hardware
 		 * interrupt threads always invoke all of their handlers.
 		 */
-		if (ie->ie_flags & IE_SOFT) {
-			if (atomic_load_acq_int(&ih->ih_need) == 0)
+		if ((ie->ie_flags & IE_SOFT) != 0) {
+			/*
+			 * ih_need can only be 0 or 1.  Failed cmpset
+			 * below means that there is no request to
+			 * execute handlers, so a retry of the cmpset
+			 * is not needed.
+			 */
+			if (atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
 				continue;
-			else
-				atomic_store_rel_int(&ih->ih_need, 0);
 		}
 
 		/* Execute this handler. */
@@ -1326,16 +1338,13 @@ ithread_loop(void *arg)
 		 * Service interrupts.  If another interrupt arrives while
 		 * we are running, it will set it_need to note that we
 		 * should make another pass.
+		 *
+		 * The load_acq part of the following cmpset ensures
+		 * that the load of ih_need in ithread_execute_handlers()
+		 * is ordered after the load of it_need here.
 		 */
-		while (atomic_cmpset_int(&ithd->it_need, 1, 0) != 0) {
-			/*
-			 * This needs a release barrier to make sure
-			 * that this write posts before any of the
-			 * memory or device accesses in the handlers.
-			 */
-			atomic_thread_fence_acq_rel();
+		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
 			ithread_execute_handlers(p, ie);
-		}
 
 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 		mtx_assert(&Giant, MA_NOTOWNED);
@@ -1505,14 +1514,12 @@ ithread_loop(void *arg)
 		 * Service interrupts.  If another interrupt arrives while
 		 * we are running, it will set it_need to note that we
 		 * should make another pass.
+		 *
+		 * The load_acq part of the following cmpset ensures
+		 * that the load of ih_need in ithread_execute_handlers()
+		 * is ordered after the load of it_need here.
 		 */
-		while (atomic_cmpset_int(&ithd->it_need, 1, 0) != 0) {
-			/*
-			 * This needs a release barrier to make sure
-			 * that this write posts before any of the
-			 * memory or device accesses in the handlers.
-			 */
-			atomic_thread_fence_acq_rel();
+		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
 			if (priv)
 				priv_ithread_execute_handler(p, ih);
 			else