diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index de7f70d2b2be..e44bc33eeb4d 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -100,9 +100,9 @@ MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
 TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
 
 static struct kevq *kevqlist_find(struct kevqlist *kevq_list, struct kqueue *kq);
-static void kevq_wakeup(struct kevq* kevq);
 static void kevq_thred_init(struct kevq_thred *kevq_th);
 static void kevq_thred_destroy(struct kevq_thred *kevq_th);
+static void kevq_wakeup(struct kevq* kevq);
 static void kevq_init(struct kevq *kevq);
 static void kevq_release(struct kevq* kevq, int locked);
 static int kevq_acquire_kq(struct kqueue *kq, struct thread *td, struct kevq **kevqp);
@@ -158,7 +158,14 @@ static struct fileops kqueueops = {
     .fo_fill_kinfo = kqueue_fill_kinfo,
 };
 
-static void knote_activate(struct knote *kn, int haskqlock);
+static bool knote_leave_flux_ul(struct knote *kn);
+static bool knote_leave_flux(struct knote *kn);
+static void knote_enter_flux(struct knote *kn);
+static void knote_enter_flux_ul(struct knote *kn);
+static void knote_flux_wakeup_ul(struct knote *kn);
+static void knote_flux_wakeup(struct knote *kn);
+static void knote_activate_ul(struct knote *kn);
+static void knote_activate(struct knote *kn);
 static int knote_attach(struct knote *kn, struct kqueue *kq);
 static void knote_drop(struct knote *kn, struct thread *td);
 static void knote_drop_detached(struct knote *kn, struct thread *td);
@@ -167,7 +174,7 @@ static void knote_dequeue(struct knote *kn);
 static void knote_init(void);
 static struct knote *knote_alloc(int mflag);
 static void knote_free(struct knote *kn);
-static struct kevq* knote_sched(struct knote *kn);
+static void knote_sched(struct knote *kn);
 
 static void filt_kqdetach(struct knote *kn);
 static int filt_kqueue(struct knote *kn, long hint);
@@ -239,24 +246,19 @@ SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
 #define KEVQ_LOCK(kevq) do { \
     mtx_lock(&(kevq)->lock); \
 } while (0)
-#define KN_FLUX_WAKEUP(kn, haslock) do { \
-    if ((haslock)) \
-        KN_FLUX_OWNED((kn)); \
-    else \
-        KN_FLUX_LOCK((kn)); \
-    if ((kn)->kn_fluxwait) { \
-        (kn)->kn_fluxwait = 0; \
-        wakeup((kn)); \
-    } \
-    if (!(haslock)) \
-        KN_FLUX_UNLOCK((kn)); \
-} while (0)
 #define KQ_UNLOCK(kq) do { \
     mtx_unlock(&(kq)->kq_lock); \
 } while (0)
 #define KN_FLUX_UNLOCK(kn) do { \
     mtx_unlock(&(kn)->kn_fluxlock); \
 } while (0)
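+/*
+ * Take the knote out of flux and wake up any thread msleep()ing on it,
+ * acquiring and dropping the flux lock internally.
+ */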
+#define KN_LEAVE_FLUX_WAKEUP(kn) do { \
+    KN_FLUX_NOTOWNED((kn)); \
+    KN_FLUX_LOCK((kn)); \
+    knote_leave_flux((kn)); \
+    knote_flux_wakeup((kn)); \
+    KN_FLUX_UNLOCK((kn)); \
+} while(0)
 #define KEVQ_TH_UNLOCK(kevqth) do { \
     mtx_unlock(&(kevqth)->lock); \
 } while (0)
@@ -272,7 +274,7 @@ SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
 #define KN_FLUX_OWNED(kn) do { \
     mtx_assert(&(kn)->kn_fluxlock, MA_OWNED); \
 } while (0)
-#define KN_FLUX_NOTOWNED(kq) do { \
+#define KN_FLUX_NOTOWNED(kn) do { \
     mtx_assert(&(kn)->kn_fluxlock, MA_NOTOWNED); \
 } while (0)
 #define KEVQ_OWNED(kevq) do { \
@@ -315,46 +317,49 @@ kn_in_flux(struct knote *kn)
 }
 
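+/*
+ * A knote in flux is being changed or dropped by some thread and must be
+ * left alone by everyone else.  The plain enter/leave helpers must be
+ * called with the knote's flux lock held; the *_ul variants acquire and
+ * drop that lock themselves.
+ */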
 static void
-kn_enter_flux(struct knote *kn, int haslock)
+knote_enter_flux_ul(struct knote *kn)
+{
+    KN_FLUX_NOTOWNED(kn);
+    KN_FLUX_LOCK(kn);
+    knote_enter_flux(kn);
+    KN_FLUX_UNLOCK(kn);
+}
+
+static void
+knote_enter_flux(struct knote *kn)
 {
 #ifdef KQ_DEBUG
-    printf("KQUEUE: kn_enter_flux: %p\n", kn);
+    printf("KQUEUE: knote_enter_flux: %p\n", kn);
 #endif
-    if(haslock) {
     KN_FLUX_OWNED(kn);
-    } else {
-        KN_FLUX_LOCK(kn);
-    }
     MPASS(kn->kn_influx < INT_MAX);
     kn->kn_influx++;
-    if(!haslock) {
-        KN_FLUX_UNLOCK(kn);
-    }
 }
+
+/* TODO: change *_ul functions to macros? */
+static bool
+knote_leave_flux_ul(struct knote *kn)
+{
+    bool ret;
+    KN_FLUX_NOTOWNED(kn);
+    KN_FLUX_LOCK(kn);
+    ret = knote_leave_flux(kn);
+    KN_FLUX_UNLOCK(kn);
+    return ret;
+}
 
 static bool
-kn_leave_flux(struct knote *kn, int haslock)
+knote_leave_flux(struct knote *kn)
 {
 #ifdef KQ_DEBUG
-    printf("KQUEUE: kn_leave_flux: %p\n", kn);
+    printf("KQUEUE: knote_leave_flux: %p\n", kn);
 #endif
-    if(haslock) {
     KN_FLUX_OWNED(kn);
-    } else {
-        KN_FLUX_LOCK(kn);
-    }
     MPASS(kn->kn_influx > 0);
     kn->kn_influx--;
-    if(!haslock) {
-        KN_FLUX_UNLOCK(kn);
-        // RETURN false if the caller doesn't care about the result
-        // otherwise there might be a race here
-        return false;
-    } else {
-        return (kn->kn_influx == 0);
-    }
+    return (kn->kn_influx == 0);
 }
 
 #define KNL_ASSERT_LOCK(knl, islocked) do { \
     if (islocked) \
@@ -535,7 +540,7 @@ filt_procattach(struct knote *kn)
      * is registered.
      */
     if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
-        knote_activate(kn, 0);
+        knote_activate_ul(kn);
 
     PROC_UNLOCK(p);
 
@@ -636,21 +641,20 @@ knote_fork(struct knlist *list, struct thread *td, int pid)
          * track the child. Drop the locks in preparation for
          * the call to kqueue_register().
          */
-        kn_enter_flux(kn, 1);
+        knote_enter_flux(kn);
         KN_FLUX_UNLOCK(kn);
 
         /*
          * The same as knote(), activate the event.
          */
         if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
-            if (kn->kn_fop->f_event(kn, NOTE_FORK))
-                knote_activate(kn, 1);
-
-            KN_FLUX_LOCK(kn);
-            kn_leave_flux(kn, 1);
-            KN_FLUX_WAKEUP(kn, 1);
-            KN_FLUX_UNLOCK(kn);
+            error = kn->kn_fop->f_event(kn, NOTE_FORK);
             KQ_UNLOCK(kq);
+
+            if (error)
+                knote_activate_ul(kn);
+
+            KN_LEAVE_FLUX_WAKEUP(kn);
             continue;
         }
 
@@ -698,13 +702,10 @@ knote_fork(struct knlist *list, struct thread *td, int pid)
         if (error)
             kn->kn_fflags |= NOTE_TRACKERR;
         if (kn->kn_fop->f_event(kn, NOTE_FORK))
-            knote_activate(kn, 0);
+            knote_activate_ul(kn);
         list->kl_lock(list->kl_lockarg);
-        KN_FLUX_LOCK(kn);
-        kn_leave_flux(kn, 1);
-        KN_FLUX_WAKEUP(kn, 1);
-        KN_FLUX_UNLOCK(kn);
+        KN_LEAVE_FLUX_WAKEUP(kn);
     }
 }
 
@@ -787,13 +788,9 @@ filt_timerexpire(void *knx)
     kn = knx;
     kn->kn_data++;
 
-    kn_enter_flux(kn, 0);
-
-    knote_activate(kn, 0);
-
-    /* TODO: lock? */
-    kn_leave_flux(kn, 0);
-    KN_FLUX_WAKEUP(kn, 0);
+    knote_enter_flux_ul(kn);
+    knote_activate_ul(kn);
+    KN_LEAVE_FLUX_WAKEUP(kn);
 
     if ((kn->kn_flags & EV_ONESHOT) != 0)
         return;
@@ -1606,6 +1603,7 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
     }
 
+    /* take the kq lock to walk the kq_knlist table */
     KQ_LOCK(kq);
     if (kev->ident < kq->kq_knlistsize) {
         SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
@@ -1619,6 +1617,7 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
             goto done;
         }
 
+        /* take the kq lock to walk the kq_knhash table */
         KQ_LOCK(kq);
 
         /*
@@ -1643,6 +1642,9 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
         }
     }
 
+    /* the kq lock must be held here: attaching the knote to the kq requires it */
+    KQ_OWNED(kq);
+
     /* knote is in the process of changing, wait for it to stabilize. */
     if (kn != NULL) {
         KN_FLUX_LOCK(kn);
@@ -1663,7 +1665,7 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
             goto findkn;
         }
     }
-    /* We now have exclusive access to the knote with flux lock */
+    /* We now have exclusive access to the knote with flux lock and kq lock */
 
     /*
      * kn now contains the matching knote, or NULL if no match
@@ -1699,7 +1701,7 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
             kn->kn_status = KN_DETACHED;
             if ((kev->flags & EV_DISABLE) != 0)
                 kn->kn_status |= KN_DISABLED;
-            kn_enter_flux(kn, 0);
+            knote_enter_flux_ul(kn);
 
             error = knote_attach(kn, kq);
             KQ_UNLOCK(kq);
@@ -1724,7 +1726,7 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
 
     if (kev->flags & EV_DELETE) {
         /* We have the exclusive flux lock here */
-        kn_enter_flux(kn, 1);
+        knote_enter_flux(kn);
         KN_FLUX_UNLOCK(kn);
 
         KQ_UNLOCK(kq);
@@ -1734,13 +1736,13 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
     }
 
     /* We have the exclusive lock */
-    kn_enter_flux(kn, 1);
+    knote_enter_flux(kn);
     KN_FLUX_UNLOCK(kn);
 
     // we have kq lock and knote influx
     if (kev->flags & EV_FORCEONESHOT) {
         kn->kn_flags |= EV_ONESHOT;
-        knote_activate(kn, 1);
+        knote_activate(kn);
     }
 
     if ((kev->flags & EV_ENABLE) != 0)
@@ -1784,16 +1786,14 @@ kqueue_register(struct kqueue *kq, struct kevq *kevq, struct kevent *kev, struct
     KQ_LOCK(kq);
     if (event)
-        knote_activate(kn, 1);
+        knote_activate(kn);
 
     kn->kn_status &= ~KN_SCAN;
-    KN_FLUX_LOCK(kn);
-    kn_leave_flux(kn, 1);
-    KN_FLUX_WAKEUP(kn, 1);
-    KN_FLUX_UNLOCK(kn);
+    KN_LEAVE_FLUX_WAKEUP(kn);
     KQ_UNLOCK(kq);
+
     kn_list_unlock(knl);
 
 done:
@@ -2271,7 +2271,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
             kn_in_flux(kn)) {
             if (influx) {
                 influx = 0;
-                KN_FLUX_WAKEUP(kn, 1);
+                knote_flux_wakeup(kn);
             }
             kn->kn_fluxwait = 1;
             KN_FLUX_UNLOCK(kn);
@@ -2293,7 +2293,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
         }
         if (kn == marker) {
             /* We are dequeuing our marker, wakeup threads waiting on it */
-            KN_FLUX_WAKEUP(kn, 1);
+            knote_flux_wakeup(kn);
             KN_FLUX_UNLOCK(kn);
             if (count == maxevents) {
                 goto retry;
@@ -2305,7 +2305,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 
         if ((kn->kn_flags & EV_DROP) == EV_DROP) {
             kn->kn_status &= ~KN_QUEUED;
-            kn_enter_flux(kn, 1);
+            knote_enter_flux(kn);
             kevq->kn_count--;
             KN_FLUX_UNLOCK(kn);
             KEVQ_UNLOCK(kevq);
@@ -2318,7 +2318,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
             continue;
         } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
             kn->kn_status &= ~KN_QUEUED;
-            kn_enter_flux(kn, 1);
+            knote_enter_flux(kn);
             kevq->kn_count--;
             KN_FLUX_UNLOCK(kn);
             KEVQ_UNLOCK(kevq);
@@ -2332,7 +2332,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
             kn = NULL;
         } else {
             kn->kn_status |= KN_SCAN;
-            kn_enter_flux(kn, 1);
+            knote_enter_flux(kn);
             KN_FLUX_UNLOCK(kn);
             KEVQ_UNLOCK(kevq);
             if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) {
@@ -2346,7 +2346,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
             kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | KN_SCAN);
-            kn_leave_flux(kn, 0);
+            knote_leave_flux_ul(kn);
             kevq->kn_count--;
             kn_list_unlock(knl);
             influx = 1;
@@ -2383,7 +2383,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
         }
 
         kn->kn_status &= ~KN_SCAN;
-        kn_leave_flux(kn, 0);
+        knote_leave_flux_ul(kn);
         kn_list_unlock(knl);
         influx = 1;
     }
@@ -2394,9 +2394,11 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
         count--;
         if (nkev == KQ_NEVENTS) {
-            influx = 0;
-            KN_FLUX_WAKEUP(kn, 0);
             KEVQ_UNLOCK(kevq);
+
+            influx = 0;
+            knote_flux_wakeup_ul(kn);
+
             error = k_ops->k_copyout(k_ops->arg, keva, nkev);
             nkev = 0;
             kevp = keva;
@@ -2411,7 +2413,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
     KEVQ_UNLOCK(kevq);
 
     if (kn != NULL) {
-        KN_FLUX_WAKEUP(kn, 0);
+        knote_flux_wakeup_ul(kn);
     }
 
     knote_free(marker);
@@ -2559,7 +2561,7 @@ kevq_destroy(struct kevq *kevq)
 }
 
 /* This is called on every kevq when kqueue exits
-   This is also called when a thread exits/crashes
+   This is also called when a thread exits/crashes (currently racy; kq->kq_ckevq also needs to be reconfigured to make this work)
 * a ref cnt must be held */
 void
 kevq_drain(struct kevq *kevq)
@@ -2576,7 +2578,6 @@ kevq_drain(struct kevq *kevq)
     KEVQ_NOTOWNED(kevq);
 
     KEVQ_LOCK(kevq);
-    // now the refcnt is 1
     if(kevq->kevq_state == KEVQ_CLOSING) {
         // already closing, dereference
         kevq_release(kevq, 1);
@@ -2602,38 +2603,35 @@ kevq_drain(struct kevq *kevq)
         if (kn_in_flux(kn)) {
             kn->kn_fluxwait = 1;
             KN_FLUX_UNLOCK(kn);
-            msleep(kn, &kevq->lock, PSOCK, "kevqclo1", 0);
+            msleep(kn, &kevq->lock, PSOCK, "kevqclose2", 0);
             goto retry;
         }
 
         KN_FLUX_OWNED(kn);
         KASSERT(!kn_in_flux(kn), ("knote is still influx"));
 
-        kn_enter_flux(kn, 1);
+        knote_enter_flux(kn);
         KN_FLUX_UNLOCK(kn);
 
         knote_dequeue(kn);
 
         if ((kq->kq_state & KQ_FLAG_MULTI) == KQ_FLAG_MULTI && (kq->kq_state & KQ_CLOSING) != KQ_CLOSING) {
             KEVQ_UNLOCK(kevq);
-            knote_activate(kn, 0);
+            knote_activate_ul(kn);
             KEVQ_LOCK(kevq);
         }
 
-        KN_FLUX_LOCK(kn);
-        kn_leave_flux(kn, 1);
-        KN_FLUX_WAKEUP(kn, 1);
-        KN_FLUX_UNLOCK(kn);
+        KN_LEAVE_FLUX_WAKEUP(kn);
     }
 
     KASSERT(kevq->kn_count == 0, ("some knotes are left"));
     KEVQ_UNLOCK(kevq);
 
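+    /* unlink this kevq from the kq, in both single and multi-threaded mode */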
+    KQ_LOCK(kq);
+    TAILQ_REMOVE(&kq->kq_kevqlist, kevq, kq_e);
+    KQ_UNLOCK(kq);
+
     if ((kq->kq_state & KQ_FLAG_MULTI) == KQ_FLAG_MULTI) {
         // drop from kq if in multithreaded mode
-        KQ_LOCK(kq);
-        TAILQ_REMOVE(&kq->kq_kevqlist, kevq, kq_e);
-        KQ_UNLOCK(kq);
-
         KEVQ_TH_LOCK(kevq->kevq_th);
         TAILQ_REMOVE(&kevq->kevq_th->kevq_tq, kevq, kevq_th_tqe);
         kevq_list = &kevq->kevq_th->kevq_hash[KEVQ_HASH((unsigned long long)kq, kevq->kevq_th->kevq_hashmask)];
@@ -2641,6 +2639,7 @@ kevq_drain(struct kevq *kevq)
         KEVQ_TH_UNLOCK(kevq->kevq_th);
     }
 
+    /* delete the kevq */
     kevq_destroy(kevq);
 }
 
@@ -2699,7 +2698,7 @@ kqueue_drain(struct kqueue *kq, struct kevq *kevq, struct thread *td)
             msleep(kn, &kq->kq_lock, PSOCK, "kqclo1", 0);
             continue;
         }
-        kn_enter_flux(kn, 1);
+        knote_enter_flux(kn);
         KN_FLUX_UNLOCK(kn);
         KQ_UNLOCK(kq);
         knote_drop(kn, td);
@@ -2716,7 +2715,7 @@ kqueue_drain(struct kqueue *kq, struct kevq *kevq, struct thread *td)
             msleep(kn, &kq->kq_lock, PSOCK, "kqclo2", 0);
             continue;
         }
-        kn_enter_flux(kn, 1);
+        knote_enter_flux(kn);
         KN_FLUX_UNLOCK(kn);
         KQ_UNLOCK(kq);
         knote_drop(kn, td);
@@ -2888,7 +2887,7 @@ knote(struct knlist *list, long hint, int lockflags)
             KN_FLUX_UNLOCK(kn);
         } else {
             kq = kn->kn_kq;
-            kn_enter_flux(kn, 1);
+            knote_enter_flux(kn);
             KN_FLUX_UNLOCK(kn);
 
             require_kqlock = ((lockflags & KNF_NOKQLOCK) == 0);
@@ -2896,70 +2895,73 @@ knote(struct knlist *list, long hint, int lockflags)
             if (require_kqlock)
                 KQ_LOCK(kq);
 
-            if (kn->kn_fop->f_event(kn, hint))
-                knote_activate(kn, require_kqlock);
+            if (kn->kn_fop->f_event(kn, hint)) {
+                // TODO: have callers pass the lock state explicitly instead of branching here
+                require_kqlock ? knote_activate(kn) : knote_activate_ul(kn);
+            }
 
             if (require_kqlock)
                 KQ_UNLOCK(kq);
 
-            KN_FLUX_LOCK(kn);
-            kn_leave_flux(kn, 1);
-            KN_FLUX_WAKEUP(kn, 1);
-            KN_FLUX_UNLOCK(kn);
+            KN_LEAVE_FLUX_WAKEUP(kn);
         }
     }
     if ((lockflags & KNF_LISTLOCKED) == 0)
         list->kl_unlock(list->kl_lockarg);
 }
 
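+/*
+ * Wake up any thread msleep()ing on this knote's flux state.
+ * knote_flux_wakeup() requires the flux lock held; the _ul variant
+ * acquires and drops it around the wakeup.
+ */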
+static void
+knote_flux_wakeup_ul(struct knote *kn)
+{
+    KN_FLUX_NOTOWNED(kn);
+    KN_FLUX_LOCK(kn);
+    knote_flux_wakeup(kn);
+    KN_FLUX_UNLOCK(kn);
+}
+
+static void
+knote_flux_wakeup(struct knote *kn)
+{
+    KN_FLUX_OWNED(kn);
+    if ((kn)->kn_fluxwait) {
+        (kn)->kn_fluxwait = 0;
+        wakeup((kn));
+    }
+}
+
+static void
+knote_activate_ul(struct knote *kn)
+{
+    KQ_LOCK(kn->kn_kq);
+    knote_activate(kn);
+    KQ_UNLOCK(kn->kn_kq);
+}
+
 /*
  * activate a knote
  * the knote should be marked in flux and the knote flux lock should not be owned
+ * the kq lock must be held and no kevq lock may be held;
+ * knote_activate_ul() acquires the kq lock for the caller
  */
 static void
-knote_activate(struct knote *kn, int haskqlock)
+knote_activate(struct knote *kn)
 {
-    struct kevq *kevq;
     struct kqueue *kq;
+    kq = kn->kn_kq;
 #ifdef KQ_DEBUG
     printf("KQUEUE: knote_activate: kn %p\n", kn);
 #endif
-    kq = kn->kn_kq;
-
-    if (haskqlock) {
-        KQ_OWNED(kq);
-    } else {
-        KQ_LOCK(kq);
-    }
-
+    KQ_OWNED(kq);
     KN_FLUX_NOTOWNED(kn);
     KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
 
     kn->kn_status |= KN_ACTIVE;
 
-retry:
     if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
-        kevq = knote_sched(kn);
-
-        if (kevq != NULL) {
-            // if we have a queue to queue the knote
-            KEVQ_LOCK(kevq);
-
-            if ((kevq->kevq_state & KEVQ_CLOSING) != 0) {
-                KEVQ_UNLOCK(kevq);
-                goto retry;
-            }
-
-            knote_enqueue(kn, kevq);
-
-            KEVQ_UNLOCK(kevq);
-        }
+        knote_sched(kn);
     }
 
-    if (!haskqlock) {
-        KQ_UNLOCK(kq);
-    }
+    /* knote_enqueue() no longer wakes up the kq; do it here */
+    kqueue_wakeup(kq);
 }
 
@@ -3198,7 +3200,7 @@ knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
         }
         knlist_remove_kq(knl, kn, 1, 1);
         if (killkn) {
-            kn_enter_flux(kn, 1);
+            knote_enter_flux(kn);
             KN_FLUX_UNLOCK(kn);
             KQ_UNLOCK(kq);
             knote_drop_detached(kn, td);
@@ -3265,13 +3267,13 @@ knote_fdclose(struct thread *td, int fd)
         if (kn_in_flux(kn)) {
             /* someone else might be waiting on our knote */
             if (influx)
-                KN_FLUX_WAKEUP(kn, 1);
+                knote_flux_wakeup(kn);
             kn->kn_fluxwait = 1;
             KN_FLUX_UNLOCK(kn);
             msleep(kn, &kq->kq_lock, PSOCK, "kqflxwt", 0);
             goto again;
         }
-        kn_enter_flux(kn, 1);
+        knote_enter_flux(kn);
         KN_FLUX_UNLOCK(kn);
         KQ_UNLOCK(kq);
         /* WTF ? influx should be 0? */
@@ -3361,11 +3363,11 @@ knote_drop_detached(struct knote *kn, struct thread *td)
 
-// if no kevqs are available for queueing, returns NULL
-static struct kevq*
+// if no kevqs are available for queueing, the knote is discarded
+static void
 knote_sched(struct knote *kn)
 {
     struct kqueue *kq = kn->kn_kq;
-    struct kevq *each_kevq;
+    struct kevq *next_kevq;
 
     KQ_OWNED(kq);
     KASSERT(kn_in_flux(kn), ("kn not in flux"));
 
@@ -3377,55 +3379,63 @@ knote_sched(struct knote *kn)
             printf("KQUEUE: knote_sched(M) affinity set: kn %p \n", kn);
 #endif
             if ((kn->kn_org_kevq->kevq_state & KEVQ_RDY) != 0) {
-                return kn->kn_org_kevq;
+                next_kevq = kn->kn_org_kevq;
+                KEVQ_LOCK(next_kevq);
             } else {
-                return NULL;
+                next_kevq = NULL;
             }
         } else {
-            each_kevq = kq->kq_ckevq;
+            next_kevq = kq->kq_ckevq;
             while(1) {
-                if (each_kevq == NULL) {
-                    each_kevq = TAILQ_FIRST(&kq->kq_kevqlist);
-                    if (each_kevq == NULL) {
+                if (next_kevq == NULL) {
+                    next_kevq = TAILQ_FIRST(&kq->kq_kevqlist);
+                    if (next_kevq == NULL) {
 #ifdef KQ_DEBUG
                         printf("KQUEUE: knote_sched(M) no kevqs exist for queueing kn %p, discarding... \n", kn);
 #endif
                         break;
                     }
                 } else {
-                    each_kevq = TAILQ_NEXT(each_kevq, kq_e);
-                    if (each_kevq == NULL) {
+                    next_kevq = TAILQ_NEXT(next_kevq, kq_e);
+                    if (next_kevq == NULL) {
                         continue;
                    }
                }
 
-                if ((each_kevq->kevq_state & KEVQ_CLOSING) == 0 && (each_kevq->kevq_state & KEVQ_RDY) != 0) {
+                KASSERT(next_kevq != NULL, ("picked a null kevq"));
+                KEVQ_LOCK(next_kevq);
+
+                if ((next_kevq->kevq_state & KEVQ_CLOSING) == 0 && (next_kevq->kevq_state & KEVQ_RDY) != 0) {
+                    kq->kq_ckevq = next_kevq;
                     break;
                 }
 
-                if (each_kevq == kq->kq_ckevq) {
+                if (next_kevq == kq->kq_ckevq) {
                     // if the previous "if" didn't break
                     // we have traversed the list once and the current kevq is closing
                    // we have no queue to queue the knote
 #ifdef KQ_DEBUG
                    printf("KQUEUE: knote_sched(M) no open kevqs for queueing kn %p, discarding... \n", kn);
 #endif
+                    KEVQ_UNLOCK(next_kevq);
                     next_kevq = NULL;
                     break;
                 }
+
+                /* this kevq is unusable, drop it and try the next one */
+                KEVQ_UNLOCK(next_kevq);
             }
-
-#ifdef KQ_DEBUG
-            printf("KQUEUE: knote_sched(M) next kevq %p for kn %p \n", each_kevq, kn);
-#endif
-            kq->kq_ckevq = each_kevq;
-            return each_kevq;
         }
     } else {
 #ifdef KQ_DEBUG
         printf("KQUEUE: knote_sched(S): kn %p to kevq %p\n", kn, kq->kq_kevq);
 #endif
-        return kq->kq_kevq;
+        next_kevq = kq->kq_kevq;
+        if (next_kevq != NULL)
+            KEVQ_LOCK(next_kevq);
+    }
+
+#ifdef KQ_DEBUG
+    printf("KQUEUE: knote_sched(M) next kevq %p for kn %p \n", next_kevq, kn);
+#endif
+
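+    /* a kevq picked above is returned locked; NULL means the event is discarded */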
\n", kn); #endif break; } } else { - each_kevq = TAILQ_NEXT(each_kevq, kq_e); - if (each_kevq == NULL) { + next_kevq = TAILQ_NEXT(next_kevq, kq_e); + if (next_kevq == NULL) { continue; } } - if ((each_kevq->kevq_state & KEVQ_CLOSING) == 0 && (each_kevq->kevq_state & KEVQ_RDY) != 0) { + KASSERT(next_kevq != NULL, ("picked a null kevq")); + KEVQ_LOCK(next_kevq); + + if ((next_kevq->kevq_state & KEVQ_CLOSING) == 0 && (next_kevq->kevq_state & KEVQ_RDY) != 0) { + kq->kq_ckevq = next_kevq; break; } - if (each_kevq == kq->kq_ckevq) { + if (next_kevq == kq->kq_ckevq) { // if the previous "if" didn't break // we have traversed the list once and the current kevq is closing // we have no queue to queue the knote #ifdef KQ_DEBUG printf("KQUEUE: knote_sched(M) no open kevqs for queueing kn %p, discarding... \n", kn); #endif - each_kevq = NULL; + next_kevq = NULL; break; } } - -#ifdef KQ_DEBUG - printf("KQUEUE: knote_sched(M) next kevq %p for kn %p \n", each_kevq, kn); -#endif - kq->kq_ckevq = each_kevq; - return each_kevq; } } else { #ifdef KQ_DEBUG printf("KQUEUE: knote_sched(S): kn %p to kevq %p\n", kn, kq->kq_kevq); #endif - return kq->kq_kevq; + next_kevq = kq->kq_kevq; + } + +#ifdef KQ_DEBUG + printf("KQUEUE: knote_sched(M) next kevq %p for kn %p \n", next_kevq, kn); +#endif + + if (next_kevq != NULL) { + KEVQ_OWNED(next_kevq); + knote_enqueue(kn, next_kevq); + KEVQ_UNLOCK(next_kevq); } } @@ -3439,8 +3449,6 @@ knote_enqueue(struct knote *kn, struct kevq *kevq) printf("KQUEUE: knote_enqueue: kn %p to kevq %p\n", kn, kevq); #endif - /* TODO: optimize locking, we don't really need KQ_LOCK here except for kqueue_wakeup() */ - KQ_OWNED(kq); KEVQ_OWNED(kevq); KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); @@ -3452,7 +3460,6 @@ knote_enqueue(struct knote *kn, struct kevq *kevq) TAILQ_INSERT_TAIL(&kevq->kn_head, kn, kn_tqe); kevq->kn_count++; - kqueue_wakeup(kq); kevq_wakeup(kevq); }