1. Get rid of the usage of next_kn in kqueue_scan; it is hell to maintain and has led to countless bugs.
2. Slightly reduce lock contention in the knote_drop path.
commit 96330609b7
parent 61067bc214
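
Note on change 1: the hunks below remove the cached successor pointer (nextkn) from the scan loop. Once the kevq lock can be dropped mid-scan, a saved TAILQ_NEXT() pointer has to be re-validated or reset in every special case (markers, in-flux knotes, the realtime limit), which is what made it so error prone. The new loop simply re-reads the head of the queue on each iteration and re-queues knotes it cannot handle yet at the tail. Below is a minimal userspace sketch of that pattern, assuming nothing beyond <sys/queue.h> and a pthread mutex; the names (struct item, drain_queue, busy) are illustrative and not taken from the kernel code.

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        TAILQ_ENTRY(item) link;
        int busy;               /* loosely analogous to KN_PROCESSING */
        int id;
    };

    static TAILQ_HEAD(, item) q = TAILQ_HEAD_INITIALIZER(q);
    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Drain up to "budget" items.  No successor pointer is carried across
     * the unlocked region: each pass re-reads TAILQ_FIRST(), and an item
     * that cannot be handled yet is parked at the tail for a later pass.
     */
    static void
    drain_queue(int budget)
    {
        struct item *it;

        pthread_mutex_lock(&qlock);
        while (budget-- > 0 && (it = TAILQ_FIRST(&q)) != NULL) {
            TAILQ_REMOVE(&q, it, link);
            if (it->busy) {
                TAILQ_INSERT_TAIL(&q, it, link);   /* revisit later */
                continue;
            }
            pthread_mutex_unlock(&qlock);
            printf("processed item %d\n", it->id); /* work without the lock */
            free(it);
            pthread_mutex_lock(&qlock);
        }
        pthread_mutex_unlock(&qlock);
    }

    int
    main(void)
    {
        for (int i = 0; i < 5; i++) {
            struct item *it = calloc(1, sizeof(*it));
            it->id = i;
            it->busy = (i == 2);    /* leave one item parked */
            TAILQ_INSERT_TAIL(&q, it, link);
        }
        drain_queue(8);
        return (0);
    }
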
@ -3050,7 +3050,7 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 {
 	struct kqueue *kq;
 	struct kevent *kevp;
-	struct knote *kn, *marker, *rtmarker, *nextkn;
+	struct knote *kn, *marker, *rtmarker;
 	struct knlist *knl;
 	sbintime_t asbt, rsbt, fsbt;
 	int count, error, haskqglobal, influx, nkev, touch, fevent;
@ -3077,7 +3077,6 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 		/* activate kq if not already activated */
 		kevq_activate(kevq, td);
 	}
-
 	KEVQ_LOCK(kevq);
 	/* release processing knotes first */
 	kevq_rel_proc_kn(kevq);
@ -3280,7 +3279,6 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 
 	influx = 0;
 	kn = NULL;
-	nextkn = NULL;
 	while (count < maxevents) {
 		KEVQ_OWNED(kevq);
 
@ -3289,20 +3287,16 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 			if (curr < rtlimit) {
 				kntq = &kevq->kn_rt_head;
 				kncnt = &kevq->kn_rt_count;
+				kn = TAILQ_FIRST(kntq);
 			} else {
-				/* we've reached the limit, dequeue the realtime marker knote */
-				nextkn = rtmarker;
+				// otherwise just dequeue the rtmarker
+				kn = rtmarker;
 			}
 		} else {
 			kntq = &kevq->kn_head;
 			kncnt = &kevq->kn_count;
+			kn = TAILQ_FIRST(kntq);
 		}
 
-		if (nextkn == NULL) {
-			nextkn = TAILQ_FIRST(kntq);
-		}
-
-		kn = nextkn;
-
 		KASSERT(kn != NULL, ("kqueue_scan dequeued NULL"));
 
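
Note on the hunk above: the scan serves the realtime queue (kn_rt_head) until the per-scan quota (rtlimit) is reached and then falls back to the regular queue, and with this change the candidate knote is taken directly with TAILQ_FIRST() in both branches instead of being derived from nextkn. A simplified sketch of the quota-then-fallback selection follows; the two-queue layout and the names pick_next/rt_quota are assumptions for illustration, not the kevq code.

    #include <sys/queue.h>
    #include <stddef.h>

    struct ev {
        TAILQ_ENTRY(ev) link;
        int id;
    };
    TAILQ_HEAD(evlist, ev);

    /*
     * Serve the realtime queue first, up to a per-scan quota, then fall
     * back to the regular queue.  Returns NULL when both are empty.
     * The caller is assumed to hold the lock protecting both queues.
     */
    static struct ev *
    pick_next(struct evlist *rtq, struct evlist *normq, int *rt_served, int rt_quota)
    {
        struct ev *e;

        if (*rt_served < rt_quota && (e = TAILQ_FIRST(rtq)) != NULL) {
            TAILQ_REMOVE(rtq, e, link);
            (*rt_served)++;
            return (e);
        }
        if ((e = TAILQ_FIRST(normq)) != NULL)
            TAILQ_REMOVE(normq, e, link);
        return (e);
    }
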
@ -3326,14 +3320,6 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 			continue;
 		}
 
-		nextkn = TAILQ_NEXT(kn, kn_tqe);
-
-		if ((kn->kn_status & KN_PROCESSING) == KN_PROCESSING) {
-			// ignore knotes being processed
-			KN_FLUX_UNLOCK(kn);
-			continue;
-		}
-
 		// now this kn is going to be always dequeued from the kevq
 		TAILQ_REMOVE(kntq, kn, kn_tqe);
 
@ -3348,7 +3334,6 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 
 		if (kn == rtmarker) {
 			rdrained = 1;
-			nextkn = NULL;
 			continue;
 		}
 
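
Note on the rtmarker/rdrained handling above: a marker knote is a dummy entry inserted into the queue so the scan can tell when it has worked through everything that was queued ahead of it, which matters now that busy knotes are re-appended to the tail rather than skipped via nextkn. A generic, self-contained sketch of the marker technique follows; scan_once, struct item and its fields are made-up names, not the kernel's.

    #include <sys/queue.h>
    #include <stdbool.h>

    struct item {
        TAILQ_ENTRY(item) link;
        bool is_marker;         /* dummy entry, never handed to the caller */
        bool busy;
    };
    TAILQ_HEAD(itemlist, item);

    /*
     * Visit everything that was queued before scan_once() started, at most
     * once each, even though busy items are re-appended to the tail: the
     * marker goes in at the tail first, and the scan stops as soon as the
     * marker itself reaches the head of the queue.
     */
    static void
    scan_once(struct itemlist *q, void (*handle)(struct item *))
    {
        struct item marker = { .is_marker = true };
        struct item *it;

        TAILQ_INSERT_TAIL(q, &marker, link);
        for (;;) {
            it = TAILQ_FIRST(q);
            TAILQ_REMOVE(q, it, link);
            if (it->is_marker)
                break;                          /* drained the old entries */
            if (it->busy) {
                TAILQ_INSERT_TAIL(q, it, link); /* parked behind the marker */
                continue;
            }
            handle(it);
        }
    }
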
@ -3359,6 +3344,14 @@ kqueue_scan(struct kevq *kevq, int maxevents, struct kevent_copyops *k_ops,
 			goto done;
 		}
 
+		if ((kn->kn_status & KN_PROCESSING) == KN_PROCESSING) {
+			// reinsert at the end of queue
+			TAILQ_INSERT_TAIL(kntq, kn, kn_tqe);
+			KN_FLUX_UNLOCK(kn);
+			continue;
+		}
+
+		// now process the knote
 		kn->kn_status &= ~(KN_QUEUED | KN_WS);
 		(*kncnt)--;
 
@ -4078,8 +4071,8 @@ kevq_wakeup(struct kevq* kevq)
 	KEVQ_OWNED(kevq);
 	if ((kevq->kevq_state & KEVQ_SLEEP) == KEVQ_SLEEP) {
 		kevq->kevq_state &= ~KEVQ_SLEEP;
+		wakeup(kevq);
 	}
-	wakeup(kevq);
 }
 
 static void
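
Note on the kevq_wakeup() hunk above: the wakeup() call moves inside the KEVQ_SLEEP check, so the wakeup is only issued when a thread has actually declared itself asleep, and the unconditional wakeup() on every call is dropped. A userspace analogue of this sleep-flag pattern using a pthread condition variable is sketched below; the waiter/notifier names and the ready flag are illustrative.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool sleeping;       /* analogous to KEVQ_SLEEP */
    static bool ready;          /* the condition being waited for */

    static void
    waiter(void)
    {
        pthread_mutex_lock(&lock);
        while (!ready) {
            sleeping = true;            /* announce that a wakeup is needed */
            pthread_cond_wait(&cv, &lock);
        }
        pthread_mutex_unlock(&lock);
    }

    static void
    notifier(void)
    {
        pthread_mutex_lock(&lock);
        ready = true;
        if (sleeping) {
            sleeping = false;           /* only signal when somebody waits */
            pthread_cond_signal(&cv);
        }
        pthread_mutex_unlock(&lock);
    }
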
@ -4591,19 +4584,10 @@ knote_drop_detached(struct knote *kn, struct thread *td)
 	    ("knote %p still attached", kn));
 	KQ_NOTOWNED(kq);
 
-	KQ_LOCK(kq);
-
 	KASSERT(kn->kn_influx == 1,
 	    ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
 
-	if (kn->kn_fop->f_isfd)
-		list = &kq->kq_knlist[kn->kn_id];
-	else
-		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
-
-	if (!SLIST_EMPTY(list))
-		SLIST_REMOVE(list, kn, knote, kn_link);
-
+	// drop from kevqs
 	if (kn->kn_status & KN_QUEUED) {
 		kevq = kn->kn_kevq;
 		KEVQ_LOCK(kevq);
@ -4617,9 +4601,21 @@ knote_drop_detached(struct knote *kn, struct thread *td)
 		knote_proc_dequeue(kn);
 		KEVQ_UNLOCK(kevq);
 	}
 
+	// drop from kq
+	KQ_LOCK(kq);
+
+	if (kn->kn_fop->f_isfd)
+		list = &kq->kq_knlist[kn->kn_id];
+	else
+		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
+
+	if (!SLIST_EMPTY(list))
+		SLIST_REMOVE(list, kn, knote, kn_link);
+
-	knote_leave_flux_ul(kn);
 	KQ_UNLOCK(kq);
 
+	knote_leave_flux_ul(kn);
+
 	if (kn->kn_fop->f_isfd) {
 		fdrop(kn->kn_fp, td);
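
Note on change 2: in knote_drop_detached() the kq lock was previously taken near the top of the function and held across the kevq-side dequeue; now the kevq dequeue happens first under only the kevq lock, KQ_LOCK() is held just for the kq_knlist/kq_knhash unlink, and knote_leave_flux_ul() runs after KQ_UNLOCK(). The general idea, shrinking the critical section to the single shared-structure update it protects, is sketched below with made-up names (object_destroy, registry).

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct object {
        LIST_ENTRY(object) reg_link;
        /* ... per-object state ... */
    };

    static LIST_HEAD(, object) registry = LIST_HEAD_INITIALIZER(registry);
    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Tear an object down while holding the global registry lock only for
     * the unlink itself; per-object cleanup and the final release happen
     * outside the short critical section.
     */
    static void
    object_destroy(struct object *obj)
    {
        /* 1. Per-object teardown: no global lock required. */

        /* 2. Global structure update: keep this window as small as possible. */
        pthread_mutex_lock(&registry_lock);
        LIST_REMOVE(obj, reg_link);
        pthread_mutex_unlock(&registry_lock);

        /* 3. Final release, again outside the global lock. */
        free(obj);
    }
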