Ensure that knotes do not get registered when KQ_CLOSING is set.

KQ_CLOSING is set before draining the knotes associated with a kqueue,
so we must ensure that new knotes are not added after that point.  In
particular, some kernel facilities may register for events on behalf
of a userspace process and race with a close of the kqueue.

PR:		228858
Reviewed by:	kib
Tested by:	pho
MFC after:	3 days
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D18316
This commit is contained in:
Mark Johnston 2018-11-24 16:58:34 +00:00
parent 1eeab857a3
commit a2afae524a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=340898

View File

@@ -1460,8 +1460,11 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
 			break;
 		}
 	} else {
-		if ((kev->flags & EV_ADD) == EV_ADD)
-			kqueue_expand(kq, fops, kev->ident, waitok);
+		if ((kev->flags & EV_ADD) == EV_ADD) {
+			error = kqueue_expand(kq, fops, kev->ident, waitok);
+			if (error != 0)
+				goto done;
+		}
 
 		KQ_LOCK(kq);
@@ -1693,12 +1696,12 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 {
 	struct klist *list, *tmp_knhash, *to_free;
 	u_long tmp_knhashmask;
-	int size;
-	int fd;
+	int error, fd, size;
 	int mflag = waitok ? M_WAITOK : M_NOWAIT;
 
 	KQ_NOTOWNED(kq);
 
+	error = 0;
 	to_free = NULL;
 	if (fops->f_isfd) {
 		fd = ident;
@@ -1710,9 +1713,11 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 		if (list == NULL)
 			return ENOMEM;
 		KQ_LOCK(kq);
-		if (kq->kq_knlistsize > fd) {
+		if ((kq->kq_state & KQ_CLOSING) != 0) {
+			to_free = list;
+			error = EBADF;
+		} else if (kq->kq_knlistsize > fd) {
 			to_free = list;
-			list = NULL;
 		} else {
 			if (kq->kq_knlist != NULL) {
 				bcopy(kq->kq_knlist, list,
@@ -1734,9 +1739,12 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 		    &tmp_knhashmask,
 		    waitok ? HASH_WAITOK : HASH_NOWAIT);
 		if (tmp_knhash == NULL)
-			return ENOMEM;
+			return (ENOMEM);
 		KQ_LOCK(kq);
-		if (kq->kq_knhashmask == 0) {
+		if ((kq->kq_state & KQ_CLOSING) != 0) {
+			to_free = tmp_knhash;
+			error = EBADF;
+		} else if (kq->kq_knhashmask == 0) {
 			kq->kq_knhash = tmp_knhash;
 			kq->kq_knhashmask = tmp_knhashmask;
 		} else {
@@ -1748,7 +1756,7 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 	free(to_free, M_KQUEUE);
 	KQ_NOTOWNED(kq);
-	return 0;
+	return (error);
 }
 
 static void
@@ -2597,6 +2605,8 @@ knote_attach(struct knote *kn, struct kqueue *kq)
 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
 	KQ_OWNED(kq);
 
+	if ((kq->kq_state & KQ_CLOSING) != 0)
+		return (EBADF);
 	if (kn->kn_fop->f_isfd) {
 		if (kn->kn_id >= kq->kq_knlistsize)
 			return (ENOMEM);