Ensure that knotes do not get registered when KQ_CLOSING is set.
KQ_CLOSING is set before draining the knotes associated with a kqueue, so
we must ensure that new knotes are not added after that point. In
particular, some kernel facilities may register for events on behalf of a
userspace process and race with a close of the kqueue.

PR:		228858
Reviewed by:	kib
Tested by:	pho
MFC after:	3 days
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D18316
commit 8a86bae908
parent 01ec130dc7
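The ordering the commit message relies on can be modeled outside the kernel: the close path publishes KQ_CLOSING under the kqueue lock before it drains the knote lists, and with this change every registration path re-checks the flag under that same lock and fails with EBADF instead of attaching. The standalone C sketch below illustrates only that handshake with a pthread mutex; the kq_model structure and the kq_model_register()/kq_model_drain() helpers are made-up names for illustration, not FreeBSD interfaces, and no attempt is made to reproduce the real knote bookkeeping.

/* Standalone model of the close/registration handshake; not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct kq_model {
	pthread_mutex_t	lock;		/* stands in for KQ_LOCK */
	bool		closing;	/* stands in for KQ_CLOSING */
	int		nknotes;	/* stands in for the knote lists */
};

/* Registration path: refuse to attach once the close has begun. */
static int
kq_model_register(struct kq_model *kq)
{
	int error = 0;

	pthread_mutex_lock(&kq->lock);
	if (kq->closing)
		error = EBADF;		/* mirrors the new EBADF returns */
	else
		kq->nknotes++;
	pthread_mutex_unlock(&kq->lock);
	return (error);
}

/* Close path: publish the flag under the lock, then drain. */
static void
kq_model_drain(struct kq_model *kq)
{
	pthread_mutex_lock(&kq->lock);
	kq->closing = true;	/* from here on, registration fails */
	kq->nknotes = 0;	/* the drained count cannot grow behind our back */
	pthread_mutex_unlock(&kq->lock);
}

int
main(void)
{
	struct kq_model kq = { .closing = false, .nknotes = 0 };

	pthread_mutex_init(&kq.lock, NULL);
	printf("before close: %d\n", kq_model_register(&kq));	/* 0 */
	kq_model_drain(&kq);
	printf("after close:  %d\n", kq_model_register(&kq));	/* EBADF */
	pthread_mutex_destroy(&kq.lock);
	return (0);
}

Compiled with cc -pthread, the second kq_model_register() call reports EBADF, which is the outcome the new checks in kqueue_expand() and knote_attach() give to registrations that lose the race with a close of the kqueue.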
sys/kern/kern_event.c

@@ -1460,8 +1460,11 @@ findkn:
 					break;
 		}
 	} else {
-		if ((kev->flags & EV_ADD) == EV_ADD)
-			kqueue_expand(kq, fops, kev->ident, waitok);
+		if ((kev->flags & EV_ADD) == EV_ADD) {
+			error = kqueue_expand(kq, fops, kev->ident, waitok);
+			if (error != 0)
+				goto done;
+		}
 
 		KQ_LOCK(kq);
 
@@ -1693,12 +1696,12 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 {
 	struct klist *list, *tmp_knhash, *to_free;
 	u_long tmp_knhashmask;
-	int size;
-	int fd;
+	int error, fd, size;
 	int mflag = waitok ? M_WAITOK : M_NOWAIT;
 
 	KQ_NOTOWNED(kq);
 
+	error = 0;
 	to_free = NULL;
 	if (fops->f_isfd) {
 		fd = ident;
@@ -1710,9 +1713,11 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 			if (list == NULL)
 				return ENOMEM;
 			KQ_LOCK(kq);
-			if (kq->kq_knlistsize > fd) {
+			if ((kq->kq_state & KQ_CLOSING) != 0) {
+				to_free = list;
+				error = EBADF;
+			} else if (kq->kq_knlistsize > fd) {
 				to_free = list;
-				list = NULL;
 			} else {
 				if (kq->kq_knlist != NULL) {
 					bcopy(kq->kq_knlist, list,
@@ -1734,9 +1739,12 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 			    &tmp_knhashmask,
 			    waitok ? HASH_WAITOK : HASH_NOWAIT);
 			if (tmp_knhash == NULL)
-				return ENOMEM;
+				return (ENOMEM);
 			KQ_LOCK(kq);
-			if (kq->kq_knhashmask == 0) {
+			if ((kq->kq_state & KQ_CLOSING) != 0) {
+				to_free = tmp_knhash;
+				error = EBADF;
+			} else if (kq->kq_knhashmask == 0) {
 				kq->kq_knhash = tmp_knhash;
 				kq->kq_knhashmask = tmp_knhashmask;
 			} else {
@@ -1748,7 +1756,7 @@ kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 	free(to_free, M_KQUEUE);
 
 	KQ_NOTOWNED(kq);
-	return 0;
+	return (error);
 }
 
 static void
@@ -2597,6 +2605,8 @@ knote_attach(struct knote *kn, struct kqueue *kq)
 	KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
 	KQ_OWNED(kq);
 
+	if ((kq->kq_state & KQ_CLOSING) != 0)
+		return (EBADF);
 	if (kn->kn_fop->f_isfd) {
 		if (kn->kn_id >= kq->kq_knlistsize)
 			return (ENOMEM);