Add locking to the kqueue subsystem.  This also makes the kqueue subsystem
a more complete subsystem and removes knowledge of how things are
implemented from the drivers.  Include locking around filter ops, so a
module like aio will know not to be unloaded while there are outstanding
knotes using its filter ops.

Currently this uses MTX_DUPOK even though it is not always safe to acquire
duplicate locks.  Witness currently doesn't support the ability to discover
whether a duplicate lock is ok (in some cases).

Reviewed by:	green, rwatson (both earlier versions)
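The driver-facing side of this change is the knlist API: drivers stop poking
at si_note with SLIST macros under their own locks and instead let the kqueue
code manage the list.  The sketch below is a hypothetical foo(4) driver (the
foo_softc, foo_read_filtops and foo_* function names are invented for
illustration); the knlist_*() and KNOTE_*() calls are the ones this diff
introduces, used the same way the scsi_target and bpf conversions below use
them.

/*
 * Minimal sketch of the driver-side pattern, not a real driver.
 */
struct foo_softc {
	struct mtx	sc_mtx;		/* driver lock, also protects the knlist */
	struct selinfo	sc_rsel;	/* sc_rsel.si_note is now a struct knlist */
};

static void
foo_attach(struct foo_softc *sc)
{
	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
	/* Tie the knote list to the driver lock instead of rolling our own. */
	knlist_init(&sc->sc_rsel.si_note, &sc->sc_mtx);
}

static int
foo_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct foo_softc *sc = dev->si_drv1;

	kn->kn_fop = &foo_read_filtops;		/* hypothetical filterops */
	kn->kn_hook = sc;
	/* No SLIST_INSERT_HEAD under a hand-rolled lock any more. */
	knlist_add(&sc->sc_rsel.si_note, kn, 0);	/* 0: list not yet locked */
	return (0);
}

static void
foo_filt_detach(struct knote *kn)
{
	struct foo_softc *sc = kn->kn_hook;

	knlist_remove(&sc->sc_rsel.si_note, kn, 0);
}

static void
foo_wakeup(struct foo_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	/* ... queue up data ... */
	KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);	/* knlist lock already held */
	mtx_unlock(&sc->sc_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{
	knlist_clear(&sc->sc_rsel.si_note, 0);
	knlist_destroy(&sc->sc_rsel.si_note);
	mtx_destroy(&sc->sc_mtx);
}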
commit ad3b9257c2
parent 93ab8d76ea
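The aio changes below rely on the locking around filter ops mentioned in the
commit message: kqueue_del_filteropts() can now fail while knotes still
reference a filter, so a module can refuse to unload.  A minimal sketch of
that pattern follows; EVFILT_FOO, foo_filtops and the foo_mod* handlers are
hypothetical, while kqueue_add_filteropts() and kqueue_del_filteropts() are
the interfaces touched in this diff.

static int
foo_modload(void)
{

	return (kqueue_add_filteropts(EVFILT_FOO, &foo_filtops));
}

static int
foo_modunload(void)
{
	int error;

	/*
	 * With filter ops now locked/refcounted, this is expected to fail
	 * while any knote still uses foo_filtops, keeping the module loaded.
	 */
	error = kqueue_del_filteropts(EVFILT_FOO);
	if (error)
		return (error);
	/* ... tear down the rest ... */
	return (0);
}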
@@ -196,6 +196,7 @@ targopen(struct cdev *dev, int flags, int fmt, struct thread *td)
 	TAILQ_INIT(&softc->work_queue);
 	TAILQ_INIT(&softc->abort_queue);
 	TAILQ_INIT(&softc->user_ccb_queue);
+	knlist_init(&softc->read_select.si_note, &softc->mtx);
 
 	return (0);
 }
@@ -336,9 +337,7 @@ targkqfilter(struct cdev *dev, struct knote *kn)
 	softc = (struct targ_softc *)dev->si_drv1;
 	kn->kn_hook = (caddr_t)softc;
 	kn->kn_fop = &targread_filtops;
-	TARG_LOCK(softc);
-	SLIST_INSERT_HEAD(&softc->read_select.si_note, kn, kn_selnext);
-	TARG_UNLOCK(softc);
+	knlist_add(&softc->read_select.si_note, kn, 0);
 	return (0);
 }
 
@@ -348,9 +347,7 @@ targreadfiltdetach(struct knote *kn)
 	struct targ_softc *softc;
 
 	softc = (struct targ_softc *)kn->kn_hook;
-	TARG_LOCK(softc);
-	SLIST_REMOVE(&softc->read_select.si_note, kn, knote, kn_selnext);
-	TARG_UNLOCK(softc);
+	knlist_remove(&softc->read_select.si_note, kn, 0);
 }
 
 /* Notify the user's kqueue when the user queue or abort queue gets a CCB */
@@ -361,10 +358,8 @@ targreadfilt(struct knote *kn, long hint)
 	int retval;
 
 	softc = (struct targ_softc *)kn->kn_hook;
-	TARG_LOCK(softc);
 	retval = !TAILQ_EMPTY(&softc->user_ccb_queue) ||
 	    !TAILQ_EMPTY(&softc->abort_queue);
-	TARG_UNLOCK(softc);
 	return (retval);
 }
 
@@ -1096,19 +1091,8 @@ abort_all_pending(struct targ_softc *softc)
 
 	/* If we aborted anything from the work queue, wakeup user. */
 	if (!TAILQ_EMPTY(&softc->user_ccb_queue)
-	 || !TAILQ_EMPTY(&softc->abort_queue)) {
-		/*
-		 * XXX KNOTE calls back into targreadfilt, causing a
-		 * lock recursion.  So unlock around calls to it although
-		 * this may open up a race allowing a user to submit
-		 * another CCB after we have aborted all pending ones
-		 * A better approach is to mark the softc as dying
-		 * under lock and check for this in targstart().
-		 */
-		TARG_UNLOCK(softc);
+	 || !TAILQ_EMPTY(&softc->abort_queue))
 		notify_user(softc);
-		TARG_LOCK(softc);
-	}
 }
 
 /* Notify the user that data is ready */
@@ -1120,7 +1104,7 @@ notify_user(struct targ_softc *softc)
 	 * blocking read().
 	 */
 	selwakeuppri(&softc->read_select, PRIBIO);
-	KNOTE(&softc->read_select.si_note, 0);
+	KNOTE_LOCKED(&softc->read_select.si_note, 0);
 	wakeup(&softc->user_ccb_queue);
 }
 
@@ -265,7 +265,7 @@ miibus_linkchg(dev)
 	if (ifp->if_link_state != link_state) {
 		ifp->if_link_state = link_state;
 		rt_ifmsg(ifp);
-		KNOTE(&ifp->if_klist, link);
+		KNOTE_UNLOCKED(&ifp->if_klist, link);
 		if (ifp->if_nvlans != 0)
 			(*vlan_link_state_p)(ifp, link);
 	}
@ -432,7 +432,7 @@ fifo_kqfilter(ap)
|
||||
ap->a_kn->kn_hook = (caddr_t)so;
|
||||
|
||||
SOCKBUF_LOCK(sb);
|
||||
SLIST_INSERT_HEAD(&sb->sb_sel.si_note, ap->a_kn, kn_selnext);
|
||||
knlist_add(&sb->sb_sel.si_note, ap->a_kn, 1);
|
||||
sb->sb_flags |= SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
|
||||
@ -445,8 +445,8 @@ filt_fifordetach(struct knote *kn)
|
||||
struct socket *so = (struct socket *)kn->kn_hook;
|
||||
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
|
||||
if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
|
||||
knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
|
||||
if (knlist_empty(&so->so_rcv.sb_sel.si_note))
|
||||
so->so_rcv.sb_flags &= ~SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(&so->so_rcv);
|
||||
}
|
||||
@ -479,8 +479,8 @@ filt_fifowdetach(struct knote *kn)
|
||||
struct socket *so = (struct socket *)kn->kn_hook;
|
||||
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
|
||||
if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
|
||||
knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
|
||||
if (knlist_empty(&so->so_snd.sb_sel.si_note))
|
||||
so->so_snd.sb_flags &= ~SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
}
|
||||
|
@ -570,7 +570,7 @@ ext2_setattr(ap)
|
||||
return (EROFS);
|
||||
error = ext2_chmod(vp, (int)vap->va_mode, cred, td);
|
||||
}
|
||||
VN_KNOTE(vp, NOTE_ATTRIB);
|
||||
VN_KNOTE_UNLOCKED(vp, NOTE_ATTRIB);
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -1894,9 +1894,9 @@ ext2_kqfilter(ap)
|
||||
|
||||
if (vp->v_pollinfo == NULL)
|
||||
v_addpollinfo(vp);
|
||||
mtx_lock(&vp->v_pollinfo->vpi_lock);
|
||||
SLIST_INSERT_HEAD(&vp->v_pollinfo->vpi_selinfo.si_note, kn, kn_selnext);
|
||||
mtx_unlock(&vp->v_pollinfo->vpi_lock);
|
||||
if (vp->v_pollinfo == NULL)
|
||||
return ENOMEM;
|
||||
knlist_add(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -1907,10 +1907,7 @@ filt_ext2detach(struct knote *kn)
|
||||
struct vnode *vp = (struct vnode *)kn->kn_hook;
|
||||
|
||||
KASSERT(vp->v_pollinfo != NULL, ("Mising v_pollinfo"));
|
||||
mtx_lock(&vp->v_pollinfo->vpi_lock);
|
||||
SLIST_REMOVE(&vp->v_pollinfo->vpi_selinfo.si_note,
|
||||
kn, knote, kn_selnext);
|
||||
mtx_unlock(&vp->v_pollinfo->vpi_lock);
|
||||
knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
|
||||
}
|
||||
|
||||
/*ARGSUSED*/
|
||||
|
@ -570,7 +570,7 @@ ext2_setattr(ap)
|
||||
return (EROFS);
|
||||
error = ext2_chmod(vp, (int)vap->va_mode, cred, td);
|
||||
}
|
||||
VN_KNOTE(vp, NOTE_ATTRIB);
|
||||
VN_KNOTE_UNLOCKED(vp, NOTE_ATTRIB);
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -1894,9 +1894,9 @@ ext2_kqfilter(ap)
|
||||
|
||||
if (vp->v_pollinfo == NULL)
|
||||
v_addpollinfo(vp);
|
||||
mtx_lock(&vp->v_pollinfo->vpi_lock);
|
||||
SLIST_INSERT_HEAD(&vp->v_pollinfo->vpi_selinfo.si_note, kn, kn_selnext);
|
||||
mtx_unlock(&vp->v_pollinfo->vpi_lock);
|
||||
if (vp->v_pollinfo == NULL)
|
||||
return ENOMEM;
|
||||
knlist_add(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -1907,10 +1907,7 @@ filt_ext2detach(struct knote *kn)
|
||||
struct vnode *vp = (struct vnode *)kn->kn_hook;
|
||||
|
||||
KASSERT(vp->v_pollinfo != NULL, ("Mising v_pollinfo"));
|
||||
mtx_lock(&vp->v_pollinfo->vpi_lock);
|
||||
SLIST_REMOVE(&vp->v_pollinfo->vpi_selinfo.si_note,
|
||||
kn, knote, kn_selnext);
|
||||
mtx_unlock(&vp->v_pollinfo->vpi_lock);
|
||||
knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
|
||||
}
|
||||
|
||||
/*ARGSUSED*/
|
||||
|
@ -381,6 +381,7 @@ proc0_init(void *dummy __unused)
|
||||
p->p_flag = P_SYSTEM;
|
||||
p->p_sflag = PS_INMEM;
|
||||
p->p_state = PRS_NORMAL;
|
||||
knlist_init(&p->p_klist, &p->p_mtx);
|
||||
p->p_nice = NZERO;
|
||||
td->td_state = TDS_RUNNING;
|
||||
kg->kg_pri_class = PRI_TIMESHARE;
|
||||
|
@ -198,13 +198,7 @@ static struct cdevsw dead_cdevsw = {
|
||||
#define no_write (d_write_t *)enodev
|
||||
#define no_ioctl (d_ioctl_t *)enodev
|
||||
#define no_mmap (d_mmap_t *)enodev
|
||||
|
||||
static int
|
||||
no_kqfilter(struct cdev *dev __unused, struct knote *kn __unused)
|
||||
{
|
||||
|
||||
return (1);
|
||||
}
|
||||
#define no_kqfilter (d_kqfilter_t *)enodev
|
||||
|
||||
static void
|
||||
no_strategy(struct bio *bp)
|
||||
|
@ -985,12 +985,12 @@ close(td, uap)
|
||||
/*
|
||||
* we now hold the fp reference that used to be owned by the descriptor
|
||||
* array.
|
||||
* We have to unlock the FILEDESC *AFTER* knote_fdclose to prevent a
|
||||
* race of the fd getting opened, a knote added, and deleteing a knote
|
||||
* for the new fd.
|
||||
*/
|
||||
if (fd < fdp->fd_knlistsize) {
|
||||
FILEDESC_UNLOCK(fdp);
|
||||
knote_fdclose(td, fd);
|
||||
} else
|
||||
FILEDESC_UNLOCK(fdp);
|
||||
knote_fdclose(td, fd);
|
||||
FILEDESC_UNLOCK(fdp);
|
||||
|
||||
error = closef(fp, td);
|
||||
mtx_unlock(&Giant);
|
||||
@ -1424,7 +1424,6 @@ fdinit(fdp)
|
||||
newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
|
||||
newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
|
||||
newfdp->fd_fd.fd_nfiles = NDFILE;
|
||||
newfdp->fd_fd.fd_knlistsize = -1;
|
||||
newfdp->fd_fd.fd_map = newfdp->fd_dmap;
|
||||
return (&newfdp->fd_fd);
|
||||
}
|
||||
@ -1624,10 +1623,6 @@ fdfree(td)
|
||||
vrele(fdp->fd_rdir);
|
||||
if (fdp->fd_jdir)
|
||||
vrele(fdp->fd_jdir);
|
||||
if (fdp->fd_knlist)
|
||||
FREE(fdp->fd_knlist, M_KQUEUE);
|
||||
if (fdp->fd_knhash)
|
||||
FREE(fdp->fd_knhash, M_KQUEUE);
|
||||
mtx_destroy(&fdp->fd_mtx);
|
||||
FREE(fdp, M_FILEDESC);
|
||||
}
|
||||
@ -1681,11 +1676,7 @@ setugidsafety(td)
|
||||
if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
|
||||
struct file *fp;
|
||||
|
||||
if (i < fdp->fd_knlistsize) {
|
||||
FILEDESC_UNLOCK(fdp);
|
||||
knote_fdclose(td, i);
|
||||
FILEDESC_LOCK(fdp);
|
||||
}
|
||||
knote_fdclose(td, i);
|
||||
/*
|
||||
* NULL-out descriptor prior to close to avoid
|
||||
* a race while close blocks.
|
||||
@ -1728,11 +1719,7 @@ fdcloseexec(td)
|
||||
(fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
|
||||
struct file *fp;
|
||||
|
||||
if (i < fdp->fd_knlistsize) {
|
||||
FILEDESC_UNLOCK(fdp);
|
||||
knote_fdclose(td, i);
|
||||
FILEDESC_LOCK(fdp);
|
||||
}
|
||||
knote_fdclose(td, i);
|
||||
/*
|
||||
* NULL-out descriptor prior to close to avoid
|
||||
* a race while close blocks.
|
||||
|
File diff suppressed because it is too large
@ -623,7 +623,7 @@ interpret:
|
||||
* Notify others that we exec'd, and clear the P_INEXEC flag
|
||||
* as we're now a bona fide freshly-execed process.
|
||||
*/
|
||||
KNOTE(&p->p_klist, NOTE_EXEC);
|
||||
KNOTE_LOCKED(&p->p_klist, NOTE_EXEC);
|
||||
p->p_flag &= ~P_INEXEC;
|
||||
|
||||
/*
|
||||
|
@ -438,18 +438,17 @@ retry:
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
ruadd(p->p_ru, &p->p_stats->p_cru);
|
||||
|
||||
mtx_unlock(&Giant);
|
||||
/*
|
||||
* Notify interested parties of our demise.
|
||||
*/
|
||||
KNOTE(&p->p_klist, NOTE_EXIT);
|
||||
mtx_unlock(&Giant);
|
||||
KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);
|
||||
/*
|
||||
* Just delete all entries in the p_klist. At this point we won't
|
||||
* report any more events, and there are nasty race conditions that
|
||||
* can beat us if we don't.
|
||||
*/
|
||||
while (SLIST_FIRST(&p->p_klist))
|
||||
SLIST_REMOVE_HEAD(&p->p_klist, kn_selnext);
|
||||
knlist_clear(&p->p_klist, 1);
|
||||
|
||||
/*
|
||||
* Notify parent that we're gone. If parent has the PS_NOCLDWAIT
|
||||
@ -532,6 +531,12 @@ retry:
|
||||
cnt.v_swtch++;
|
||||
sched_exit(p->p_pptr, td);
|
||||
|
||||
/*
|
||||
* hopefully no one will try to deliver a signal to the process this
|
||||
* late in the game.
|
||||
*/
|
||||
knlist_destroy(&p->p_klist);
|
||||
|
||||
/*
|
||||
* Make sure the scheduler takes this thread out of its tables etc.
|
||||
* This will also release this thread's reference to the ucred.
|
||||
|
@ -287,6 +287,7 @@ fork1(td, flags, pages, procp)
|
||||
#ifdef MAC
|
||||
mac_init_proc(newproc);
|
||||
#endif
|
||||
knlist_init(&newproc->p_klist, &newproc->p_mtx);
|
||||
|
||||
/* We have to lock the process tree while we look for a pid. */
|
||||
sx_slock(&proctree_lock);
|
||||
@ -722,7 +723,7 @@ again:
|
||||
/*
|
||||
* Tell any interested parties about the new process.
|
||||
*/
|
||||
KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
|
||||
KNOTE_LOCKED(&p1->p_klist, NOTE_FORK | p2->p_pid);
|
||||
|
||||
PROC_UNLOCK(p1);
|
||||
|
||||
|
@ -1674,7 +1674,7 @@ do_tdsignal(struct thread *td, int sig, sigtarget_t target)
|
||||
ps = p->p_sigacts;
|
||||
|
||||
PROC_LOCK_ASSERT(p, MA_OWNED);
|
||||
KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
|
||||
KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig);
|
||||
|
||||
prop = sigprop(sig);
|
||||
|
||||
@ -2720,9 +2720,7 @@ filt_sigattach(struct knote *kn)
|
||||
kn->kn_ptr.p_proc = p;
|
||||
kn->kn_flags |= EV_CLEAR; /* automatically set */
|
||||
|
||||
PROC_LOCK(p);
|
||||
SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
|
||||
PROC_UNLOCK(p);
|
||||
knlist_add(&p->p_klist, kn, 0);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -2732,9 +2730,7 @@ filt_sigdetach(struct knote *kn)
|
||||
{
|
||||
struct proc *p = kn->kn_ptr.p_proc;
|
||||
|
||||
PROC_LOCK(p);
|
||||
SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
|
||||
PROC_UNLOCK(p);
|
||||
knlist_remove(&p->p_klist, kn, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -504,7 +504,7 @@ pipeselwakeup(cpipe)
|
||||
}
|
||||
if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
|
||||
pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
|
||||
KNOTE(&cpipe->pipe_sel.si_note, 0);
|
||||
KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -524,6 +524,7 @@ pipe_create(pipe)
|
||||
error = pipespace_new(pipe, SMALL_PIPE_SIZE);
|
||||
else
|
||||
error = pipespace_new(pipe, PIPE_SIZE);
|
||||
knlist_init(&pipe->pipe_sel.si_note, PIPE_MTX(pipe));
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -1424,7 +1425,7 @@ pipeclose(cpipe)
|
||||
|
||||
ppipe->pipe_state |= PIPE_EOF;
|
||||
wakeup(ppipe);
|
||||
KNOTE(&ppipe->pipe_sel.si_note, 0);
|
||||
KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1438,6 +1439,8 @@ pipeclose(cpipe)
|
||||
PIPE_LOCK(cpipe);
|
||||
cpipe->pipe_present = 0;
|
||||
pipeunlock(cpipe);
|
||||
knlist_clear(&cpipe->pipe_sel.si_note, 1);
|
||||
knlist_destroy(&cpipe->pipe_sel.si_note);
|
||||
|
||||
/*
|
||||
* If both endpoints are now closed, release the memory for the
|
||||
@ -1476,10 +1479,10 @@ pipe_kqfilter(struct file *fp, struct knote *kn)
|
||||
break;
|
||||
default:
|
||||
PIPE_UNLOCK(cpipe);
|
||||
return (1);
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
|
||||
knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
|
||||
PIPE_UNLOCK(cpipe);
|
||||
return (0);
|
||||
}
|
||||
@ -1497,7 +1500,7 @@ filt_pipedetach(struct knote *kn)
|
||||
}
|
||||
cpipe = cpipe->pipe_peer;
|
||||
}
|
||||
SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
|
||||
knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
|
||||
PIPE_UNLOCK(cpipe);
|
||||
}
|
||||
|
||||
@ -1507,6 +1510,7 @@ filt_piperead(struct knote *kn, long hint)
|
||||
{
|
||||
struct pipe *rpipe = kn->kn_fp->f_data;
|
||||
struct pipe *wpipe = rpipe->pipe_peer;
|
||||
int ret;
|
||||
|
||||
PIPE_LOCK(rpipe);
|
||||
kn->kn_data = rpipe->pipe_buffer.cnt;
|
||||
@ -1519,8 +1523,9 @@ filt_piperead(struct knote *kn, long hint)
|
||||
PIPE_UNLOCK(rpipe);
|
||||
return (1);
|
||||
}
|
||||
ret = kn->kn_data > 0;
|
||||
PIPE_UNLOCK(rpipe);
|
||||
return (kn->kn_data > 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*ARGSUSED*/
|
||||
|
@ -281,6 +281,8 @@ tty_close(struct tty *tp)
|
||||
tp->t_pgrp = NULL;
|
||||
tp->t_session = NULL;
|
||||
tp->t_state = 0;
|
||||
knlist_clear(&tp->t_rsel.si_note, 0);
|
||||
knlist_clear(&tp->t_wsel.si_note, 0);
|
||||
ttyrel(tp);
|
||||
splx(s);
|
||||
return (0);
|
||||
@ -1259,7 +1261,7 @@ int
|
||||
ttykqfilter(struct cdev *dev, struct knote *kn)
|
||||
{
|
||||
struct tty *tp;
|
||||
struct klist *klist;
|
||||
struct knlist *klist;
|
||||
int s;
|
||||
|
||||
KASSERT(devsw(dev)->d_flags & D_TTY,
|
||||
@ -1277,13 +1279,13 @@ ttykqfilter(struct cdev *dev, struct knote *kn)
|
||||
kn->kn_fop = &ttywrite_filtops;
|
||||
break;
|
||||
default:
|
||||
return (1);
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
kn->kn_hook = (caddr_t)dev;
|
||||
|
||||
s = spltty();
|
||||
SLIST_INSERT_HEAD(klist, kn, kn_selnext);
|
||||
knlist_add(klist, kn, 0);
|
||||
splx(s);
|
||||
|
||||
return (0);
|
||||
@ -1295,7 +1297,7 @@ filt_ttyrdetach(struct knote *kn)
|
||||
struct tty *tp = ((struct cdev *)kn->kn_hook)->si_tty;
|
||||
int s = spltty();
|
||||
|
||||
SLIST_REMOVE(&tp->t_rsel.si_note, kn, knote, kn_selnext);
|
||||
knlist_remove(&tp->t_rsel.si_note, kn, 0);
|
||||
splx(s);
|
||||
}
|
||||
|
||||
@ -1318,7 +1320,7 @@ filt_ttywdetach(struct knote *kn)
|
||||
struct tty *tp = ((struct cdev *)kn->kn_hook)->si_tty;
|
||||
int s = spltty();
|
||||
|
||||
SLIST_REMOVE(&tp->t_wsel.si_note, kn, knote, kn_selnext);
|
||||
knlist_remove(&tp->t_wsel.si_note, kn, 0);
|
||||
splx(s);
|
||||
}
|
||||
|
||||
@ -2365,7 +2367,7 @@ ttwakeup(struct tty *tp)
|
||||
if (ISSET(tp->t_state, TS_ASYNC) && tp->t_sigio != NULL)
|
||||
pgsigio(&tp->t_sigio, SIGIO, (tp->t_session != NULL));
|
||||
wakeup(TSA_HUP_OR_INPUT(tp));
|
||||
KNOTE(&tp->t_rsel.si_note, 0);
|
||||
KNOTE_UNLOCKED(&tp->t_rsel.si_note, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2389,7 +2391,7 @@ ttwwakeup(struct tty *tp)
|
||||
CLR(tp->t_state, TS_SO_OLOWAT);
|
||||
wakeup(TSA_OLOWAT(tp));
|
||||
}
|
||||
KNOTE(&tp->t_wsel.si_note, 0);
|
||||
KNOTE_UNLOCKED(&tp->t_wsel.si_note, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2754,6 +2756,8 @@ ttyrel(struct tty *tp)
|
||||
TAILQ_REMOVE(&tty_list, tp, t_list);
|
||||
mtx_unlock(&tp->t_mtx);
|
||||
mtx_unlock(&tty_list_mutex);
|
||||
knlist_destroy(&tp->t_rsel.si_note);
|
||||
knlist_destroy(&tp->t_wsel.si_note);
|
||||
mtx_destroy(&tp->t_mtx);
|
||||
free(tp, M_TTYS);
|
||||
return (i);
|
||||
@ -2789,6 +2793,8 @@ ttymalloc(struct tty *tp)
|
||||
mtx_lock(&tty_list_mutex);
|
||||
TAILQ_INSERT_TAIL(&tty_list, tp, t_list);
|
||||
mtx_unlock(&tty_list_mutex);
|
||||
knlist_init(&tp->t_rsel.si_note, &tp->t_mtx);
|
||||
knlist_init(&tp->t_wsel.si_note, &tp->t_mtx);
|
||||
return (tp);
|
||||
}
|
||||
|
||||
|
@ -528,11 +528,11 @@ cnkqfilter(struct cdev *dev, struct knote *kn)
|
||||
|
||||
cnd = STAILQ_FIRST(&cn_devlist);
|
||||
if (cn_mute || CND_INVALID(cnd, curthread))
|
||||
return (1);
|
||||
return (EINVAL);
|
||||
dev = cnd->cnd_vp->v_rdev;
|
||||
if (dev != NULL)
|
||||
return ((*devsw(dev)->d_kqfilter)(dev, kn));
|
||||
return (1);
|
||||
return (ENXIO);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -242,6 +242,8 @@ sonewconn(head, connstatus)
|
||||
mac_create_socket_from_socket(head, so);
|
||||
SOCK_UNLOCK(head);
|
||||
#endif
|
||||
knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
|
||||
knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
|
||||
if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
|
||||
(*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
|
||||
sodealloc(so);
|
||||
@ -403,7 +405,7 @@ sowakeup(so, sb)
|
||||
sb->sb_flags &= ~SB_WAIT;
|
||||
wakeup(&sb->sb_cc);
|
||||
}
|
||||
KNOTE(&sb->sb_sel.si_note, 0);
|
||||
KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
|
||||
pgsigio(&so->so_sigio, SIGIO, 0);
|
||||
|
@ -221,6 +221,8 @@ socreate(dom, aso, type, proto, cred, td)
|
||||
mac_create_socket(cred, so);
|
||||
#endif
|
||||
SOCK_LOCK(so);
|
||||
knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
|
||||
knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
|
||||
soref(so);
|
||||
SOCK_UNLOCK(so);
|
||||
error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
|
||||
@ -378,6 +380,8 @@ sofree(so)
|
||||
sbrelease_locked(&so->so_snd, so);
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
sorflush(so);
|
||||
knlist_destroy(&so->so_rcv.sb_sel.si_note);
|
||||
knlist_destroy(&so->so_snd.sb_sel.si_note);
|
||||
sodealloc(so);
|
||||
}
|
||||
|
||||
@ -2141,11 +2145,11 @@ soo_kqfilter(struct file *fp, struct knote *kn)
|
||||
sb = &so->so_snd;
|
||||
break;
|
||||
default:
|
||||
return (1);
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
SOCKBUF_LOCK(sb);
|
||||
SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
|
||||
knlist_add(&sb->sb_sel.si_note, kn, 1);
|
||||
sb->sb_flags |= SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
return (0);
|
||||
@ -2157,8 +2161,8 @@ filt_sordetach(struct knote *kn)
|
||||
struct socket *so = kn->kn_fp->f_data;
|
||||
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
|
||||
if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
|
||||
knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
|
||||
if (knlist_empty(&so->so_rcv.sb_sel.si_note))
|
||||
so->so_rcv.sb_flags &= ~SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(&so->so_rcv);
|
||||
}
|
||||
@ -2200,8 +2204,8 @@ filt_sowdetach(struct knote *kn)
|
||||
struct socket *so = kn->kn_fp->f_data;
|
||||
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
|
||||
if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
|
||||
knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
|
||||
if (knlist_empty(&so->so_snd.sb_sel.si_note))
|
||||
so->so_snd.sb_flags &= ~SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
}
|
||||
|
@ -242,6 +242,8 @@ sonewconn(head, connstatus)
|
||||
mac_create_socket_from_socket(head, so);
|
||||
SOCK_UNLOCK(head);
|
||||
#endif
|
||||
knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
|
||||
knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
|
||||
if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
|
||||
(*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
|
||||
sodealloc(so);
|
||||
@ -403,7 +405,7 @@ sowakeup(so, sb)
|
||||
sb->sb_flags &= ~SB_WAIT;
|
||||
wakeup(&sb->sb_cc);
|
||||
}
|
||||
KNOTE(&sb->sb_sel.si_note, 0);
|
||||
KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
|
||||
pgsigio(&so->so_sigio, SIGIO, 0);
|
||||
|
@ -334,7 +334,7 @@ accept1(td, uap, compat)
|
||||
td->td_retval[0] = fd;
|
||||
|
||||
/* connection has been removed from the listen queue */
|
||||
KNOTE(&head->so_rcv.sb_sel.si_note, 0);
|
||||
KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
|
||||
|
||||
pgid = fgetown(&head->so_sigio);
|
||||
if (pgid != 0)
|
||||
|
@ -182,7 +182,7 @@ struct aiocblist {
|
||||
struct file *fd_file; /* Pointer to file structure */
|
||||
struct aio_liojob *lio; /* Optional lio job */
|
||||
struct aiocb *uuaiocb; /* Pointer in userspace of aiocb */
|
||||
struct klist klist; /* list of knotes */
|
||||
struct knlist klist; /* list of knotes */
|
||||
struct aiocb uaiocb; /* Kernel I/O control block */
|
||||
};
|
||||
|
||||
@ -368,6 +368,7 @@ aio_onceonly(void)
|
||||
static int
|
||||
aio_unload(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
/*
|
||||
* XXX: no unloads by default, it's too dangerous.
|
||||
@ -377,11 +378,14 @@ aio_unload(void)
|
||||
if (!unloadable)
|
||||
return (EOPNOTSUPP);
|
||||
|
||||
error = kqueue_del_filteropts(EVFILT_AIO);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
async_io_version = 0;
|
||||
aio_swake = NULL;
|
||||
EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
|
||||
EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
|
||||
kqueue_del_filteropts(EVFILT_AIO);
|
||||
p31b_setcfg(CTL_P1003_1B_AIO_LISTIO_MAX, -1);
|
||||
p31b_setcfg(CTL_P1003_1B_AIO_MAX, -1);
|
||||
p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, -1);
|
||||
@ -482,7 +486,7 @@ aio_free_entry(struct aiocblist *aiocbe)
|
||||
* OWNING thread? (or maybe the running thread?)
|
||||
* There is a semantic problem here...
|
||||
*/
|
||||
knote_remove(FIRST_THREAD_IN_PROC(p), &aiocbe->klist); /* XXXKSE */
|
||||
knlist_clear(&aiocbe->klist, 0); /* XXXKSE */
|
||||
|
||||
if ((ki->kaio_flags & KAIO_WAKEUP) || ((ki->kaio_flags & KAIO_RUNDOWN)
|
||||
&& ((ki->kaio_buffer_count == 0) && (ki->kaio_queue_count == 0)))) {
|
||||
@ -933,7 +937,7 @@ aio_daemon(void *uproc)
|
||||
TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
|
||||
TAILQ_INSERT_TAIL(&ki->kaio_jobdone, aiocbe, plist);
|
||||
splx(s);
|
||||
KNOTE(&aiocbe->klist, 0);
|
||||
KNOTE_UNLOCKED(&aiocbe->klist, 0);
|
||||
|
||||
if (aiocbe->jobflags & AIOCBLIST_RUNDOWN) {
|
||||
wakeup(aiocbe);
|
||||
@ -1171,7 +1175,7 @@ aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
|
||||
}
|
||||
splx(s);
|
||||
if (notify)
|
||||
KNOTE(&aiocbe->klist, 0);
|
||||
KNOTE_UNLOCKED(&aiocbe->klist, 0);
|
||||
return (0);
|
||||
|
||||
doerror:
|
||||
@ -1296,7 +1300,8 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
|
||||
aiocbe->inputcharge = 0;
|
||||
aiocbe->outputcharge = 0;
|
||||
callout_handle_init(&aiocbe->timeouthandle);
|
||||
SLIST_INIT(&aiocbe->klist);
|
||||
/* XXX - need a lock */
|
||||
knlist_init(&aiocbe->klist, NULL);
|
||||
|
||||
suword(&job->_aiocb_private.status, -1);
|
||||
suword(&job->_aiocb_private.error, 0);
|
||||
@ -1415,7 +1420,7 @@ _aio_aqueue(struct thread *td, struct aiocb *job, struct aio_liojob *lj, int typ
|
||||
kev.filter = EVFILT_AIO;
|
||||
kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1;
|
||||
kev.data = (intptr_t)aiocbe;
|
||||
error = kqueue_register(kq, &kev, td);
|
||||
error = kqueue_register(kq, &kev, td, 1);
|
||||
aqueue_fail:
|
||||
if (error) {
|
||||
fdrop(fp, td);
|
||||
@ -2187,7 +2192,7 @@ aio_physwakeup(struct buf *bp)
|
||||
TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
|
||||
TAILQ_INSERT_TAIL(&ki->kaio_bufdone, aiocbe, plist);
|
||||
|
||||
KNOTE(&aiocbe->klist, 0);
|
||||
KNOTE_UNLOCKED(&aiocbe->klist, 0);
|
||||
/* Do the wakeup. */
|
||||
if (ki->kaio_flags & (KAIO_RUNDOWN|KAIO_WAKEUP)) {
|
||||
ki->kaio_flags &= ~KAIO_WAKEUP;
|
||||
@ -2289,7 +2294,7 @@ filt_aioattach(struct knote *kn)
|
||||
return (EPERM);
|
||||
kn->kn_flags &= ~EV_FLAG1;
|
||||
|
||||
SLIST_INSERT_HEAD(&aiocbe->klist, kn, kn_selnext);
|
||||
knlist_add(&aiocbe->klist, kn, 0);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -2300,7 +2305,7 @@ filt_aiodetach(struct knote *kn)
|
||||
{
|
||||
struct aiocblist *aiocbe = (struct aiocblist *)kn->kn_sdata;
|
||||
|
||||
SLIST_REMOVE(&aiocbe->klist, kn, knote, kn_selnext);
|
||||
knlist_remove(&aiocbe->klist, kn, 0);
|
||||
}
|
||||
|
||||
/* kqueue filter function */
|
||||
|
@ -849,6 +849,7 @@ getnewvnode(tag, mp, vops, vpp)
|
||||
vp->v_cachedid = -1;
|
||||
VI_UNLOCK(vp);
|
||||
if (pollinfo != NULL) {
|
||||
knlist_destroy(&pollinfo->vpi_selinfo.si_note);
|
||||
mtx_destroy(&pollinfo->vpi_lock);
|
||||
uma_zfree(vnodepoll_zone, pollinfo);
|
||||
}
|
||||
@ -3256,6 +3257,8 @@ v_addpollinfo(struct vnode *vp)
|
||||
}
|
||||
vp->v_pollinfo = vi;
|
||||
mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
|
||||
knlist_init(&vp->v_pollinfo->vpi_selinfo.si_note,
|
||||
&vp->v_pollinfo->vpi_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3341,7 +3344,7 @@ vn_pollgone(vp)
|
||||
{
|
||||
|
||||
mtx_lock(&vp->v_pollinfo->vpi_lock);
|
||||
VN_KNOTE(vp, NOTE_REVOKE);
|
||||
VN_KNOTE_LOCKED(vp, NOTE_REVOKE);
|
||||
if (vp->v_pollinfo->vpi_events) {
|
||||
vp->v_pollinfo->vpi_events = 0;
|
||||
selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
|
||||
@ -3981,13 +3984,21 @@ vop_unlock_post(void *ap, int rc)
|
||||
}
|
||||
#endif /* DEBUG_VFS_LOCKS */
|
||||
|
||||
static struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);
|
||||
static struct knlist fs_knlist;
|
||||
|
||||
static void
|
||||
vfs_event_init(void *arg)
|
||||
{
|
||||
knlist_init(&fs_knlist, NULL);
|
||||
}
|
||||
/* XXX - correct order? */
|
||||
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
|
||||
|
||||
void
|
||||
vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
|
||||
{
|
||||
|
||||
KNOTE(&fs_klist, event);
|
||||
KNOTE_UNLOCKED(&fs_knlist, event);
|
||||
}
|
||||
|
||||
static int filt_fsattach(struct knote *kn);
|
||||
@ -4002,7 +4013,7 @@ filt_fsattach(struct knote *kn)
|
||||
{
|
||||
|
||||
kn->kn_flags |= EV_CLEAR;
|
||||
SLIST_INSERT_HEAD(&fs_klist, kn, kn_selnext);
|
||||
knlist_add(&fs_knlist, kn, 0);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -4010,7 +4021,7 @@ static void
|
||||
filt_fsdetach(struct knote *kn)
|
||||
{
|
||||
|
||||
SLIST_REMOVE(&fs_klist, kn, knote, kn_selnext);
|
||||
knlist_remove(&fs_knlist, kn, 0);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1108,10 +1108,13 @@ vfs_write_resume(mp)
|
||||
static int
|
||||
vn_kqfilter(struct file *fp, struct knote *kn)
|
||||
{
|
||||
int error;
|
||||
|
||||
GIANT_REQUIRED;
|
||||
mtx_lock(&Giant);
|
||||
error = VOP_KQFILTER(fp->f_vnode, kn);
|
||||
mtx_unlock(&Giant);
|
||||
|
||||
return (VOP_KQFILTER(fp->f_vnode, kn));
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -353,6 +353,7 @@ bpfopen(dev, flags, fmt, td)
|
||||
#endif
|
||||
mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
|
||||
callout_init(&d->bd_callout, debug_mpsafenet ? CALLOUT_MPSAFE : 0);
|
||||
knlist_init(&d->bd_sel.si_note, &d->bd_mtx);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -384,6 +385,7 @@ bpfclose(dev, flags, fmt, td)
|
||||
#ifdef MAC
|
||||
mac_destroy_bpfdesc(d);
|
||||
#endif /* MAC */
|
||||
knlist_destroy(&d->bd_sel.si_note);
|
||||
bpf_freed(d);
|
||||
dev->si_drv1 = 0;
|
||||
free(d, M_BPF);
|
||||
@ -525,7 +527,7 @@ bpf_wakeup(d)
|
||||
pgsigio(&d->bd_sigio, d->bd_sig, 0);
|
||||
|
||||
selwakeuppri(&d->bd_sel, PRINET);
|
||||
KNOTE(&d->bd_sel.si_note, 0);
|
||||
KNOTE_LOCKED(&d->bd_sel.si_note, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1089,9 +1091,7 @@ bpfkqfilter(dev, kn)
|
||||
|
||||
kn->kn_fop = &bpfread_filtops;
|
||||
kn->kn_hook = d;
|
||||
BPFD_LOCK(d);
|
||||
SLIST_INSERT_HEAD(&d->bd_sel.si_note, kn, kn_selnext);
|
||||
BPFD_UNLOCK(d);
|
||||
knlist_add(&d->bd_sel.si_note, kn, 0);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -1102,9 +1102,7 @@ filt_bpfdetach(kn)
|
||||
{
|
||||
struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
|
||||
|
||||
BPFD_LOCK(d);
|
||||
SLIST_REMOVE(&d->bd_sel.si_note, kn, knote, kn_selnext);
|
||||
BPFD_UNLOCK(d);
|
||||
knlist_remove(&d->bd_sel.si_note, kn, 0);
|
||||
}
|
||||
|
||||
static int
|
||||
|
sys/net/if.c
@ -109,7 +109,7 @@ struct ifnethead ifnet; /* depend on static init XXX */
|
||||
struct mtx ifnet_lock;
|
||||
|
||||
static int if_indexlim = 8;
|
||||
static struct klist ifklist;
|
||||
static struct knlist ifklist;
|
||||
|
||||
static void filt_netdetach(struct knote *kn);
|
||||
static int filt_netdev(struct knote *kn, long hint);
|
||||
@ -185,10 +185,18 @@ netioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td
|
||||
static int
|
||||
netkqfilter(struct cdev *dev, struct knote *kn)
|
||||
{
|
||||
struct klist *klist;
|
||||
struct knlist *klist;
|
||||
struct ifnet *ifp;
|
||||
int idx;
|
||||
|
||||
switch (kn->kn_filter) {
|
||||
case EVFILT_NETDEV:
|
||||
kn->kn_fop = &netdev_filtops;
|
||||
break;
|
||||
default:
|
||||
return (1);
|
||||
}
|
||||
|
||||
idx = minor(dev);
|
||||
if (idx == 0) {
|
||||
klist = &ifklist;
|
||||
@ -199,18 +207,9 @@ netkqfilter(struct cdev *dev, struct knote *kn)
|
||||
klist = &ifp->if_klist;
|
||||
}
|
||||
|
||||
switch (kn->kn_filter) {
|
||||
case EVFILT_NETDEV:
|
||||
kn->kn_fop = &netdev_filtops;
|
||||
break;
|
||||
default:
|
||||
return (1);
|
||||
}
|
||||
|
||||
kn->kn_hook = (caddr_t)klist;
|
||||
|
||||
/* XXX locking? */
|
||||
SLIST_INSERT_HEAD(klist, kn, kn_selnext);
|
||||
knlist_add(klist, kn, 0);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -218,27 +217,30 @@ netkqfilter(struct cdev *dev, struct knote *kn)
|
||||
static void
|
||||
filt_netdetach(struct knote *kn)
|
||||
{
|
||||
struct klist *klist = (struct klist *)kn->kn_hook;
|
||||
struct knlist *klist = (struct knlist *)kn->kn_hook;
|
||||
|
||||
if (kn->kn_status & KN_DETACHED)
|
||||
return;
|
||||
SLIST_REMOVE(klist, kn, knote, kn_selnext);
|
||||
|
||||
knlist_remove(klist, kn, 0);
|
||||
}
|
||||
|
||||
static int
|
||||
filt_netdev(struct knote *kn, long hint)
|
||||
{
|
||||
struct knlist *klist = (struct knlist *)kn->kn_hook;
|
||||
|
||||
/*
|
||||
* Currently NOTE_EXIT is abused to indicate device detach.
|
||||
*/
|
||||
if (hint == NOTE_EXIT) {
|
||||
kn->kn_data = NOTE_LINKINV;
|
||||
kn->kn_status |= KN_DETACHED;
|
||||
kn->kn_flags |= (EV_EOF | EV_ONESHOT);
|
||||
knlist_remove_inevent(klist, kn);
|
||||
return (1);
|
||||
}
|
||||
kn->kn_data = hint; /* current status */
|
||||
if (hint != 0)
|
||||
kn->kn_data = hint; /* current status */
|
||||
if (kn->kn_sfflags & hint)
|
||||
kn->kn_fflags |= hint;
|
||||
return (kn->kn_fflags != 0);
|
||||
@ -257,7 +259,7 @@ if_init(void *dummy __unused)
|
||||
|
||||
IFNET_LOCK_INIT();
|
||||
TAILQ_INIT(&ifnet);
|
||||
SLIST_INIT(&ifklist);
|
||||
knlist_init(&ifklist, NULL);
|
||||
if_grow(); /* create initial table */
|
||||
ifdev_byindex(0) = make_dev(&net_cdevsw, 0,
|
||||
UID_ROOT, GID_WHEEL, 0600, "network");
|
||||
@ -383,7 +385,7 @@ if_attach(struct ifnet *ifp)
|
||||
TAILQ_INIT(&ifp->if_addrhead);
|
||||
TAILQ_INIT(&ifp->if_prefixhead);
|
||||
TAILQ_INIT(&ifp->if_multiaddrs);
|
||||
SLIST_INIT(&ifp->if_klist);
|
||||
knlist_init(&ifp->if_klist, NULL);
|
||||
getmicrotime(&ifp->if_lastchange);
|
||||
|
||||
#ifdef MAC
|
||||
@ -620,7 +622,9 @@ if_detach(struct ifnet *ifp)
|
||||
#ifdef MAC
|
||||
mac_destroy_ifnet(ifp);
|
||||
#endif /* MAC */
|
||||
KNOTE(&ifp->if_klist, NOTE_EXIT);
|
||||
KNOTE_UNLOCKED(&ifp->if_klist, NOTE_EXIT);
|
||||
knlist_clear(&ifp->if_klist, 0);
|
||||
knlist_destroy(&ifp->if_klist);
|
||||
IFNET_WLOCK();
|
||||
found = 0;
|
||||
TAILQ_FOREACH(iter, &ifnet, if_link)
|
||||
|
@ -146,7 +146,7 @@ struct ifnet {
|
||||
* However, access to the AF_LINK address through this
|
||||
* field is deprecated. Use ifaddr_byindex() instead.
|
||||
*/
|
||||
struct klist if_klist; /* events attached to this if */
|
||||
struct knlist if_klist; /* events attached to this if */
|
||||
int if_pcount; /* number of promiscuous listeners */
|
||||
void *if_carp; /* carp (tbd) interface pointer */
|
||||
struct bpf_if *if_bpf; /* packet filter structure */
|
||||
|
@ -821,7 +821,7 @@ vlan_link_state(struct ifnet *ifp, int link)
|
||||
if (ifv->ifv_p == ifp) {
|
||||
ifv->ifv_if.if_link_state = ifv->ifv_p->if_link_state;
|
||||
rt_ifmsg(&(ifv->ifv_if));
|
||||
KNOTE(&ifp->if_klist, link);
|
||||
KNOTE_UNLOCKED(&ifp->if_klist, link);
|
||||
}
|
||||
}
|
||||
VLAN_UNLOCK();
|
||||
|
@ -118,9 +118,16 @@ struct kevent {
|
||||
* This is currently visible to userland to work around broken
|
||||
* programs which pull in <sys/proc.h>.
|
||||
*/
|
||||
#include <sys/queue.h>
|
||||
#include <sys/queue.h>
|
||||
#include <sys/_lock.h>
|
||||
#include <sys/_mutex.h>
|
||||
struct knote;
|
||||
SLIST_HEAD(klist, knote);
|
||||
struct knlist {
|
||||
struct mtx *kl_lock; /* lock to protect kll_list */
|
||||
struct klist kl_list;
|
||||
};
|
||||
|
||||
|
||||
#ifdef _KERNEL
|
||||
|
||||
@ -128,8 +135,14 @@ SLIST_HEAD(klist, knote);
|
||||
MALLOC_DECLARE(M_KQUEUE);
|
||||
#endif
|
||||
|
||||
#define KNOTE(list, hint) \
|
||||
do { if ((list) != NULL) knote(list, hint); } while (0)
|
||||
struct kqueue;
|
||||
SLIST_HEAD(kqlist, kqueue);
|
||||
|
||||
#define KNOTE(list, hist, lock) knote(list, hist, lock)
|
||||
#define KNOTE_LOCKED(list, hint) knote(list, hint, 1)
|
||||
#define KNOTE_UNLOCKED(list, hint) knote(list, hint, 0)
|
||||
#define KNOTE_STATUS_BEGIN(kn) knote_status(kn, 1)
|
||||
#define KNOTE_STATUS_END(kn) knote_status(kn, 0)
|
||||
|
||||
/*
|
||||
* Flag indicating hint is a signal. Used by EVFILT_SIGNAL, and also
|
||||
@ -144,13 +157,28 @@ struct filterops {
|
||||
int (*f_event)(struct knote *kn, long hint);
|
||||
};
|
||||
|
||||
/*
|
||||
* Setting the KN_INFLUX flag enables you to unlock the kq that this knote
|
||||
* is on, and modify kn_status as if you had the KQ lock.
|
||||
*
|
||||
* kn_sfflags, kn_sdata, and kn_kevent are protected by the knlist lock.
|
||||
*/
|
||||
struct knote {
|
||||
SLIST_ENTRY(knote) kn_link; /* for fd */
|
||||
SLIST_ENTRY(knote) kn_link; /* for kq */
|
||||
SLIST_ENTRY(knote) kn_selnext; /* for struct selinfo */
|
||||
struct knlist *kn_knlist; /* f_attach populated */
|
||||
TAILQ_ENTRY(knote) kn_tqe;
|
||||
struct kqueue *kn_kq; /* which queue we are on */
|
||||
struct kevent kn_kevent;
|
||||
int kn_status;
|
||||
int kn_status; /* protected by kq lock */
|
||||
#define KN_ACTIVE 0x01 /* event has been triggered */
|
||||
#define KN_QUEUED 0x02 /* event is on queue */
|
||||
#define KN_DISABLED 0x04 /* event is disabled */
|
||||
#define KN_DETACHED 0x08 /* knote is detached */
|
||||
#define KN_INFLUX 0x10 /* knote is in flux */
|
||||
#define KN_MARKER 0x20 /* ignore this knote */
|
||||
#define KN_KQUEUE 0x40 /* this knote belongs to a kq */
|
||||
#define KN_HASKQLOCK 0x80 /* for _inevent */
|
||||
int kn_sfflags; /* saved filter flags */
|
||||
intptr_t kn_sdata; /* saved data field */
|
||||
union {
|
||||
@ -159,10 +187,6 @@ struct knote {
|
||||
} kn_ptr;
|
||||
struct filterops *kn_fop;
|
||||
void *kn_hook;
|
||||
#define KN_ACTIVE 0x01 /* event has been triggered */
|
||||
#define KN_QUEUED 0x02 /* event is on queue */
|
||||
#define KN_DISABLED 0x04 /* event is disabled */
|
||||
#define KN_DETACHED 0x08 /* knote is detached */
|
||||
|
||||
#define kn_id kn_kevent.ident
|
||||
#define kn_filter kn_kevent.filter
|
||||
@ -174,12 +198,20 @@ struct knote {
|
||||
|
||||
struct thread;
|
||||
struct proc;
|
||||
struct knlist;
|
||||
|
||||
extern void knote(struct klist *list, long hint);
|
||||
extern void knote_remove(struct thread *p, struct klist *list);
|
||||
extern void knote(struct knlist *list, long hint, int islocked);
|
||||
extern void knote_status(struct knote *kn, int begin);
|
||||
extern void knlist_add(struct knlist *knl, struct knote *kn, int islocked);
|
||||
extern void knlist_remove(struct knlist *knl, struct knote *kn, int islocked);
|
||||
extern void knlist_remove_inevent(struct knlist *knl, struct knote *kn);
|
||||
extern int knlist_empty(struct knlist *knl);
|
||||
extern void knlist_init(struct knlist *knl, struct mtx *mtx);
|
||||
extern void knlist_destroy(struct knlist *knl);
|
||||
extern void knlist_clear(struct knlist *knl, int islocked);
|
||||
extern void knote_fdclose(struct thread *p, int fd);
|
||||
extern int kqueue_register(struct kqueue *kq,
|
||||
struct kevent *kev, struct thread *p);
|
||||
struct kevent *kev, struct thread *p, int waitok);
|
||||
extern int kqueue_add_filteropts(int filt, struct filterops *filtops);
|
||||
extern int kqueue_del_filteropts(int filt);
|
||||
|
||||
|
@ -29,11 +29,20 @@
|
||||
#ifndef _SYS_EVENTVAR_H_
|
||||
#define _SYS_EVENTVAR_H_
|
||||
|
||||
#ifndef _KERNEL
|
||||
#error "no user-servicable parts inside"
|
||||
#endif
|
||||
|
||||
#include <sys/_task.h>
|
||||
|
||||
#define KQ_NEVENTS 8 /* minimize copy{in,out} calls */
|
||||
#define KQEXTENT 256 /* linear growth by this amount */
|
||||
|
||||
struct kqueue {
|
||||
TAILQ_HEAD(kqlist, knote) kq_head; /* list of pending event */
|
||||
struct mtx kq_lock;
|
||||
int kq_refcnt;
|
||||
SLIST_ENTRY(kqueue) kq_list;
|
||||
TAILQ_HEAD(, knote) kq_head; /* list of pending event */
|
||||
int kq_count; /* number of pending events */
|
||||
struct selinfo kq_sel;
|
||||
struct sigio *kq_sigio;
|
||||
@ -41,8 +50,16 @@ struct kqueue {
|
||||
int kq_state;
|
||||
#define KQ_SEL 0x01
|
||||
#define KQ_SLEEP 0x02
|
||||
#define KQ_ASYNC 0x04
|
||||
struct kevent kq_kev[KQ_NEVENTS];
|
||||
#define KQ_FLUXWAIT 0x04 /* waiting for a in flux kn */
|
||||
#define KQ_ASYNC 0x08
|
||||
#define KQ_CLOSING 0x10
|
||||
#define KQ_TASKSCHED 0x20 /* task scheduled */
|
||||
#define KQ_TASKDRAIN 0x40 /* waiting for task to drain */
|
||||
int kq_knlistsize; /* size of knlist */
|
||||
struct klist *kq_knlist; /* list of knotes */
|
||||
u_long kq_knhashmask; /* size of knhash */
|
||||
struct klist *kq_knhash; /* hash table for knotes */
|
||||
struct task kq_task;
|
||||
};
|
||||
|
||||
#endif /* !_SYS_EVENTVAR_H_ */
|
||||
|
@ -34,6 +34,7 @@
|
||||
#define _SYS_FILEDESC_H_
|
||||
|
||||
#include <sys/queue.h>
|
||||
#include <sys/event.h>
|
||||
#include <sys/_lock.h>
|
||||
#include <sys/_mutex.h>
|
||||
|
||||
@ -71,11 +72,8 @@ struct filedesc {
|
||||
u_short fd_cmask; /* mask for file creation */
|
||||
u_short fd_refcnt; /* reference count */
|
||||
|
||||
int fd_knlistsize; /* size of knlist */
|
||||
struct klist *fd_knlist; /* list of attached knotes */
|
||||
u_long fd_knhashmask; /* size of knhash */
|
||||
struct klist *fd_knhash; /* hash table for attached knotes */
|
||||
struct mtx fd_mtx; /* protects members of this struct */
|
||||
struct kqlist fd_kqlist; /* list of kqueues on this filedesc */
|
||||
int fd_holdleaderscount; /* block fdfree() for shared close() */
|
||||
int fd_holdleaderswakeup; /* fdfree() needs wakeup */
|
||||
};
|
||||
|
@ -608,7 +608,6 @@ struct proc {
|
||||
struct vnode *p_textvp; /* (b) Vnode of executable. */
|
||||
sigset_t p_siglist; /* (c) Sigs not delivered to a td. */
|
||||
char p_lock; /* (c) Proclock (prevent swap) count. */
|
||||
struct klist p_klist; /* (c) Knotes attached to this proc. */
|
||||
struct sigiolst p_sigiolst; /* (c) List of sigio sources. */
|
||||
int p_sigparent; /* (c) Signal to parent on exit. */
|
||||
int p_sig; /* (n) For core dump/debugger XXX. */
|
||||
@ -638,6 +637,7 @@ struct proc {
|
||||
#define p_endcopy p_xstat
|
||||
|
||||
u_short p_xstat; /* (c) Exit status; also stop sig. */
|
||||
struct knlist p_klist; /* (c) Knotes attached to this proc. */
|
||||
int p_numthreads; /* (j) Number of threads. */
|
||||
int p_numksegrps; /* (c) number of ksegrps */
|
||||
struct mdproc p_md; /* Any machine-dependent fields. */
|
||||
|
@ -42,7 +42,7 @@
|
||||
struct selinfo {
|
||||
TAILQ_ENTRY(selinfo) si_thrlist; /* list hung off of thread */
|
||||
struct thread *si_thread; /* thread waiting */
|
||||
struct klist si_note; /* kernel note list */
|
||||
struct knlist si_note; /* kernel note list */
|
||||
short si_flags; /* see below */
|
||||
};
|
||||
#define SI_COLL 0x0001 /* collision occurred */
|
||||
|
@ -201,11 +201,13 @@ struct xvnode {
|
||||
vn_pollevent((vp), (events)); \
|
||||
} while (0)
|
||||
|
||||
#define VN_KNOTE(vp, b) \
|
||||
#define VN_KNOTE(vp, b, a) \
|
||||
do { \
|
||||
if ((vp)->v_pollinfo != NULL) \
|
||||
KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b)); \
|
||||
KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b), (a)); \
|
||||
} while (0)
|
||||
#define VN_KNOTE_LOCKED(vp, b) VN_KNOTE(vp, b, 1)
|
||||
#define VN_KNOTE_UNLOCKED(vp, b) VN_KNOTE(vp, b, 0)
|
||||
|
||||
/*
|
||||
* Vnode flags.
|
||||
|
@ -725,7 +725,7 @@ ffs_write(ap)
|
||||
DIP_SET(ip, i_mode, ip->i_mode);
|
||||
}
|
||||
if (resid > uio->uio_resid)
|
||||
VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
|
||||
VN_KNOTE_UNLOCKED(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
|
||||
if (error) {
|
||||
if (ioflag & IO_UNIT) {
|
||||
(void)UFS_TRUNCATE(vp, osize,
|
||||
|
@ -397,7 +397,7 @@ ufs_setacl(ap)
|
||||
ip->i_flag |= IN_CHANGE;
|
||||
}
|
||||
|
||||
VN_KNOTE(ap->a_vp, NOTE_ATTRIB);
|
||||
VN_KNOTE_UNLOCKED(ap->a_vp, NOTE_ATTRIB);
|
||||
return (0);
|
||||
}
|
||||
|
||||
|
@ -198,7 +198,7 @@ ufs_create(ap)
|
||||
ap->a_dvp, ap->a_vpp, ap->a_cnp);
|
||||
if (error)
|
||||
return (error);
|
||||
VN_KNOTE(ap->a_dvp, NOTE_WRITE);
|
||||
VN_KNOTE_UNLOCKED(ap->a_dvp, NOTE_WRITE);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -225,7 +225,7 @@ ufs_mknod(ap)
|
||||
ap->a_dvp, vpp, ap->a_cnp);
|
||||
if (error)
|
||||
return (error);
|
||||
VN_KNOTE(ap->a_dvp, NOTE_WRITE);
|
||||
VN_KNOTE_UNLOCKED(ap->a_dvp, NOTE_WRITE);
|
||||
ip = VTOI(*vpp);
|
||||
ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
|
||||
if (vap->va_rdev != VNOVAL) {
|
||||
@ -615,7 +615,7 @@ ufs_setattr(ap)
|
||||
return (EPERM);
|
||||
error = ufs_chmod(vp, (int)vap->va_mode, cred, td);
|
||||
}
|
||||
VN_KNOTE(vp, NOTE_ATTRIB);
|
||||
VN_KNOTE_UNLOCKED(vp, NOTE_ATTRIB);
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -799,8 +799,8 @@ ufs_remove(ap)
|
||||
error = ufs_dirremove(dvp, ip, ap->a_cnp->cn_flags, 0);
|
||||
if (ip->i_nlink <= 0)
|
||||
vp->v_vflag |= VV_NOSYNC;
|
||||
VN_KNOTE(vp, NOTE_DELETE);
|
||||
VN_KNOTE(dvp, NOTE_WRITE);
|
||||
VN_KNOTE_UNLOCKED(vp, NOTE_DELETE);
|
||||
VN_KNOTE_UNLOCKED(dvp, NOTE_WRITE);
|
||||
out:
|
||||
return (error);
|
||||
}
|
||||
@ -861,8 +861,8 @@ ufs_link(ap)
|
||||
softdep_change_linkcnt(ip);
|
||||
}
|
||||
out:
|
||||
VN_KNOTE(vp, NOTE_LINK);
|
||||
VN_KNOTE(tdvp, NOTE_WRITE);
|
||||
VN_KNOTE_UNLOCKED(vp, NOTE_LINK);
|
||||
VN_KNOTE_UNLOCKED(tdvp, NOTE_WRITE);
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -1037,7 +1037,7 @@ abortit:
|
||||
oldparent = dp->i_number;
|
||||
doingdirectory = 1;
|
||||
}
|
||||
VN_KNOTE(fdvp, NOTE_WRITE); /* XXX right place? */
|
||||
VN_KNOTE_UNLOCKED(fdvp, NOTE_WRITE); /* XXX right place? */
|
||||
vrele(fdvp);
|
||||
|
||||
/*
|
||||
@ -1146,7 +1146,7 @@ abortit:
|
||||
}
|
||||
goto bad;
|
||||
}
|
||||
VN_KNOTE(tdvp, NOTE_WRITE);
|
||||
VN_KNOTE_UNLOCKED(tdvp, NOTE_WRITE);
|
||||
vput(tdvp);
|
||||
} else {
|
||||
if (xp->i_dev != dp->i_dev || xp->i_dev != ip->i_dev)
|
||||
@ -1230,9 +1230,9 @@ abortit:
|
||||
tcnp->cn_cred, tcnp->cn_thread)) != 0)
|
||||
goto bad;
|
||||
}
|
||||
VN_KNOTE(tdvp, NOTE_WRITE);
|
||||
VN_KNOTE_UNLOCKED(tdvp, NOTE_WRITE);
|
||||
vput(tdvp);
|
||||
VN_KNOTE(tvp, NOTE_DELETE);
|
||||
VN_KNOTE_UNLOCKED(tvp, NOTE_DELETE);
|
||||
vput(tvp);
|
||||
xp = NULL;
|
||||
}
|
||||
@ -1302,7 +1302,7 @@ abortit:
|
||||
error = ufs_dirremove(fdvp, xp, fcnp->cn_flags, 0);
|
||||
xp->i_flag &= ~IN_RENAME;
|
||||
}
|
||||
VN_KNOTE(fvp, NOTE_RENAME);
|
||||
VN_KNOTE_UNLOCKED(fvp, NOTE_RENAME);
|
||||
if (dp)
|
||||
vput(fdvp);
|
||||
if (xp)
|
||||
@ -1620,7 +1620,7 @@ ufs_mkdir(ap)
|
||||
|
||||
bad:
|
||||
if (error == 0) {
|
||||
VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
|
||||
VN_KNOTE_UNLOCKED(dvp, NOTE_WRITE | NOTE_LINK);
|
||||
*ap->a_vpp = tvp;
|
||||
} else {
|
||||
dp->i_effnlink--;
|
||||
@ -1713,7 +1713,7 @@ ufs_rmdir(ap)
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
|
||||
VN_KNOTE_UNLOCKED(dvp, NOTE_WRITE | NOTE_LINK);
|
||||
cache_purge(dvp);
|
||||
/*
|
||||
* Truncate inode. The only stuff left in the directory is "." and
|
||||
@ -1742,7 +1742,7 @@ ufs_rmdir(ap)
|
||||
ufsdirhash_free(ip);
|
||||
#endif
|
||||
out:
|
||||
VN_KNOTE(vp, NOTE_DELETE);
|
||||
VN_KNOTE_UNLOCKED(vp, NOTE_DELETE);
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -1767,7 +1767,7 @@ ufs_symlink(ap)
|
||||
vpp, ap->a_cnp);
|
||||
if (error)
|
||||
return (error);
|
||||
VN_KNOTE(ap->a_dvp, NOTE_WRITE);
|
||||
VN_KNOTE_UNLOCKED(ap->a_dvp, NOTE_WRITE);
|
||||
vp = *vpp;
|
||||
len = strlen(ap->a_target);
|
||||
if (len < vp->v_mount->mnt_maxsymlinklen) {
|
||||
@ -2620,9 +2620,9 @@ ufs_kqfilter(ap)
|
||||
|
||||
if (vp->v_pollinfo == NULL)
|
||||
v_addpollinfo(vp);
|
||||
mtx_lock(&vp->v_pollinfo->vpi_lock);
|
||||
SLIST_INSERT_HEAD(&vp->v_pollinfo->vpi_selinfo.si_note, kn, kn_selnext);
|
||||
mtx_unlock(&vp->v_pollinfo->vpi_lock);
|
||||
if (vp->v_pollinfo == NULL)
|
||||
return ENOMEM;
|
||||
knlist_add(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -2633,10 +2633,7 @@ filt_ufsdetach(struct knote *kn)
|
||||
struct vnode *vp = (struct vnode *)kn->kn_hook;
|
||||
|
||||
KASSERT(vp->v_pollinfo != NULL, ("Mising v_pollinfo"));
|
||||
mtx_lock(&vp->v_pollinfo->vpi_lock);
|
||||
SLIST_REMOVE(&vp->v_pollinfo->vpi_selinfo.si_note,
|
||||
kn, knote, kn_selnext);
|
||||
mtx_unlock(&vp->v_pollinfo->vpi_lock);
|
||||
knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
|
||||
}
|
||||
|
||||
/*ARGSUSED*/
|
||||
|