Merge some additional leaf node socket buffer locking from
rwatson_netperf: Introduce conditional locking of the socket buffer in fifofs kqueue filters; KNOTE() will be called holding the socket buffer locks in fifofs, but sometimes the kqueue() system call will poll using the same entry point without holding the socket buffer lock. Introduce conditional locking of the socket buffer in the socket kqueue filters; KNOTE() will be called holding the socket buffer locks in the socket code, but sometimes the kqueue() system call will poll using the same entry points without holding the socket buffer lock. Simplify the logic in sodisconnect() since we no longer need spls. NOTE: To remove conditional locking in the kqueue filters, it would make sense to use a separate kqueue API entry into the socket/fifo code when calling from the kqueue() system call.
This commit is contained in:
parent
da181cc144
commit
c012260726
@@ -444,23 +444,33 @@ filt_fifordetach(struct knote *kn)
|
||||
{
|
||||
struct socket *so = (struct socket *)kn->kn_hook;
|
||||
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
|
||||
if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
|
||||
so->so_rcv.sb_flags &= ~SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(&so->so_rcv);
|
||||
}
|
||||
|
||||
static int
|
||||
filt_fiforead(struct knote *kn, long hint)
|
||||
{
|
||||
struct socket *so = (struct socket *)kn->kn_hook;
|
||||
int need_lock, result;
|
||||
|
||||
need_lock = !SOCKBUF_OWNED(&so->so_rcv);
|
||||
if (need_lock)
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
kn->kn_data = so->so_rcv.sb_cc;
|
||||
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
|
||||
kn->kn_flags |= EV_EOF;
|
||||
return (1);
|
||||
result = 1;
|
||||
} else {
|
||||
kn->kn_flags &= ~EV_EOF;
|
||||
result = (kn->kn_data > 0);
|
||||
}
|
||||
kn->kn_flags &= ~EV_EOF;
|
||||
return (kn->kn_data > 0);
|
||||
if (need_lock)
|
||||
SOCKBUF_UNLOCK(&so->so_rcv);
|
||||
return (result);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -468,23 +478,34 @@ filt_fifowdetach(struct knote *kn)
|
||||
{
|
||||
struct socket *so = (struct socket *)kn->kn_hook;
|
||||
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
|
||||
if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
|
||||
so->so_snd.sb_flags &= ~SB_KNOTE;
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
}
|
||||
|
||||
static int
|
||||
filt_fifowrite(struct knote *kn, long hint)
|
||||
{
|
||||
struct socket *so = (struct socket *)kn->kn_hook;
|
||||
int need_lock, result;
|
||||
|
||||
need_lock = !SOCKBUF_OWNED(&so->so_snd);
|
||||
if (need_lock)
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
kn->kn_data = sbspace(&so->so_snd);
|
||||
/* Unlocked read. */
|
||||
if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
|
||||
kn->kn_flags |= EV_EOF;
|
||||
return (1);
|
||||
result = 1;
|
||||
} else {
|
||||
kn->kn_flags &= ~EV_EOF;
|
||||
result = (kn->kn_data >= so->so_snd.sb_lowat);
|
||||
}
|
||||
kn->kn_flags &= ~EV_EOF;
|
||||
return (kn->kn_data >= so->so_snd.sb_lowat);
|
||||
if (need_lock)
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
return (result);
|
||||
}
|
||||
|
||||
/* ARGSUSED */
|
||||
|
@@ -506,20 +506,13 @@ int
|
||||
sodisconnect(so)
|
||||
struct socket *so;
|
||||
{
|
||||
int s = splnet();
|
||||
int error;
|
||||
|
||||
if ((so->so_state & SS_ISCONNECTED) == 0) {
|
||||
error = ENOTCONN;
|
||||
goto bad;
|
||||
}
|
||||
if (so->so_state & SS_ISDISCONNECTING) {
|
||||
error = EALREADY;
|
||||
goto bad;
|
||||
}
|
||||
if ((so->so_state & SS_ISCONNECTED) == 0)
|
||||
return (ENOTCONN);
|
||||
if (so->so_state & SS_ISDISCONNECTING)
|
||||
return (EALREADY);
|
||||
error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
|
||||
bad:
|
||||
splx(s);
|
||||
return (error);
|
||||
}
|
||||
|
||||
@@ -1913,8 +1906,16 @@ static int
|
||||
filt_soread(struct knote *kn, long hint)
|
||||
{
|
||||
struct socket *so = kn->kn_fp->f_data;
|
||||
int result;
|
||||
int need_lock, result;
|
||||
|
||||
/*
|
||||
* XXXRW: Conditional locking because filt_soread() can be called
|
||||
* either from KNOTE() in the socket context where the socket buffer
|
||||
* lock is already held, or from kqueue() itself.
|
||||
*/
|
||||
need_lock = !SOCKBUF_OWNED(&so->so_rcv);
|
||||
if (need_lock)
|
||||
SOCKBUF_LOCK(&so->so_rcv);
|
||||
kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
|
||||
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
|
||||
kn->kn_flags |= EV_EOF;
|
||||
@@ -1926,6 +1927,8 @@ filt_soread(struct knote *kn, long hint)
|
||||
result = (kn->kn_data >= kn->kn_sdata);
|
||||
else
|
||||
result = (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
|
||||
if (need_lock)
|
||||
SOCKBUF_UNLOCK(&so->so_rcv);
|
||||
return (result);
|
||||
}
|
||||
|
||||
@@ -1946,8 +1949,16 @@ static int
|
||||
filt_sowrite(struct knote *kn, long hint)
|
||||
{
|
||||
struct socket *so = kn->kn_fp->f_data;
|
||||
int result;
|
||||
int need_lock, result;
|
||||
|
||||
/*
|
||||
* XXXRW: Conditional locking because filt_soread() can be called
|
||||
* either from KNOTE() in the socket context where the socket buffer
|
||||
* lock is already held, or from kqueue() itself.
|
||||
*/
|
||||
need_lock = !SOCKBUF_OWNED(&so->so_snd);
|
||||
if (need_lock)
|
||||
SOCKBUF_LOCK(&so->so_snd);
|
||||
kn->kn_data = sbspace(&so->so_snd);
|
||||
if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
|
||||
kn->kn_flags |= EV_EOF;
|
||||
@@ -1962,6 +1973,8 @@ filt_sowrite(struct knote *kn, long hint)
|
||||
result = (kn->kn_data >= kn->kn_sdata);
|
||||
else
|
||||
result = (kn->kn_data >= so->so_snd.sb_lowat);
|
||||
if (need_lock)
|
||||
SOCKBUF_UNLOCK(&so->so_snd);
|
||||
return (result);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user