Correct two problems relating to sorflush(), which is called to flush
read socket buffers in shutdown() and close():

- Call socantrcvmore() before sblock() to dislodge any threads that
  might be sleeping (potentially indefinitely) while holding sblock(),
  such as a thread blocked in recv().  The sketch after this list
  illustrates the ordering.

- Flag the sblock() call as non-interruptible so that a signal
  delivered to the thread calling sorflush() doesn't cause sblock() to
  fail.  The sblock() is required to ensure that all other socket
  consumer threads have, in fact, left, and do not enter, the socket
  buffer until we're done flushing it.
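
The user-space sketch below (illustration only, not kernel code)
models why this ordering matters: a reader that holds the sblock()
lock while sleeping for data has to be woken by socantrcvmore()
before sorflush() can hope to acquire that lock itself.  The pthread
objects and names are stand-ins for the kernel primitives.

/*
 * Illustrative model: pthread objects stand in for the socket buffer
 * mutex, the sbwait() sleep, and the sx lock taken by sblock().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sb_mtx = PTHREAD_MUTEX_INITIALIZER; /* ~ SOCKBUF_LOCK() */
static pthread_cond_t  sb_cv  = PTHREAD_COND_INITIALIZER;  /* ~ sbwait() */
static pthread_mutex_t sb_sx  = PTHREAD_MUTEX_INITIALIZER; /* ~ sblock()'s lock */
static bool cantrcvmore;

static void *
reader(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&sb_sx);		/* Reader holds the I/O lock... */
	pthread_mutex_lock(&sb_mtx);
	while (!cantrcvmore)			/* ...while sleeping for data. */
		pthread_cond_wait(&sb_cv, &sb_mtx);
	pthread_mutex_unlock(&sb_mtx);
	pthread_mutex_unlock(&sb_sx);		/* Wakes, sees EOF, drops lock. */
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);

	/* Step 1, as in the new sorflush(): wake any sleeping readers. */
	pthread_mutex_lock(&sb_mtx);
	cantrcvmore = true;
	pthread_cond_broadcast(&sb_cv);
	pthread_mutex_unlock(&sb_mtx);

	/* Step 2: only now block waiting for exclusive buffer access. */
	pthread_mutex_lock(&sb_sx);
	printf("buffer is ours; flush may proceed\n");
	pthread_mutex_unlock(&sb_sx);

	pthread_join(t, NULL);
	return (0);
}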

To implement the latter, change the 'flags' argument to sblock() to
accept two flags, SBL_WAIT and SBL_NOINTR, rather than a single
M_WAITOK flag.  When SBL_NOINTR is set, it forces a non-interruptible
sx acquisition regardless of the SB_NOINTR disposition on the socket
buffer; without this change, another thread could clear SB_NOINTR
between the release of the socket buffer mutex and the invocation of
sblock().
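
For reference, the standalone sketch below (again illustrative, not
kernel code) shows the policy that results.  The lock primitives are
stubs, the SB_NOINTR value is a placeholder, and the interruptible and
non-blocking branches are assumptions filled in for completeness
rather than lines taken from this diff.

/*
 * Standalone sketch of the new sblock() flag handling.
 */
#include <errno.h>
#include <stdio.h>

#define	SBL_WAIT	0x00000001	/* Wait if not immediately available. */
#define	SBL_NOINTR	0x00000002	/* Force non-interruptible sleep. */
#define	SB_NOINTR	0x0200		/* Placeholder bit for this sketch. */

static void stub_xlock(void)     { }			/* Uninterruptible sleep. */
static int  stub_xlock_sig(void) { return (0); }	/* 0, or EINTR on signal. */
static int  stub_try_xlock(void) { return (1); }	/* 1 on success, 0 if busy. */

static int
sblock_sketch(int sb_flags, int flags)
{
	if (flags & SBL_WAIT) {
		/*
		 * Either the buffer's SB_NOINTR disposition or the caller's
		 * SBL_NOINTR forces a non-interruptible acquisition, so a
		 * racing clear of SB_NOINTR cannot reintroduce signals.
		 */
		if ((sb_flags & SB_NOINTR) || (flags & SBL_NOINTR)) {
			stub_xlock();
			return (0);
		}
		return (stub_xlock_sig());
	}
	if (stub_try_xlock() == 0)
		return (EWOULDBLOCK);
	return (0);
}

int
main(void)
{
	/* sorflush()-style call: must succeed even with a signal pending. */
	printf("%d\n", sblock_sketch(0, SBL_WAIT | SBL_NOINTR));
	return (0);
}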

Reviewed by:	bz, kmacy
Reported by:	Jos Backus <jos at catnook dot com>
Robert Watson 2008-01-31 08:22:24 +00:00
parent 5f1f828a63
commit 265de5bb62
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=175845
7 changed files with 36 additions and 13 deletions

View File

@@ -137,8 +137,12 @@ int
 sblock(struct sockbuf *sb, int flags)
 {
-	if (flags == M_WAITOK) {
-		if (sb->sb_flags & SB_NOINTR) {
+	KASSERT((flags & SBL_VALID) == flags,
+	    ("sblock: flags invalid (0x%x)", flags));
+	if (flags & SBL_WAIT) {
+		if ((sb->sb_flags & SB_NOINTR) ||
+		    (flags & SBL_NOINTR)) {
 			sx_xlock(&sb->sb_sx);
 			return (0);
 		}

View File

@@ -916,7 +916,7 @@ sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
 }
 #endif /*ZERO_COPY_SOCKETS*/
-#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
+#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
 int
 sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
@@ -1884,10 +1884,16 @@ sorflush(struct socket *so)
	 * however, we have to initialize and destroy the mutex in the copy
	 * so that dom_dispose() and sbrelease() can lock it as needed.
	 */
-	(void) sblock(sb, M_WAITOK);
-	SOCKBUF_LOCK(sb);
-	sb->sb_flags |= SB_NOINTR;
-	socantrcvmore_locked(so);
+	/*
+	 * Dislodge threads currently blocked in receive and wait to acquire
+	 * a lock against other simultaneous readers before clearing the
+	 * socket buffer.  Don't let our acquire be interrupted by a signal
+	 * despite any existing socket disposition on interruptable waiting.
+	 */
+	socantrcvmore(so);
+	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);
	/*
	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
	 * and mutex data unchanged.

View File

@@ -1863,8 +1863,13 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
 		}
 	}
-	/* Protect against multiple writers to the socket. */
-	(void) sblock(&so->so_snd, M_WAITOK);
+	/*
+	 * Protect against multiple writers to the socket.
+	 *
+	 * XXXRW: Historically this has assumed non-interruptibility, so now
+	 * we implement that, but possibly shouldn't.
+	 */
+	(void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
	/*
	 * Loop through the pages of the file, starting with the requested

View File

@@ -2509,7 +2509,8 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
 			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
 			SCTP_TCB_UNLOCK((*stcb));
-			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
+			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
+			    0);
 			SCTP_TCB_LOCK((*stcb));
 			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);

View File

@@ -134,7 +134,7 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
 	atomic_add_int(&stcb->asoc.refcnt, 1);
 	SCTP_TCB_UNLOCK(stcb);
-	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
 	return (0);
@@ -230,7 +230,7 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
	 * And now the final hack. We move data in the pending side i.e.
	 * head to the new socket buffer. Let the GRUBBING begin :-0
	 */
-	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
 	atomic_subtract_int(&stcb->asoc.refcnt, 1);
 	return (newso);
 }

View File

@@ -4993,7 +4993,7 @@ sctp_sorecvmsg(struct socket *so,
 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
 	}
-	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
+	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
 	sockbuf_lock = 1;
 	if (error) {
 		goto release_unlocked;

View File

@@ -272,6 +272,13 @@ struct xsocket {
  * Macros for sockets and socket buffering.
  */
+/*
+ * Flags to sblock().
+ */
+#define	SBL_WAIT	0x00000001	/* Wait if not immediately available. */
+#define	SBL_NOINTR	0x00000002	/* Force non-interruptible sleep. */
+#define	SBL_VALID	(SBL_WAIT | SBL_NOINTR)
 /*
  * Do we need to notify the other side when I/O is possible?
  */