Reimplement socket buffer tear-down in sofree(): as the socket is no
longer referenced by other threads (hence our freeing it), we don't need
to set the can't send and can't receive flags, wake up the consumers,
perform two levels of locking, etc.  Implement a fast-path teardown,
sbdestroy(), which flushes and releases each socket buffer.  A manual
dom_dispose of the receive buffer is still required explicitly to GC
any in-flight file descriptors, etc, before flushing the buffer.

This results in a 9% UP performance improvement and 16% SMP performance
improvement on a tight loop of socket();close(); in micro-benchmarking,
but will likely also affect CPU-bound macro-benchmark performance.
This commit is contained in:
Robert Watson 2006-08-01 10:30:26 +00:00
parent 225ade520e
commit eaa6dfbcc2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=160875
3 changed files with 76 additions and 27 deletions

View File

@ -64,6 +64,10 @@ static u_long sb_max_adj =
static u_long sb_efficiency = 8; /* parameter for sbreserve() */
static void sbdrop_internal(register struct sockbuf *sb, register int len);
static void sbflush_internal(register struct sockbuf *sb);
static void sbrelease_internal(struct sockbuf *sb, struct socket *so);
/*
* Socantsendmore indicates that no more data will be sent on the
* socket; it would normally be applied to a socket when the user
@ -331,6 +335,18 @@ sbreserve(sb, cc, so, td)
/*
* Free mbufs held by a socket, and reserved mbuf space.
*/
/*
 * Common socket-buffer teardown: flush all queued mbufs via
 * sbflush_internal(), return the reserved space (sb_hiwat) to the owning
 * credential's per-uid accounting via chgsbsize(), and clear the mbuf
 * limit.  Performs no locking itself; callers provide whatever
 * synchronization is needed (sbrelease_locked() holds the sockbuf lock,
 * sbdestroy() relies on the socket being unreferenced).
 */
static void
sbrelease_internal(sb, so)
struct sockbuf *sb;
struct socket *so;
{
sbflush_internal(sb);
(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
RLIM_INFINITY);
sb->sb_mbmax = 0;
}
void
sbrelease_locked(sb, so)
struct sockbuf *sb;
@ -339,10 +355,7 @@ sbrelease_locked(sb, so)
SOCKBUF_LOCK_ASSERT(sb);
sbflush_locked(sb);
(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
RLIM_INFINITY);
sb->sb_mbmax = 0;
sbrelease_internal(sb, so);
}
void
@ -355,6 +368,17 @@ sbrelease(sb, so)
sbrelease_locked(sb, so);
SOCKBUF_UNLOCK(sb);
}
/*
 * Fast-path teardown of a socket buffer, for use when the owning socket
 * is no longer referenced anywhere else in the stack (see sofree()):
 * flushes and releases the buffer without asserting or acquiring the
 * sockbuf lock and without waking up consumers.
 */
void
sbdestroy(sb, so)
struct sockbuf *sb;
struct socket *so;
{
sbrelease_internal(sb, so);
}
/*
* Routines to add and remove
* data from an mbuf queue.
@ -823,13 +847,11 @@ sbcompress(sb, m, n)
* Free all mbufs in a sockbuf.
* Check that all resources are reclaimed.
*/
void
sbflush_locked(sb)
static void
sbflush_internal(sb)
register struct sockbuf *sb;
{
SOCKBUF_LOCK_ASSERT(sb);
if (sb->sb_flags & SB_LOCK)
panic("sbflush_locked: locked");
while (sb->sb_mbcnt) {
@ -839,12 +861,21 @@ sbflush_locked(sb)
*/
if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
break;
sbdrop_locked(sb, (int)sb->sb_cc);
sbdrop_internal(sb, (int)sb->sb_cc);
}
if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
panic("sbflush_locked: cc %u || mb %p || mbcnt %u", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
}
/*
 * Locked variant of the socket-buffer flush: asserts that the caller
 * holds the sockbuf mutex, then delegates to sbflush_internal().
 */
void
sbflush_locked(sb)
register struct sockbuf *sb;
{
SOCKBUF_LOCK_ASSERT(sb);
sbflush_internal(sb);
}
void
sbflush(sb)
register struct sockbuf *sb;
@ -858,16 +889,14 @@ sbflush(sb)
/*
* Drop data from (the front of) a sockbuf.
*/
void
sbdrop_locked(sb, len)
static void
sbdrop_internal(sb, len)
register struct sockbuf *sb;
register int len;
{
register struct mbuf *m;
struct mbuf *next;
SOCKBUF_LOCK_ASSERT(sb);
next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
while (len > 0) {
if (m == 0) {
@ -915,6 +944,17 @@ sbdrop_locked(sb, len)
/*
* Drop data from (the front of) a sockbuf.
*/
/*
 * Locked variant of the front-of-sockbuf drop: asserts that the caller
 * holds the sockbuf mutex, then delegates to sbdrop_internal().
 */
void
sbdrop_locked(sb, len)
register struct sockbuf *sb;
register int len;
{
SOCKBUF_LOCK_ASSERT(sb);
sbdrop_internal(sb, len);
}
void
sbdrop(sb, len)
register struct sockbuf *sb;

View File

@ -555,6 +555,7 @@ void
sofree(so)
struct socket *so;
{
struct protosw *pr = so->so_proto;
struct socket *head;
ACCEPT_LOCK_ASSERT();
@ -588,24 +589,31 @@ sofree(so)
SOCK_UNLOCK(so);
ACCEPT_UNLOCK();
SOCKBUF_LOCK(&so->so_snd);
so->so_snd.sb_flags |= SB_NOINTR;
(void)sblock(&so->so_snd, M_WAITOK);
/*
* socantsendmore_locked() drops the socket buffer mutex so that it
* can safely perform wakeups. Re-acquire the mutex before
* continuing.
* From this point on, we assume that no other references to this
* socket exist anywhere else in the stack. Therefore, no locks need
* to be acquired or held.
*
* We used to do a lot of socket buffer and socket locking here, as
* well as invoke sorflush() and perform wakeups. The direct call to
* dom_dispose() and sbrelease_internal() are an inlining of what was
* necessary from sorflush().
*
* Notice that the socket buffer and kqueue state are torn down
* before calling pru_detach. This means that protocols should not
* assume they can perform socket wakeups, etc, in their detach
* code.
*/
socantsendmore_locked(so);
SOCKBUF_LOCK(&so->so_snd);
sbunlock(&so->so_snd);
sbrelease_locked(&so->so_snd, so);
SOCKBUF_UNLOCK(&so->so_snd);
sorflush(so);
KASSERT((so->so_snd.sb_flags & SB_LOCK) == 0, ("sofree: snd sblock"));
KASSERT((so->so_rcv.sb_flags & SB_LOCK) == 0, ("sofree: rcv sblock"));
sbdestroy(&so->so_snd, so);
if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
sbdestroy(&so->so_rcv, so);
knlist_destroy(&so->so_rcv.sb_sel.si_note);
knlist_destroy(&so->so_snd.sb_sel.si_note);
if (so->so_proto->pr_usrreqs->pru_detach != NULL)
(*so->so_proto->pr_usrreqs->pru_detach)(so);
if (pr->pr_usrreqs->pru_detach != NULL)
(*pr->pr_usrreqs->pru_detach)(so);
sodealloc(so);
}

View File

@ -477,6 +477,7 @@ void sbcheck(struct sockbuf *sb);
void sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n);
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level);
void sbdestroy(struct sockbuf *sb, struct socket *so);
void sbdrop(struct sockbuf *sb, int len);
void sbdrop_locked(struct sockbuf *sb, int len);
void sbdroprecord(struct sockbuf *sb);