Change the nblock mutex, which protects the needsbuffer buffer
deficit flags, to an rwlock.  Take it in read mode in the subroutines
called from the buffer release code paths.

The needsbuffer flags are now updated with atomics, while the read
lock on nblock prevents losing the wakeups from bufspacewakeup() and
bufcountadd() in getnewbuf_bufd_help().

Under several interesting workloads the needsbuffer flags are never
set, while buffers are reused quickly.  This caused brelse() and
bqrelse() from different threads to contend on nblock.  Now they take
nblock in read mode and, since needsbuffer does not need an update,
proceed in parallel.
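
The lost-wakeup reasoning distills into a short sketch.  Everything
named below (flag_lock, flag_word, flag_wakeup(), flag_wait(), the
"flagwt" wait message, the PRIBIO priority) is illustrative only; the
KPIs themselves are the ones the patch uses:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <machine/atomic.h>

static struct rwlock flag_lock;	/* rw_init(&flag_lock, "flag lock") */
static volatile int flag_word;

/*
 * Waker, called from the hot release paths.  The read lock admits
 * any number of wakers in parallel; the CAS loop clears the bit
 * without requiring exclusive ownership of the word.
 */
static void
flag_wakeup(int bit)
{
	int need_wakeup, on;

	rw_rlock(&flag_lock);
	for (;;) {
		need_wakeup = 0;
		on = flag_word;
		if ((on & bit) == 0)	/* common case: no sleepers */
			break;
		need_wakeup = 1;
		if (atomic_cmpset_rel_int(&flag_word, on, on & ~bit))
			break;	/* otherwise the word changed, retry */
	}
	if (need_wakeup)
		wakeup((void *)&flag_word);
	rw_runlock(&flag_lock);
}

/*
 * Sleeper.  The write lock is held from the re-test to the sleep,
 * excluding all read-locked wakers for that window, so the wakeup
 * cannot fire between the test and the sleep and thus be lost.
 */
static void
flag_wait(int bit)
{
	atomic_set_int(&flag_word, bit);
	rw_wlock(&flag_lock);
	while ((flag_word & bit) != 0) {
		if (rw_sleep((void *)&flag_word, &flag_lock, PRIBIO,
		    "flagwt", 0) != 0)
			break;	/* signal or timeout */
	}
	rw_wunlock(&flag_lock);
}

A waker that runs before the sleeper takes the write lock clears the
bit, and the sleeper's while test then skips the sleep; a waker that
runs later blocks in rw_rlock() until the sleeper is queued, and its
wakeup() is seen.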

Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Konstantin Belousov	2014-06-09 03:38:03 +00:00
parent 4648ba0a0f
commit 7f82c6c17f
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=267255

sys/kern/vfs_bio.c

@@ -254,7 +254,7 @@ static struct mtx_padalign rbreqlock;
 /*
  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  */
-static struct mtx_padalign nblock;
+static struct rwlock_padalign nblock;
 
 /*
  * Lock that protects bdirtywait.
@@ -299,7 +299,7 @@ static int runningbufreq;
  * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(),
  * getnewbuf(), and getblk().
  */
-static int needsbuffer;
+static volatile int needsbuffer;
 
 /*
  * Synchronization for bwillwrite() waiters.
@@ -457,18 +457,27 @@ bdirtyadd(void)
 static __inline void
 bufspacewakeup(void)
 {
+	int need_wakeup, on;
 
 	/*
 	 * If someone is waiting for BUF space, wake them up.  Even
 	 * though we haven't freed the kva space yet, the waiting
 	 * process will be able to now.
 	 */
-	mtx_lock(&nblock);
-	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
-		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
-		wakeup(&needsbuffer);
+	rw_rlock(&nblock);
+	for (;;) {
+		need_wakeup = 0;
+		on = needsbuffer;
+		if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
+			break;
+		need_wakeup = 1;
+		if (atomic_cmpset_rel_int(&needsbuffer, on,
+		    on & ~VFS_BIO_NEED_BUFSPACE))
+			break;
 	}
-	mtx_unlock(&nblock);
+	if (need_wakeup)
+		wakeup((void *)&needsbuffer);
+	rw_runlock(&nblock);
 }
 
 /*
@@ -528,7 +537,7 @@ runningbufwakeup(struct buf *bp)
 static __inline void
 bufcountadd(struct buf *bp)
 {
-	int old;
+	int mask, need_wakeup, old, on;
 
 	KASSERT((bp->b_flags & B_INFREECNT) == 0,
 	    ("buf %p already counted as free", bp));
@@ -536,14 +545,22 @@ bufcountadd(struct buf *bp)
 	old = atomic_fetchadd_int(&numfreebuffers, 1);
 	KASSERT(old >= 0 && old < nbuf,
 	    ("numfreebuffers climbed to %d", old + 1));
-	mtx_lock(&nblock);
-	if (needsbuffer) {
-		needsbuffer &= ~VFS_BIO_NEED_ANY;
-		if (numfreebuffers >= hifreebuffers)
-			needsbuffer &= ~VFS_BIO_NEED_FREE;
-		wakeup(&needsbuffer);
+	mask = VFS_BIO_NEED_ANY;
+	if (numfreebuffers >= hifreebuffers)
+		mask |= VFS_BIO_NEED_FREE;
+	rw_rlock(&nblock);
+	for (;;) {
+		need_wakeup = 0;
+		on = needsbuffer;
+		if (on == 0)
+			break;
+		need_wakeup = 1;
+		if (atomic_cmpset_rel_int(&needsbuffer, on, on & ~mask))
+			break;
 	}
-	mtx_unlock(&nblock);
+	if (need_wakeup)
+		wakeup((void *)&needsbuffer);
+	rw_runlock(&nblock);
 }
 
 /*
@@ -787,7 +804,7 @@ bufinit(void)
 	mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF);
 	mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF);
 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
-	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
+	rw_init(&nblock, "needsbuffer lock");
 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
@@ -2085,9 +2102,7 @@ getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo,
 		waitmsg = "newbuf";
 		flags = VFS_BIO_NEED_ANY;
 	}
-	mtx_lock(&nblock);
-	needsbuffer |= flags;
-	mtx_unlock(&nblock);
+	atomic_set_int(&needsbuffer, flags);
 	mtx_unlock(&bqclean);
 
 	bd_speedup();	/* heeeelp */
@@ -2097,12 +2112,11 @@ getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo,
 	td = curthread;
 	cnt = 0;
 	wait = MNT_NOWAIT;
-	mtx_lock(&nblock);
-	while (needsbuffer & flags) {
+	rw_wlock(&nblock);
+	while ((needsbuffer & flags) != 0) {
 		if (vp != NULL && vp->v_type != VCHR &&
 		    (td->td_pflags & TDP_BUFNEED) == 0) {
-			mtx_unlock(&nblock);
+			rw_wunlock(&nblock);
 			/*
 			 * getblk() is called with a vnode locked, and
 			 * some majority of the dirty buffers may as
@@ -2124,15 +2138,16 @@ getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo,
 				atomic_add_long(&notbufdflushes, 1);
 			curthread_pflags_restore(norunbuf);
-			mtx_lock(&nblock);
+			rw_wlock(&nblock);
 			if ((needsbuffer & flags) == 0)
 				break;
 		}
-		if (msleep(&needsbuffer, &nblock, (PRIBIO + 4) | slpflag,
-		    waitmsg, slptimeo))
+		error = rw_sleep((void *)&needsbuffer, &nblock, (PRIBIO + 4) |
+		    slpflag, waitmsg, slptimeo);
+		if (error != 0)
 			break;
 	}
-	mtx_unlock(&nblock);
+	rw_wunlock(&nblock);
 }
 
 static void