The swap pager is compatible with direct dispatch. It does its own

locking and doesn't sleep. Flag the consumer we create as such. In
addition, decrement the in-flight index when we have an out-of-memory
error after having incremented it previously. This would have
prevented swapoff from working if the swap pager ever hit a resource
shortage trying to swap out something (the swap in path always waits
for a bio, so won't have this issue). Simplify the close logic by
abandoning the use of private and initializing the index to 1 and
dropping that reference when we previously set private.

Also, set sw_id only while sw_dev_mtx is held. This should only affect
swapping to a vnode, as opposed to a geom whose close always sets it to
NULL with sw_dev_mtx held.

Differential Revision: https://reviews.freebsd.org/D3547
This commit is contained in:
Warner Losh 2015-09-08 17:47:56 +00:00
parent d5ad1d0d6d
commit 9e3e3fe5b3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=287567

View File

@ -2345,8 +2345,8 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
swap_pager_swapoff(sp);
sp->sw_close(curthread, sp);
sp->sw_id = NULL;
mtx_lock(&sw_dev_mtx);
sp->sw_id = NULL;
TAILQ_REMOVE(&swtailq, sp, sw_list);
nswapdev--;
if (nswapdev == 0) {
@ -2532,6 +2532,33 @@ swapgeom_close_ev(void *arg, int flags)
g_destroy_consumer(cp);
}
/*
 * Take a reference on the g_consumer to cover a new in-flight
 * transaction.  The swap device list lock serializes the count.
 */
static void
swapgeom_acquire(struct g_consumer *cp)
{
	mtx_assert(&sw_dev_mtx, MA_OWNED);
	++cp->index;
}
/*
 * Drop a reference on the g_consumer.  Once the last reference is
 * gone, queue a close event; clearing sw_id is deferred until the
 * event was successfully posted.
 */
static void
swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
{
	mtx_assert(&sw_dev_mtx, MA_OWNED);
	if (--cp->index != 0)
		return;
	if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
		sp->sw_id = NULL;
}
static void
swapgeom_done(struct bio *bp2)
{
@ -2547,13 +2574,9 @@ swapgeom_done(struct bio *bp2)
bp->b_resid = bp->b_bcount - bp2->bio_completed;
bp->b_error = bp2->bio_error;
bufdone(bp);
sp = bp2->bio_caller1;
mtx_lock(&sw_dev_mtx);
if ((--cp->index) == 0 && cp->private) {
if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0) {
sp = bp2->bio_caller1;
sp->sw_id = NULL;
}
}
swapgeom_release(cp, sp);
mtx_unlock(&sw_dev_mtx);
g_destroy_bio(bp2);
}
@ -2573,13 +2596,16 @@ swapgeom_strategy(struct buf *bp, struct swdevt *sp)
bufdone(bp);
return;
}
cp->index++;
swapgeom_acquire(cp);
mtx_unlock(&sw_dev_mtx);
if (bp->b_iocmd == BIO_WRITE)
bio = g_new_bio();
else
bio = g_alloc_bio();
if (bio == NULL) {
mtx_lock(&sw_dev_mtx);
swapgeom_release(cp, sp);
mtx_unlock(&sw_dev_mtx);
bp->b_error = ENOMEM;
bp->b_ioflags |= BIO_ERROR;
bufdone(bp);
@ -2619,7 +2645,12 @@ swapgeom_orphan(struct g_consumer *cp)
break;
}
}
cp->private = (void *)(uintptr_t)1;
/*
* Drop reference we were created with. Do directly since we're in a
* special context where we don't have to queue the call to
* swapgeom_close_ev().
*/
cp->index--;
destroy = ((sp != NULL) && (cp->index == 0));
if (destroy)
sp->sw_id = NULL;
@ -2680,8 +2711,8 @@ swapongeom_ev(void *arg, int flags)
if (gp == NULL)
gp = g_new_geomf(&g_swap_class, "swap");
cp = g_new_consumer(gp);
cp->index = 0; /* Number of active I/Os. */
cp->private = NULL; /* Orphanization flag */
cp->index = 1; /* Number of active I/Os, plus one for being active. */
cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
g_attach(cp, pp);
/*
* XXX: Everytime you think you can improve the margin for