Replace global swhash in swap pager with per-object trie to track swap
blocks assigned to the object pages.

- The global swhash_mtx is removed; the trie is synchronized by the
  corresponding object lock.
- The swp_pager_meta_free_all() function used during object
  termination is optimized by only looking at the trie instead of
  having to search the whole hash for the swap blocks owned by the
  object.
- On swap_pager_swapoff(), instead of iterating over the swhash, the
  global object list has to be inspected.  There, we have to ensure
  that we see valid trie content whenever we see that the object type
  is swap.
Sizing of the swblk zone is the same as for the old swblock zone; each
swblk maps SWAP_META_PAGES pages (an illustrative lookup sketch
follows below).
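(Illustrative sketch, not part of the commit: with the per-object layout,
fetching the block backing a page becomes a trie lookup keyed by the
SWAP_META_PAGES-aligned index, mirroring what swp_pager_meta_ctl() does in
the diff below.  The object lock stands in for the removed swhash_mtx.)

        struct swblk *sb;
        daddr_t blk;

        VM_OBJECT_ASSERT_LOCKED(object);
        sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
            rounddown(pindex, SWAP_META_PAGES));
        blk = (sb != NULL) ? sb->d[pindex % SWAP_META_PAGES] : SWAPBLK_NONE;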

Proposed by:	alc
Reviewed by:	alc, markj (previous version)
Tested by:	alc, pho (previous version)
Sponsored by:	The FreeBSD Foundation
MFC after:	1 month
Differential revision:	https://reviews.freebsd.org/D11435
Konstantin Belousov 2017-08-25 23:13:21 +00:00
parent 12fb14f36d
commit f425ab8e50
3 changed files with 291 additions and 294 deletions

sys/vm/swap_pager.c

@@ -87,6 +87,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/namei.h>
 #include <sys/vnode.h>
 #include <sys/malloc.h>
+#include <sys/pctrie.h>
 #include <sys/racct.h>
 #include <sys/resource.h>
 #include <sys/resourcevar.h>
@@ -127,22 +128,17 @@ __FBSDID("$FreeBSD$");
 #define SWB_NPAGES      MAX_PAGEOUT_CLUSTER
 #endif
 
-/*
- * The swblock structure maps an object and a small, fixed-size range
- * of page indices to disk addresses within a swap area.
- * The collection of these mappings is implemented as a hash table.
- * Unused disk addresses within a swap area are allocated and managed
- * using a blist.
- */
-#define SWAP_META_PAGES         32
-#define SWAP_META_MASK          (SWAP_META_PAGES - 1)
+#define SWAP_META_PAGES         PCTRIE_COUNT
 
-struct swblock {
-        struct swblock  *swb_hnext;
-        vm_object_t     swb_object;
-        vm_pindex_t     swb_index;
-        int             swb_count;
-        daddr_t         swb_pages[SWAP_META_PAGES];
+/*
+ * A swblk structure maps each page index within a
+ * SWAP_META_PAGES-aligned and sized range to the address of an
+ * on-disk swap block (or SWAPBLK_NONE).  The collection of these
+ * mappings for an entire vm object is implemented as a pc-trie.
+ */
+struct swblk {
+        vm_pindex_t     p;
+        daddr_t         d[SWAP_META_PAGES];
 };
 
 static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
@@ -328,10 +324,6 @@ SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
     CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
     "Maximum running async swap ops");
 
-static struct swblock **swhash;
-static int swhash_mask;
-static struct mtx swhash_mtx;
-
 static struct sx sw_alloc_sx;
 
 /*
@@ -345,7 +337,8 @@ static struct sx sw_alloc_sx;
         (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
 
 static struct pagerlst  swap_pager_object_list[NOBJLISTS];
-static uma_zone_t       swap_zone;
+static uma_zone_t       swblk_zone;
+static uma_zone_t       swpctrie_zone;
 
 /*
  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
@@ -403,12 +396,28 @@ static daddr_t swp_pager_getswapspace(int npages);
 /*
  * Metadata functions
  */
-static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
 static void swp_pager_meta_free_all(vm_object_t);
 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
 
+static void *
+swblk_trie_alloc(struct pctrie *ptree)
+{
+
+        return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
+            M_USE_RESERVE : 0)));
+}
+
+static void
+swblk_trie_free(struct pctrie *ptree, void *node)
+{
+
+        uma_zfree(swpctrie_zone, node);
+}
+
+PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);
+
 /*
  * SWP_SIZECHECK() -    update swap_pager_full indication
  *
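(Editorial gloss, not from the commit: PCTRIE_DEFINE(SWAP, swblk, p, ...)
above expands into the SWAP_PCTRIE_INSERT(), SWAP_PCTRIE_LOOKUP(),
SWAP_PCTRIE_LOOKUP_GE() and SWAP_PCTRIE_REMOVE() wrappers used later in this
file, keyed on the swblk p member.  The two callbacks feed trie nodes from
swpctrie_zone, with M_USE_RESERVE for the pageout daemon so that paging out
under memory pressure can still allocate metadata.  A minimal insertion
sketch, with the failure handling only described in a comment:)

        struct swblk *sb;       /* already filled in, sb->p aligned */
        int error;

        error = SWAP_PCTRIE_INSERT(&object->un_pager.swp.swp_blks, sb);
        if (error != 0) {
                /*
                 * ENOMEM: no pctrie node could be allocated.  The real
                 * code drops the object lock, waits (or invokes the OOM
                 * path when called from the pageout daemon) and retries.
                 */
        }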
@@ -436,33 +445,6 @@ swp_sizecheck(void)
         }
 }
 
-/*
- * SWP_PAGER_HASH() -   hash swap meta data
- *
- *      This is an helper function which hashes the swapblk given
- *      the object and page index.  It returns a pointer to a pointer
- *      to the object, or a pointer to a NULL pointer if it could not
- *      find a swapblk.
- */
-static struct swblock **
-swp_pager_hash(vm_object_t object, vm_pindex_t index)
-{
-        struct swblock **pswap;
-        struct swblock *swap;
-
-        index &= ~(vm_pindex_t)SWAP_META_MASK;
-        pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
-        while ((swap = *pswap) != NULL) {
-                if (swap->swb_object == object &&
-                    swap->swb_index == index
-                ) {
-                        break;
-                }
-                pswap = &swap->swb_hnext;
-        }
-        return (pswap);
-}
-
 /*
  * SWAP_PAGER_INIT() -  initialize the swap pager!
  *
@@ -528,21 +510,25 @@ swap_pager_swap_init(void)
         mtx_unlock(&pbuf_mtx);
 
         /*
-         * Initialize our zone.  Right now I'm just guessing on the number
-         * we need based on the number of pages in the system.  Each swblock
-         * can hold 32 pages, so this is probably overkill.  This reservation
-         * is typically limited to around 32MB by default.
+         * Initialize our zone, guessing on the number we need based
+         * on the number of pages in the system.
         */
         n = vm_cnt.v_page_count / 2;
-        if (maxswzone && n > maxswzone / sizeof(struct swblock))
-                n = maxswzone / sizeof(struct swblock);
+        if (maxswzone && n > maxswzone / sizeof(struct swblk))
+                n = maxswzone / sizeof(struct swblk);
+        swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
+            pctrie_zone_init, NULL, UMA_ALIGN_PTR,
+            UMA_ZONE_NOFREE | UMA_ZONE_VM);
+        if (swpctrie_zone == NULL)
+                panic("failed to create swap pctrie zone.");
+        swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
+            NULL, NULL, _Alignof(struct swblk) - 1,
+            UMA_ZONE_NOFREE | UMA_ZONE_VM);
+        if (swblk_zone == NULL)
+                panic("failed to create swap blk zone.");
         n2 = n;
-        swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
-            NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
-        if (swap_zone == NULL)
-                panic("failed to create swap_zone.");
         do {
-                if (uma_zone_reserve_kva(swap_zone, n))
+                if (uma_zone_reserve_kva(swblk_zone, n))
                         break;
                 /*
                  * if the allocation failed, try a zone two thirds the
@@ -551,24 +537,13 @@ swap_pager_swap_init(void)
                 n -= ((n + 2) / 3);
         } while (n > 0);
         if (n2 != n)
-                printf("Swap zone entries reduced from %lu to %lu.\n", n2, n);
+                printf("Swap blk zone entries reduced from %lu to %lu.\n",
+                    n2, n);
         swap_maxpages = n * SWAP_META_PAGES;
-        swzone = n * sizeof(struct swblock);
-        n2 = n;
-
-        /*
-         * Initialize our meta-data hash table.  The swapper does not need to
-         * be quite as efficient as the VM system, so we do not use an
-         * oversized hash table.
-         *
-         *      n:              size of hash table, must be power of 2
-         *      swhash_mask:    hash table index mask
-         */
-        for (n = 1; n < n2 / 8; n *= 2)
-                ;
-        swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
-        swhash_mask = n - 1;
-        mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
+        swzone = n * sizeof(struct swblk);
+        if (!uma_zone_reserve_kva(swpctrie_zone, n))
+                printf("Cannot reserve swap pctrie zone, "
+                    "reduce kern.maxswzone.\n");
 }
 
 static vm_object_t
@@ -582,14 +557,20 @@ swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
                         return (NULL);
                 crhold(cred);
         }
+
+        /*
+         * The un_pager.swp.swp_blks trie is initialized by
+         * vm_object_allocate() to ensure the correct order of
+         * visibility to other threads.
+         */
         object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
             PAGE_MASK + size));
         object->handle = handle;
         if (cred != NULL) {
                 object->cred = cred;
                 object->charge = size;
         }
-        object->un_pager.swp.swp_bcount = 0;
         return (object);
 }
@@ -1651,50 +1632,56 @@ swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
 static void
 swap_pager_swapoff(struct swdevt *sp)
 {
-        struct swblock *swap;
-        vm_object_t locked_obj, object;
-        vm_pindex_t pindex;
-        int i, j, retries;
+        struct swblk *sb;
+        vm_object_t object;
+        vm_pindex_t pi;
+        int i, retries;
 
         sx_assert(&swdev_syscall_lock, SA_XLOCKED);
 
         retries = 0;
-        locked_obj = NULL;
 full_rescan:
-        mtx_lock(&swhash_mtx);
-        for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
-restart:
-                for (swap = swhash[i]; swap != NULL; swap = swap->swb_hnext) {
-                        object = swap->swb_object;
-                        pindex = swap->swb_index;
-                        for (j = 0; j < SWAP_META_PAGES; ++j) {
-                                if (!swp_pager_isondev(swap->swb_pages[j], sp))
+        mtx_lock(&vm_object_list_mtx);
+        TAILQ_FOREACH(object, &vm_object_list, object_list) {
+                if (object->type != OBJT_SWAP)
+                        continue;
+                mtx_unlock(&vm_object_list_mtx);
+                /* Depends on type-stability. */
+                VM_OBJECT_WLOCK(object);
+
+                /*
+                 * Dead objects are eventually terminated on their own.
+                 */
+                if ((object->flags & OBJ_DEAD) != 0)
+                        goto next_obj;
+
+                /*
+                 * Sync with fences placed after pctrie
+                 * initialization.  We must not access pctrie below
+                 * unless we checked that our object is swap and not
+                 * dead.
+                 */
+                atomic_thread_fence_acq();
+                if (object->type != OBJT_SWAP)
+                        goto next_obj;
+
+                for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
+                    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
+                        pi = sb->p + SWAP_META_PAGES;
+                        for (i = 0; i < SWAP_META_PAGES; i++) {
+                                if (sb->d[i] == SWAPBLK_NONE)
                                         continue;
-                                if (locked_obj != object) {
-                                        if (locked_obj != NULL)
-                                                VM_OBJECT_WUNLOCK(locked_obj);
-                                        locked_obj = object;
-                                        if (!VM_OBJECT_TRYWLOCK(object)) {
-                                                mtx_unlock(&swhash_mtx);
-                                                /* Depends on type-stability. */
-                                                VM_OBJECT_WLOCK(object);
-                                                mtx_lock(&swhash_mtx);
-                                                goto restart;
-                                        }
-                                }
-                                MPASS(locked_obj == object);
-                                mtx_unlock(&swhash_mtx);
-                                swp_pager_force_pagein(object, pindex + j);
-                                mtx_lock(&swhash_mtx);
-                                goto restart;
+                                if (swp_pager_isondev(sb->d[i], sp))
+                                        swp_pager_force_pagein(object,
+                                            sb->p + i);
                         }
                 }
+next_obj:
+                VM_OBJECT_WUNLOCK(object);
+                mtx_lock(&vm_object_list_mtx);
         }
-        mtx_unlock(&swhash_mtx);
-        if (locked_obj != NULL) {
-                VM_OBJECT_WUNLOCK(locked_obj);
-                locked_obj = NULL;
-        }
+        mtx_unlock(&vm_object_list_mtx);
+
         if (sp->sw_used) {
                 /*
                  * Objects may be locked or paging to the device being
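(Editorial sketch of the walk idiom introduced in the hunk above and reused
by swp_pager_meta_free(), swp_pager_meta_free_all() and vmspace_swap_count():
take the next swblk at or above the cursor with SWAP_PCTRIE_LOOKUP_GE(), then
advance the cursor past the range that swblk covers.  The visit() callback is
hypothetical and only marks where per-block work would go.)

        struct swblk *sb;
        vm_pindex_t pi;
        int i;

        for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
            &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
                pi = sb->p + SWAP_META_PAGES;   /* resume after this swblk */
                for (i = 0; i < SWAP_META_PAGES; i++)
                        if (sb->d[i] != SWAPBLK_NONE)
                                visit(sb->p + i, sb->d[i]);  /* hypothetical */
        }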
@@ -1738,85 +1725,88 @@ restart:
 static void
 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
 {
-        static volatile int exhausted;
-        struct swblock *swap;
-        struct swblock **pswap;
-        int idx;
+        static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
+        struct swblk *sb;
+        vm_pindex_t modpi, rdpi;
+        int error, i;
 
         VM_OBJECT_ASSERT_WLOCKED(object);
         /*
          * Convert default object to swap object if necessary
          */
         if (object->type != OBJT_SWAP) {
+                pctrie_init(&object->un_pager.swp.swp_blks);
+
+                /*
+                 * Ensure that swap_pager_swapoff()'s iteration over
+                 * object_list does not see a garbage pctrie.
+                 */
+                atomic_thread_fence_rel();
+
                 object->type = OBJT_SWAP;
-                object->un_pager.swp.swp_bcount = 0;
                 KASSERT(object->handle == NULL, ("default pager with handle"));
         }
 
-        /*
-         * Locate hash entry.  If not found create, but if we aren't adding
-         * anything just return.  If we run out of space in the map we wait
-         * and, since the hash table may have changed, retry.
-         */
-retry:
-        mtx_lock(&swhash_mtx);
-        pswap = swp_pager_hash(object, pindex);
-
-        if ((swap = *pswap) == NULL) {
-                int i;
-
+        rdpi = rounddown(pindex, SWAP_META_PAGES);
+        sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
+        if (sb == NULL) {
                 if (swapblk == SWAPBLK_NONE)
-                        goto done;
-
-                swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT |
-                    (curproc == pageproc ? M_USE_RESERVE : 0));
-                if (swap == NULL) {
-                        mtx_unlock(&swhash_mtx);
+                        return;
+                for (;;) {
+                        sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
+                            pageproc ? M_USE_RESERVE : 0));
+                        if (sb != NULL) {
+                                sb->p = rdpi;
+                                for (i = 0; i < SWAP_META_PAGES; i++)
+                                        sb->d[i] = SWAPBLK_NONE;
+                                if (atomic_cmpset_int(&swblk_zone_exhausted,
+                                    1, 0))
+                                        printf("swblk zone ok\n");
+                                break;
+                        }
                         VM_OBJECT_WUNLOCK(object);
-                        if (uma_zone_exhausted(swap_zone)) {
-                                if (atomic_cmpset_int(&exhausted, 0, 1))
-                                        printf("swap zone exhausted, "
+                        if (uma_zone_exhausted(swblk_zone)) {
+                                if (atomic_cmpset_int(&swblk_zone_exhausted,
+                                    0, 1))
+                                        printf("swap blk zone exhausted, "
                                             "increase kern.maxswzone\n");
                                 vm_pageout_oom(VM_OOM_SWAPZ);
-                                pause("swzonex", 10);
+                                pause("swzonxb", 10);
+                        } else
+                                VM_WAIT;
+                        VM_OBJECT_WLOCK(object);
+                }
+                for (;;) {
+                        error = SWAP_PCTRIE_INSERT(
+                            &object->un_pager.swp.swp_blks, sb);
+                        if (error == 0) {
+                                if (atomic_cmpset_int(&swpctrie_zone_exhausted,
+                                    1, 0))
+                                        printf("swpctrie zone ok\n");
+                                break;
+                        }
+                        VM_OBJECT_WUNLOCK(object);
+                        if (uma_zone_exhausted(swpctrie_zone)) {
+                                if (atomic_cmpset_int(&swpctrie_zone_exhausted,
+                                    0, 1))
+                                        printf("swap pctrie zone exhausted, "
+                                            "increase kern.maxswzone\n");
+                                vm_pageout_oom(VM_OOM_SWAPZ);
+                                pause("swzonxp", 10);
                         } else
                                 VM_WAIT;
                         VM_OBJECT_WLOCK(object);
-                        goto retry;
                 }
-
-                if (atomic_cmpset_int(&exhausted, 1, 0))
-                        printf("swap zone ok\n");
-
-                swap->swb_hnext = NULL;
-                swap->swb_object = object;
-                swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
-                swap->swb_count = 0;
-
-                ++object->un_pager.swp.swp_bcount;
-
-                for (i = 0; i < SWAP_META_PAGES; ++i)
-                        swap->swb_pages[i] = SWAPBLK_NONE;
         }
+        MPASS(sb->p == rdpi);
 
-        /*
-         * Delete prior contents of metadata
-         */
-        idx = pindex & SWAP_META_MASK;
-
-        if (swap->swb_pages[idx] != SWAPBLK_NONE) {
-                swp_pager_freeswapspace(swap->swb_pages[idx], 1);
-                --swap->swb_count;
-        }
-
-        /*
-         * Enter block into metadata
-         */
-        swap->swb_pages[idx] = swapblk;
-        if (swapblk != SWAPBLK_NONE)
-                ++swap->swb_count;
-done:
-        mtx_unlock(&swhash_mtx);
+        modpi = pindex % SWAP_META_PAGES;
+        /* Delete prior contents of metadata. */
+        if (sb->d[modpi] != SWAPBLK_NONE)
+                swp_pager_freeswapspace(sb->d[modpi], 1);
+        /* Enter block into metadata. */
+        sb->d[modpi] = swapblk;
 }
 
 /*
@@ -1830,42 +1820,40 @@ done:
  * with resident pages.
  */
 static void
-swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
+swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
 {
-        struct swblock **pswap, *swap;
-        vm_pindex_t c;
-        daddr_t v;
-        int n, sidx;
+        struct swblk *sb;
+        vm_pindex_t last;
+        int i;
+        bool empty;
 
         VM_OBJECT_ASSERT_LOCKED(object);
         if (object->type != OBJT_SWAP || count == 0)
                 return;
 
-        mtx_lock(&swhash_mtx);
-        for (c = 0; c < count;) {
-                pswap = swp_pager_hash(object, index);
-                sidx = index & SWAP_META_MASK;
-                n = SWAP_META_PAGES - sidx;
-                index += n;
-                if ((swap = *pswap) == NULL) {
-                        c += n;
-                        continue;
-                }
-                for (; c < count && sidx < SWAP_META_PAGES; ++c, ++sidx) {
-                        if ((v = swap->swb_pages[sidx]) == SWAPBLK_NONE)
+        last = pindex + count - 1;
+        for (;;) {
+                sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
+                    rounddown(pindex, SWAP_META_PAGES));
+                if (sb == NULL || sb->p > last)
+                        break;
+                empty = true;
+                for (i = 0; i < SWAP_META_PAGES; i++) {
+                        if (sb->d[i] == SWAPBLK_NONE)
                                 continue;
-                        swp_pager_freeswapspace(v, 1);
-                        swap->swb_pages[sidx] = SWAPBLK_NONE;
-                        if (--swap->swb_count == 0) {
-                                *pswap = swap->swb_hnext;
-                                uma_zfree(swap_zone, swap);
-                                --object->un_pager.swp.swp_bcount;
-                                c += SWAP_META_PAGES - sidx;
-                                break;
-                        }
+                        if (pindex <= sb->p + i && sb->p + i <= last) {
+                                swp_pager_freeswapspace(sb->d[i], 1);
+                                sb->d[i] = SWAPBLK_NONE;
+                        } else
+                                empty = false;
+                }
+                pindex = sb->p + SWAP_META_PAGES;
+                if (empty) {
+                        SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
+                            sb->p);
+                        uma_zfree(swblk_zone, sb);
                 }
         }
-        mtx_unlock(&swhash_mtx);
 }
 
 /*
@@ -1877,36 +1865,23 @@ swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
 static void
 swp_pager_meta_free_all(vm_object_t object)
 {
-        struct swblock **pswap, *swap;
-        vm_pindex_t index;
-        daddr_t v;
+        struct swblk *sb;
+        vm_pindex_t pindex;
         int i;
 
         VM_OBJECT_ASSERT_WLOCKED(object);
         if (object->type != OBJT_SWAP)
                 return;
 
-        index = 0;
-        while (object->un_pager.swp.swp_bcount != 0) {
-                mtx_lock(&swhash_mtx);
-                pswap = swp_pager_hash(object, index);
-                if ((swap = *pswap) != NULL) {
-                        for (i = 0; i < SWAP_META_PAGES; ++i) {
-                                v = swap->swb_pages[i];
-                                if (v != SWAPBLK_NONE) {
-                                        --swap->swb_count;
-                                        swp_pager_freeswapspace(v, 1);
-                                }
-                        }
-                        if (swap->swb_count != 0)
-                                panic(
-                                    "swap_pager_meta_free_all: swb_count != 0");
-                        *pswap = swap->swb_hnext;
-                        uma_zfree(swap_zone, swap);
-                        --object->un_pager.swp.swp_bcount;
+        for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
+            &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
+                pindex = sb->p + SWAP_META_PAGES;
+                for (i = 0; i < SWAP_META_PAGES; i++) {
+                        if (sb->d[i] != SWAPBLK_NONE)
+                                swp_pager_freeswapspace(sb->d[i], 1);
                 }
-                mtx_unlock(&swhash_mtx);
-                index += SWAP_META_PAGES;
+                SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
+                uma_zfree(swblk_zone, sb);
         }
 }
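(A worked example of the range clipping in the swp_pager_meta_free() hunk
above; the numbers are purely illustrative and assume SWAP_META_PAGES == 16,
whereas the real value is PCTRIE_COUNT.)

        /*
         * Freeing pindex = 20, count = 10:
         *   last = 20 + 10 - 1 = 29
         *   rounddown(20, 16) = 16, so the walk starts at the swblk
         *   with p == 16, which covers page indices 16..31.
         * Only d[4]..d[13] (pages 20..29) are freed; d[0]..d[3] and
         * d[14]..d[15] are untouched, so "empty" stays false and the
         * swblk remains in the trie if any of them still map a block.
         */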
@@ -1920,9 +1895,6 @@ swp_pager_meta_free_all(vm_object_t object)
  *      was invalid.  This routine will automatically free any invalid
  *      meta-data swapblks.
  *
- *      It is not possible to store invalid swapblks in the swap meta data
- *      (other then a literal 'SWAPBLK_NONE'), so we don't bother checking.
- *
  *      When acting on a busy resident page and paging is in progress, we
  *      have to wait until paging is complete but otherwise can act on the
  *      busy page.
@@ -1933,10 +1905,9 @@ swp_pager_meta_free_all(vm_object_t object)
 static daddr_t
 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
 {
-        struct swblock **pswap;
-        struct swblock *swap;
+        struct swblk *sb;
         daddr_t r1;
-        int idx;
+        int i;
 
         VM_OBJECT_ASSERT_LOCKED(object);
         /*
@@ -1946,30 +1917,29 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
         if (object->type != OBJT_SWAP)
                 return (SWAPBLK_NONE);
 
-        r1 = SWAPBLK_NONE;
-        mtx_lock(&swhash_mtx);
-        pswap = swp_pager_hash(object, pindex);
-
-        if ((swap = *pswap) != NULL) {
-                idx = pindex & SWAP_META_MASK;
-                r1 = swap->swb_pages[idx];
-
-                if (r1 != SWAPBLK_NONE) {
-                        if (flags & SWM_FREE) {
-                                swp_pager_freeswapspace(r1, 1);
-                                r1 = SWAPBLK_NONE;
-                        }
-                        if (flags & (SWM_FREE|SWM_POP)) {
-                                swap->swb_pages[idx] = SWAPBLK_NONE;
-                                if (--swap->swb_count == 0) {
-                                        *pswap = swap->swb_hnext;
-                                        uma_zfree(swap_zone, swap);
-                                        --object->un_pager.swp.swp_bcount;
-                                }
-                        }
-                }
+        sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
+            rounddown(pindex, SWAP_META_PAGES));
+        if (sb == NULL)
+                return (SWAPBLK_NONE);
+        r1 = sb->d[pindex % SWAP_META_PAGES];
+        if (r1 == SWAPBLK_NONE)
+                return (SWAPBLK_NONE);
+        if ((flags & (SWM_FREE | SWM_POP)) != 0) {
+                sb->d[pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
+                for (i = 0; i < SWAP_META_PAGES; i++) {
+                        if (sb->d[i] != SWAPBLK_NONE)
+                                break;
+                }
+                if (i == SWAP_META_PAGES) {
+                        SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
+                            rounddown(pindex, SWAP_META_PAGES));
+                        uma_zfree(swblk_zone, sb);
+                }
+        }
+        if ((flags & SWM_FREE) != 0) {
+                swp_pager_freeswapspace(r1, 1);
+                r1 = SWAPBLK_NONE;
         }
-        mtx_unlock(&swhash_mtx);
         return (r1);
 }
@@ -1983,32 +1953,38 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
 vm_pindex_t
 swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
 {
-        struct swblock **pswap, *swap;
-        vm_pindex_t i, j, lim;
-        int idx;
+        struct swblk *sb;
+        int i;
 
         VM_OBJECT_ASSERT_LOCKED(object);
-        if (object->type != OBJT_SWAP || object->un_pager.swp.swp_bcount == 0)
+        if (object->type != OBJT_SWAP)
                 return (object->size);
 
-        mtx_lock(&swhash_mtx);
-        for (j = pindex; j < object->size; j = lim) {
-                pswap = swp_pager_hash(object, j);
-                lim = rounddown2(j + SWAP_META_PAGES, SWAP_META_PAGES);
-                if (lim > object->size)
-                        lim = object->size;
-                if ((swap = *pswap) != NULL) {
-                        for (idx = j & SWAP_META_MASK, i = j; i < lim;
-                            i++, idx++) {
-                                if (swap->swb_pages[idx] != SWAPBLK_NONE)
-                                        goto found;
-                        }
+        sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
+            rounddown(pindex, SWAP_META_PAGES));
+        if (sb == NULL)
+                return (object->size);
+        if (sb->p < pindex) {
+                for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
+                        if (sb->d[i] != SWAPBLK_NONE)
+                                return (sb->p + i);
                 }
+                sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
+                    roundup(pindex, SWAP_META_PAGES));
+                if (sb == NULL)
+                        return (object->size);
         }
-        i = object->size;
-found:
-        mtx_unlock(&swhash_mtx);
-        return (i);
+        for (i = 0; i < SWAP_META_PAGES; i++) {
+                if (sb->d[i] != SWAPBLK_NONE)
+                        return (sb->p + i);
+        }
+
+        /*
+         * We get here if a swblk is present in the trie but it
+         * doesn't map any blocks.
+         */
+        MPASS(0);
+        return (object->size);
 }
 
 /*
@@ -2044,7 +2020,7 @@ sys_swapon(struct thread *td, struct swapon_args *uap)
          * Swap metadata may not fit in the KVM if we have physical
          * memory of >1GB.
          */
-        if (swap_zone == NULL) {
+        if (swblk_zone == NULL) {
                 error = ENOMEM;
                 goto done;
         }
@@ -2088,7 +2064,7 @@ swapon_check_swzone(unsigned long npages)
         unsigned long maxpages;
 
         /* absolute maximum we can handle assuming 100% efficiency */
-        maxpages = uma_zone_get_max(swap_zone) * SWAP_META_PAGES;
+        maxpages = uma_zone_get_max(swblk_zone) * SWAP_META_PAGES;
 
         /* recommend using no more than half that amount */
         if (npages > maxpages / 2) {
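(A hedged numeric illustration of the capacity bound computed above; both the
zone limit and the SWAP_META_PAGES value below are hypothetical.)

        /*
         * Suppose uma_zone_get_max(swblk_zone) == 1,000,000 and
         * SWAP_META_PAGES == 16:
         *   maxpages = 1,000,000 * 16 = 16,000,000 pages
         *              (about 61 GiB with 4 KB pages),
         * so swapon_check_swzone() starts warning once configured swap
         * exceeds maxpages / 2 = 8,000,000 pages (about 30.5 GiB).
         */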
@@ -2413,15 +2389,9 @@ SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
     "Swap statistics by device");
 
 /*
- * vmspace_swap_count() - count the approximate swap usage in pages for a
- *                        vmspace.
- *
- *      The map must be locked.
- *
- *      Swap usage is determined by taking the proportional swap used by
- *      VM objects backing the VM map.  To make up for fractional losses,
- *      if the VM object has any swap use at all the associated map entries
- *      count for at least 1 swap page.
+ * Count the approximate swap usage in pages for a vmspace.  The
+ * shadowed or not yet copied on write swap blocks are not accounted.
+ * The map must be locked.
  */
 long
 vmspace_swap_count(struct vmspace *vmspace)
@@ -2429,23 +2399,38 @@ vmspace_swap_count(struct vmspace *vmspace)
         vm_map_t map;
         vm_map_entry_t cur;
         vm_object_t object;
-        long count, n;
+        struct swblk *sb;
+        vm_pindex_t e, pi;
+        long count;
+        int i;
 
         map = &vmspace->vm_map;
         count = 0;
 
         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
-                if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
-                    (object = cur->object.vm_object) != NULL) {
-                        VM_OBJECT_WLOCK(object);
-                        if (object->type == OBJT_SWAP &&
-                            object->un_pager.swp.swp_bcount != 0) {
-                                n = (cur->end - cur->start) / PAGE_SIZE;
-                                count += object->un_pager.swp.swp_bcount *
-                                    SWAP_META_PAGES * n / object->size + 1;
+                if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
+                        continue;
+                object = cur->object.vm_object;
+                if (object == NULL || object->type != OBJT_SWAP)
+                        continue;
+                VM_OBJECT_RLOCK(object);
+                if (object->type != OBJT_SWAP)
+                        goto unlock;
+                pi = OFF_TO_IDX(cur->offset);
+                e = pi + OFF_TO_IDX(cur->end - cur->start);
+                for (;; pi = sb->p + SWAP_META_PAGES) {
+                        sb = SWAP_PCTRIE_LOOKUP_GE(
+                            &object->un_pager.swp.swp_blks, pi);
+                        if (sb == NULL || sb->p >= e)
+                                break;
+                        for (i = 0; i < SWAP_META_PAGES; i++) {
+                                if (sb->p + i < e &&
+                                    sb->d[i] != SWAPBLK_NONE)
+                                        count++;
                         }
-                        VM_OBJECT_WUNLOCK(object);
                 }
+unlock:
+                VM_OBJECT_RUNLOCK(object);
         }
 
         return (count);
 }

sys/vm/vm_object.c

@@ -73,6 +73,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mman.h>
 #include <sys/mount.h>
 #include <sys/kernel.h>
+#include <sys/pctrie.h>
 #include <sys/sysctl.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>           /* for curproc, pageproc */
@@ -208,6 +209,7 @@ vm_object_zinit(void *mem, int size, int flags)
         object->paging_in_progress = 0;
         object->resident_page_count = 0;
         object->shadow_count = 0;
+        object->flags = OBJ_DEAD;
 
         mtx_lock(&vm_object_list_mtx);
         TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@@ -223,6 +225,16 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
         LIST_INIT(&object->shadow_head);
 
         object->type = type;
+        if (type == OBJT_SWAP)
+                pctrie_init(&object->un_pager.swp.swp_blks);
+
+        /*
+         * Ensure that swap_pager_swapoff() iteration over object_list
+         * sees up to date type and pctrie head if it observed
+         * non-dead object.
+         */
+        atomic_thread_fence_rel();
+
         switch (type) {
         case OBJT_DEAD:
                 panic("_vm_object_allocate: can't create OBJT_DEAD");

sys/vm/vm_object.h

@@ -70,6 +70,7 @@
 #include <sys/queue.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_pctrie.h>
 #include <sys/_rwlock.h>
 
 #include <vm/_vm_radix.h>
@@ -151,13 +152,12 @@ struct vm_object {
                  *      the handle changed and hash-chain
                  *      invalid.
                  *
-                 * swp_bcount - number of swap 'swblock' metablocks, each
-                 *              contains up to 16 swapblk assignments.
-                 *              see vm/swap_pager.h
+                 * swp_blks - pc-trie of the allocated swap blocks.
+                 *
                  */
                 struct {
                         void *swp_tmpfs;
-                        int swp_bcount;
+                        struct pctrie swp_blks;
                 } swp;
         } un_pager;
         struct ucred *cred;