Protect pager object creation with sx locks.

Protect pager object list manipulation with a mutex.

It doesn't look possible to combine them under a single sx lock because
creation may block, and the object-list manipulation cannot block on
anything other than a mutex because it may be reached from interrupt
context.
This commit is contained in:
Alfred Perlstein 2001-04-18 20:24:16 +00:00
parent e18935281b
commit a9fa2c05fc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=75675
3 changed files with 43 additions and 33 deletions

View File

@ -43,6 +43,7 @@
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/sx.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@ -62,6 +63,11 @@ static boolean_t dev_pager_haspage __P((vm_object_t, vm_pindex_t, int *,
/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* serialize object creation */
static struct sx dev_pager_sx;
/* protect list manipulation */
static struct mtx dev_pager_mtx;
static vm_zone_t fakepg_zone;
static struct vm_zone fakepg_zone_store;
@ -69,8 +75,6 @@ static struct vm_zone fakepg_zone_store;
static vm_page_t dev_pager_getfake __P((vm_offset_t));
static void dev_pager_putfake __P((vm_page_t));
static int dev_pager_alloc_lock, dev_pager_alloc_lock_want;
struct pagerops devicepagerops = {
dev_pager_init,
dev_pager_alloc,
@ -85,6 +89,8 @@ static void
dev_pager_init()
{
TAILQ_INIT(&dev_pager_object_list);
sx_init(&dev_pager_sx, "dev_pager create");
mtx_init(&dev_pager_mtx, "dev_pager list", MTX_DEF);
fakepg_zone = &fakepg_zone_store;
zinitna(fakepg_zone, NULL, "DP fakepg", sizeof(struct vm_page), 0, 0, 2);
}
@ -130,12 +136,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t fo
/*
* Lock to prevent object creation race condition.
*/
while (dev_pager_alloc_lock) {
dev_pager_alloc_lock_want++;
tsleep(&dev_pager_alloc_lock, PVM, "dvpall", 0);
dev_pager_alloc_lock_want--;
}
dev_pager_alloc_lock = 1;
sx_xlock(&dev_pager_sx);
/*
* Look up pager, creating as necessary.
@ -149,7 +150,9 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t fo
OFF_TO_IDX(foff + size));
object->handle = handle;
TAILQ_INIT(&object->un_pager.devp.devp_pglist);
mtx_lock(&dev_pager_mtx);
TAILQ_INSERT_TAIL(&dev_pager_object_list, object, pager_object_list);
mtx_unlock(&dev_pager_mtx);
} else {
/*
* Gain a reference to the object.
@ -159,9 +162,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t fo
object->size = OFF_TO_IDX(foff + size);
}
dev_pager_alloc_lock = 0;
if (dev_pager_alloc_lock_want)
wakeup(&dev_pager_alloc_lock);
sx_xunlock(&dev_pager_sx);
return (object);
}
@ -172,7 +173,9 @@ dev_pager_dealloc(object)
{
vm_page_t m;
mtx_lock(&dev_pager_mtx);
TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
mtx_unlock(&dev_pager_mtx);
/*
* Free up our fake pages.
*/

View File

@ -29,8 +29,10 @@
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@ -38,16 +40,20 @@
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
/* prevent concurrent creation races */
static struct sx phys_pager_sx;
/* list of device pager objects */
static struct pagerlst phys_pager_object_list;
static int phys_pager_alloc_lock, phys_pager_alloc_lock_want;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;
static void
phys_pager_init(void)
{
TAILQ_INIT(&phys_pager_object_list);
sx_init(&phys_pager_sx, "phys_pager create");
mtx_init(&phys_pager_mtx, "phys_pager list", MTX_DEF);
}
static vm_object_t
@ -68,12 +74,7 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
/*
* Lock to prevent object creation race condition.
*/
while (phys_pager_alloc_lock) {
phys_pager_alloc_lock_want++;
tsleep(&phys_pager_alloc_lock, PVM, "ppall", 0);
phys_pager_alloc_lock_want--;
}
phys_pager_alloc_lock = 1;
sx_xlock(&phys_pager_sx);
/*
* Look up pager, creating as necessary.
@ -86,8 +87,10 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object = vm_object_allocate(OBJT_PHYS,
OFF_TO_IDX(foff + size));
object->handle = handle;
mtx_lock(&phys_pager_mtx);
TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
pager_object_list);
mtx_unlock(&phys_pager_mtx);
} else {
/*
* Gain a reference to the object.
@ -96,9 +99,7 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
if (OFF_TO_IDX(foff + size) > object->size)
object->size = OFF_TO_IDX(foff + size);
}
phys_pager_alloc_lock = 0;
if (phys_pager_alloc_lock_want)
wakeup(&phys_pager_alloc_lock);
sx_xunlock(&phys_pager_sx);
} else {
object = vm_object_allocate(OBJT_PHYS,
OFF_TO_IDX(foff + size));
@ -111,8 +112,11 @@ static void
phys_pager_dealloc(vm_object_t object)
{
if (object->handle != NULL)
if (object->handle != NULL) {
mtx_lock(&phys_pager_mtx);
TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
mtx_unlock(&phys_pager_mtx);
}
}
static int

View File

@ -80,6 +80,7 @@
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#ifndef MAX_PAGEOUT_CLUSTER
@ -118,7 +119,6 @@ static int nsw_wcount_sync; /* limit write buffers / synchronous */
static int nsw_wcount_async; /* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max; /* maximum VOP I/O allowed */
static int sw_alloc_interlock; /* swap pager allocation interlock */
struct blist *swapblist;
static struct swblock **swhash;
@ -145,6 +145,8 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
#define NOBJLIST(handle) \
(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
static struct sx sw_alloc_sx;	/* prevent concurrent creation */
static struct mtx sw_alloc_mtx; /* protect list manipulation */
static struct pagerlst swap_pager_object_list[NOBJLISTS];
struct pagerlst swap_pager_un_object_list;
vm_zone_t swap_zone;
@ -262,6 +264,8 @@ swap_pager_init()
for (i = 0; i < NOBJLISTS; ++i)
TAILQ_INIT(&swap_pager_object_list[i]);
TAILQ_INIT(&swap_pager_un_object_list);
sx_init(&sw_alloc_sx, "swap_pager create");
mtx_init(&sw_alloc_mtx, "swap_pager list", MTX_DEF);
/*
* Device Stripe, in PAGE_SIZE'd blocks
@ -385,11 +389,7 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
* of the handle.
*/
while (sw_alloc_interlock) {
sw_alloc_interlock = -1;
tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
}
sw_alloc_interlock = 1;
sx_xlock(&sw_alloc_sx);
object = vm_pager_object_lookup(NOBJLIST(handle), handle);
@ -403,10 +403,7 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
}
if (sw_alloc_interlock < 0)
wakeup(&sw_alloc_interlock);
sw_alloc_interlock = 0;
sx_xunlock(&sw_alloc_sx);
} else {
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(offset + PAGE_MASK + size));
@ -441,11 +438,13 @@ swap_pager_dealloc(object)
* pageout completion.
*/
mtx_lock(&sw_alloc_mtx);
if (object->handle == NULL) {
TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
} else {
TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
}
mtx_unlock(&sw_alloc_mtx);
vm_object_pip_wait(object, "swpdea");
@ -642,6 +641,7 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
*/
if (destroysource) {
mtx_lock(&sw_alloc_mtx);
if (srcobject->handle == NULL) {
TAILQ_REMOVE(
&swap_pager_un_object_list,
@ -655,6 +655,7 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
pager_object_list
);
}
mtx_unlock(&sw_alloc_mtx);
}
/*
@ -1753,6 +1754,7 @@ swp_pager_meta_build(
object->type = OBJT_SWAP;
object->un_pager.swp.swp_bcount = 0;
mtx_lock(&sw_alloc_mtx);
if (object->handle != NULL) {
TAILQ_INSERT_TAIL(
NOBJLIST(object->handle),
@ -1766,6 +1768,7 @@ swp_pager_meta_build(
pager_object_list
);
}
mtx_unlock(&sw_alloc_mtx);
}
/*