- Introduce and use a new tunable "debug.mpsafevm".  At present, setting
  "debug.mpsafevm" results in (almost) Giant-free execution of zero-fill
  page faults.  (Giant is held only briefly, just long enough to determine
  if there is a vnode backing the faulting address.)

  Also, condition the acquisition and release of Giant around calls to
  pmap_remove() on "debug.mpsafevm".

  The effect on performance is significant.  On my dual Opteron, I see a
  3.6% reduction in "buildworld" time.

- Use atomic operations to update several counters in vm_fault().
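
Since the new sysctl is registered read-only (CTLFLAG_RD, see the vm_meter.c
hunk below), the knob can only be set at boot time through the loader.  A
minimal sketch, assuming the stock FreeBSD loader configuration file:

# /boot/loader.conf
# Set before boot; the sysctl is CTLFLAG_RD and cannot be toggled at runtime.
debug.mpsafevm=1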
Author: Alan Cox
Date:   2004-08-16 06:16:12 +00:00
Commit: c1fbc251cd (parent: 391d4a3856)
Notes:  svn2git (2020-12-20 02:59:44 +00:00): svn path=/head/; revision=133807

4 changed files with 36 additions and 11 deletions

--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h

@@ -110,6 +110,19 @@ struct vm_page;
 typedef struct vm_page *vm_page_t;
 #endif          /* _KERNEL */
 
+/*
+ *      Virtual memory MPSAFE temporary workarounds.
+ */
+extern int debug_mpsafevm;      /* defined in vm/vm_meter.c */
+#define VM_LOCK_GIANT() do {                    \
+        if (!debug_mpsafevm)                    \
+                mtx_lock(&Giant);               \
+} while (0)
+#define VM_UNLOCK_GIANT() do {                  \
+        if (!debug_mpsafevm)                    \
+                mtx_unlock(&Giant);             \
+} while (0)
+
 /*
  *      Information passed from the machine-independant VM initialization code
  *      for use by machine-dependant code (mainly for MMU support)
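
The do { ... } while (0) wrapper makes each macro behave as a single
statement, so it stays safe under an unbraced if/else.  The following
standalone userland analogue (not kernel code; a pthread mutex stands in
for Giant) illustrates the gating pattern the macros implement:

#include <pthread.h>
#include <stdio.h>

static int debug_mpsafevm;              /* 0 => take the big lock, as before */
static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;

/* Same shape as the kernel macros: lock only when the knob is off. */
#define VM_LOCK_GIANT() do {                    \
        if (!debug_mpsafevm)                    \
                pthread_mutex_lock(&giant);     \
} while (0)
#define VM_UNLOCK_GIANT() do {                  \
        if (!debug_mpsafevm)                    \
                pthread_mutex_unlock(&giant);   \
} while (0)

int
main(void)
{
        VM_LOCK_GIANT();
        printf("critical section, big lock %s\n",
            debug_mpsafevm ? "skipped" : "held");
        VM_UNLOCK_GIANT();
        return (0);
}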

--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c

@@ -168,10 +168,12 @@ _unlock_things(struct faultstate *fs, int dealloc)
 	unlock_map(fs);
 	if (fs->vp != NULL) {
 		vput(fs->vp);
+		if (debug_mpsafevm)
+			mtx_unlock(&Giant);
 		fs->vp = NULL;
 	}
 	if (dealloc)
-		mtx_unlock(&Giant);
+		VM_UNLOCK_GIANT();
 }
 
 #define unlock_things(fs) _unlock_things(fs, 0)
@@ -293,6 +295,8 @@ RetryFault:;
 	VM_OBJECT_LOCK(fs.first_object);
 	vm_object_reference_locked(fs.first_object);
 	fs.vp = vnode_pager_lock(fs.first_object);
+	if (fs.vp == NULL && debug_mpsafevm)
+		mtx_unlock(&Giant);
 	vm_object_pip_add(fs.first_object, 1);
 
 	fs.lookup_still_valid = TRUE;
@@ -364,8 +368,8 @@ RetryFault:;
 			vm_page_lock_queues();
 			if (!vm_page_sleep_if_busy(fs.m, TRUE, "vmpfw"))
 				vm_page_unlock_queues();
-			cnt.v_intrans++;
-			mtx_unlock(&Giant);
+			atomic_add_int(&cnt.v_intrans, 1);
+			VM_UNLOCK_GIANT();
 			vm_object_deallocate(fs.first_object);
 			goto RetryFault;
 		}
@@ -637,9 +641,9 @@ RetryFault:;
 			if ((fs.m->flags & PG_ZERO) == 0) {
 				pmap_zero_page(fs.m);
 			} else {
-				cnt.v_ozfod++;
+				atomic_add_int(&cnt.v_ozfod, 1);
 			}
-			cnt.v_zfod++;
+			atomic_add_int(&cnt.v_zfod, 1);
 			fs.m->valid = VM_PAGE_BITS_ALL;
 			break;	/* break to PAGE HAS BEEN FOUND */
 		} else {
@@ -722,7 +726,7 @@ RetryFault:;
 				vm_page_unlock_queues();
 				fs.first_m = fs.m;
 				fs.m = NULL;
-				cnt.v_cow_optim++;
+				atomic_add_int(&cnt.v_cow_optim, 1);
 			} else {
 				/*
 				 * Oh, well, lets copy it.
@@ -750,7 +754,7 @@ RetryFault:;
 			fs.m = fs.first_m;
 			if (!is_first_object_locked)
 				VM_OBJECT_LOCK(fs.object);
-			cnt.v_cow_faults++;
+			atomic_add_int(&cnt.v_cow_faults, 1);
 		} else {
 			prot &= ~VM_PROT_WRITE;
 		}
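
Once Giant no longer serializes the fault path under debug.mpsafevm, a
plain increment such as cnt.v_zfod++ is a non-atomic read-modify-write and
can lose updates when two CPUs fault concurrently; atomic_add_int() cannot.
A userland sketch of the same hazard using C11 stdatomic (the kernel uses
the atomic(9) primitives instead):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int plain_ctr;                   /* racy: ++ is load, add, store */
static atomic_int atomic_ctr;           /* safe: one indivisible add */

static void *
worker(void *arg)
{
        for (int i = 0; i < 1000000; i++) {
                plain_ctr++;                            /* updates may be lost */
                atomic_fetch_add(&atomic_ctr, 1);       /* never loses one */
        }
        return (NULL);
}

int
main(void)
{
        pthread_t t[4];
        int i;

        for (i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        /* plain_ctr typically falls short of 4000000; atomic_ctr never does. */
        printf("plain=%d atomic=%d\n", plain_ctr, atomic_load(&atomic_ctr));
        return (0);
}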

--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c

@@ -2114,9 +2114,9 @@ vm_map_sync(
 	}
 
 	if (invalidate) {
-		mtx_lock(&Giant);
+		VM_LOCK_GIANT();
 		pmap_remove(map->pmap, start, end);
-		mtx_unlock(&Giant);
+		VM_UNLOCK_GIANT();
 	}
 	/*
 	 * Make a second pass, cleaning/uncaching pages from the indicated
@@ -2282,10 +2282,10 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 		}
 
 		if (!map->system_map)
-			mtx_lock(&Giant);
+			VM_LOCK_GIANT();
 		pmap_remove(map->pmap, entry->start, entry->end);
 		if (!map->system_map)
-			mtx_unlock(&Giant);
+			VM_UNLOCK_GIANT();
 
 		/*
 		 * Delete the entry (which may delete the object) only after

--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c

@@ -52,6 +52,14 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 
 #include <sys/sysctl.h>
 
+/*
+ *      Virtual memory MPSAFE temporary workarounds.
+ */
+int debug_mpsafevm;
+TUNABLE_INT("debug.mpsafevm", &debug_mpsafevm);
+SYSCTL_INT(_debug, OID_AUTO, mpsafevm, CTLFLAG_RD, &debug_mpsafevm, 0,
+    "Enable/disable MPSAFE virtual memory support");
+
 struct vmmeter cnt;
 int maxslp = MAXSLP;
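
After boot, the new node appears under the debug tree and can be read,
though not written, given CTLFLAG_RD.  A small userland sketch using
sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
        int val;
        size_t len = sizeof(val);

        /* Read-only node (CTLFLAG_RD): pass NULL for the "new value". */
        if (sysctlbyname("debug.mpsafevm", &val, &len, NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("debug.mpsafevm=%d\n", val);
        return (0);
}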