- When the VM daemon is out of swap space and looking for a
  process to kill, don't block on a map lock while holding the
  process lock.  Instead, skip processes whose map locks are held
  and find something else to kill.
- Add vm_map_trylock_read() to support the above.

Reviewed by:	alc, mike (mentor)
David Schultz  2003-03-12 23:13:16 +00:00
commit 72d97679ff (parent edcd017d67)
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=112167

3 changed files with 23 additions and 4 deletions
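
To make the intent of the two items above concrete, here is a minimal userland sketch of the same skip-on-contention pattern, written against POSIX threads rather than the kernel locking primitives. All names here (struct candidate, pick_victim, the lock fields) are illustrative stand-ins, not FreeBSD interfaces: while a per-candidate lock is held, the second lock is only tried, and a busy candidate is skipped instead of slept on.

#include <pthread.h>
#include <stddef.h>

struct candidate {
	pthread_mutex_t proc_lock;	/* stand-in for PROC_LOCK() */
	pthread_rwlock_t map_lock;	/* stand-in for the vm_map lock */
	size_t size;			/* stand-in for resident + swap pages */
};

/* Pick the biggest candidate whose map lock can be taken without sleeping. */
static struct candidate *
pick_victim(struct candidate *cand, int ncand)
{
	struct candidate *victim = NULL;
	size_t biggest = 0;
	int i;

	for (i = 0; i < ncand; i++) {
		pthread_mutex_lock(&cand[i].proc_lock);
		/*
		 * The point of the change: with proc_lock held, only *try*
		 * the map lock; if it is busy, skip this candidate rather
		 * than blocking while proc_lock is held.
		 */
		if (pthread_rwlock_tryrdlock(&cand[i].map_lock) != 0) {
			pthread_mutex_unlock(&cand[i].proc_lock);
			continue;
		}
		if (cand[i].size > biggest) {
			biggest = cand[i].size;
			victim = &cand[i];
		}
		pthread_rwlock_unlock(&cand[i].map_lock);
		pthread_mutex_unlock(&cand[i].proc_lock);
	}
	return (victim);
}

int
main(void)
{
	struct candidate cand[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER, 100 },
		{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER, 400 },
	};

	return (pick_victim(cand, 2) == &cand[1] ? 0 : 1);
}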

sys/vm/vm_map.c

@@ -346,6 +346,8 @@ vmspace_exitfree(struct proc *p)
  * vmspace_swap_count() - count the approximate swap useage in pages for a
  * vmspace.
  *
+ * The map must be locked.
+ *
  * Swap useage is determined by taking the proportional swap used by
  * VM objects backing the VM map.  To make up for fractional losses,
  * if the VM object has any swap use at all the associated map entries
@@ -358,7 +360,6 @@ vmspace_swap_count(struct vmspace *vmspace)
         vm_map_entry_t cur;
         int count = 0;
-        vm_map_lock_read(map);
         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
                 vm_object_t object;
@@ -374,7 +375,6 @@ vmspace_swap_count(struct vmspace *vmspace)
                         }
                 }
         }
-        vm_map_unlock_read(map);
         return (count);
 }
@@ -438,6 +438,17 @@ _vm_map_trylock(vm_map_t map, const char *file, int line)
         return (error == 0);
 }
+int
+_vm_map_trylock_read(vm_map_t map, const char *file, int line)
+{
+        int error;
+        error = map->system_map ?
+            !_mtx_trylock(&map->system_mtx, 0, file, line) :
+            lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
+        return (error == 0);
+}
 int
 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
 {
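
One detail of _vm_map_trylock_read() (and of the existing _vm_map_trylock() it mirrors) that is easy to misread: _mtx_trylock() reports success with a nonzero return, while lockmgr() reports success with zero, so the mutex result is negated to fold both cases into "error == 0 means the lock was obtained", and the wrapper then returns nonzero exactly on success. Below is a standalone toy version of that normalization; try_a, try_b, and wrapper_trylock are made-up names, not kernel interfaces.

#include <stdio.h>

/* Toy stand-ins for two trylock primitives with opposite conventions. */
static int try_a(void) { return (1); }  /* mtx_trylock-style: nonzero == got it */
static int try_b(void) { return (0); }  /* lockmgr-style: zero == got it */

static int
wrapper_trylock(int use_a)
{
	int error;

	/* Negating try_a() folds both into "error == 0 means success". */
	error = use_a ? !try_a() : try_b();
	return (error == 0);	/* nonzero iff the lock is now held */
}

int
main(void)
{
	printf("%d %d\n", wrapper_trylock(1), wrapper_trylock(0));
	return (0);
}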

sys/vm/vm_map.h

@@ -248,6 +248,7 @@ void _vm_map_unlock(vm_map_t map, const char *file, int line);
 void _vm_map_lock_read(vm_map_t map, const char *file, int line);
 void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
 int _vm_map_trylock(vm_map_t map, const char *file, int line);
+int _vm_map_trylock_read(vm_map_t map, const char *file, int line);
 int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
 void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
 int vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait);
@@ -258,6 +259,8 @@ void vm_map_wakeup(vm_map_t map);
 #define vm_map_lock_read(map) _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
 #define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
 #define vm_map_trylock(map) _vm_map_trylock(map, LOCK_FILE, LOCK_LINE)
+#define vm_map_trylock_read(map) \
+        _vm_map_trylock_read(map, LOCK_FILE, LOCK_LINE)
 #define vm_map_lock_upgrade(map) \
         _vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
 #define vm_map_lock_downgrade(map) \
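
The header change follows the existing convention in vm_map.h: the public name vm_map_trylock_read() is a macro that forwards to _vm_map_trylock_read() along with LOCK_FILE and LOCK_LINE, so lock debugging can attribute the acquisition to the caller's source location rather than the wrapper's. A sketch of that shape follows, using plain __FILE__/__LINE__ and an invented widget lock in place of the kernel's LOCK_FILE/LOCK_LINE machinery.

#include <stdio.h>

/* Implementation routine: takes the call site explicitly. */
static int
_widget_trylock(int *lockedp, const char *file, int line)
{
	if (*lockedp)
		return (0);		/* already held; trylock fails */
	*lockedp = 1;
	printf("widget lock taken at %s:%d\n", file, line);
	return (1);
}

/* Public name: every caller automatically passes its own file and line. */
#define widget_trylock(lp)	_widget_trylock((lp), __FILE__, __LINE__)

int
main(void)
{
	int locked = 0;

	if (widget_trylock(&locked))
		printf("first try succeeded\n");
	if (!widget_trylock(&locked))
		printf("second try failed, as expected\n");
	return (0);
}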

sys/vm/vm_pageout.c

@@ -1214,8 +1214,13 @@ vm_pageout_scan(int pass)
         /*
          * get the process size
          */
-        size = vmspace_resident_count(p->p_vmspace) +
-                vmspace_swap_count(p->p_vmspace);
+        if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
+                PROC_UNLOCK(p);
+                continue;
+        }
+        size = vmspace_swap_count(p->p_vmspace);
+        vm_map_unlock_read(&p->p_vmspace->vm_map);
+        size += vmspace_resident_count(p->p_vmspace);
         /*
          * if the this process is bigger than the biggest one
          * remember it.
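
The vm_pageout_scan() rework also reorders the size computation so the map lock is held only where it is actually needed: vmspace_swap_count() now runs under the read lock the caller just try-acquired, the map is unlocked, and vmspace_resident_count() (which only reads the pmap's resident-page counter and does not need the map lock) is added afterwards. Here is a small sketch of that keep-the-critical-section-minimal shape, again in userland terms with hypothetical names (struct proc_acct, proc_size).

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-process accounting; field names are illustrative. */
struct proc_acct {
	pthread_rwlock_t map_lock;	/* protects swap_pages */
	size_t swap_pages;		/* read only under map_lock */
	size_t resident_pages;		/* independent counter, no map_lock */
};

/*
 * Returns nonzero and fills *sizep if the lock could be taken; returns 0
 * when the lock is busy so the caller can skip this process.
 */
static int
proc_size(struct proc_acct *p, size_t *sizep)
{
	size_t size;

	if (pthread_rwlock_tryrdlock(&p->map_lock) != 0)
		return (0);
	size = p->swap_pages;			/* needs the lock */
	pthread_rwlock_unlock(&p->map_lock);
	size += p->resident_pages;		/* does not need the lock */
	*sizep = size;
	return (1);
}

int
main(void)
{
	struct proc_acct p = { PTHREAD_RWLOCK_INITIALIZER, 32, 128 };
	size_t size;

	if (proc_size(&p, &size))
		printf("size = %zu pages\n", size);
	return (0);
}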