o Use vm_map_wire() and vm_map_unwire() in place of vm_map_pageable() and
  vm_map_user_pageable().
o Remove vm_map_pageable() and vm_map_user_pageable().
o Remove vm_map_clear_recursive() and vm_map_set_recursive().  (They were
  only used by vm_map_pageable() and vm_map_user_pageable().)

Reviewed by:	tegge
commit 1d7cf06c8c
parent b6037a7953
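For reference, the call-site migration is mechanical and can be read directly off the hunks below: vm_map_pageable(map, start, end, FALSE/TRUE) becomes vm_map_wire()/vm_map_unwire() with a final user_wire argument of FALSE, and vm_map_user_pageable(map, start, end, FALSE/TRUE) becomes vm_map_wire()/vm_map_unwire() with user_wire TRUE (as in the mlock()/munlock() hunks). The fragment below is an illustrative sketch only, not part of the commit; the function name example_wire_range and the exact include list are assumptions made for the example.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

/*
 * Hypothetical example (not in the tree): wire and then unwire a range of a
 * process's map using the new interface.  The last argument selects a system
 * wiring (FALSE) versus an mlock(2)-style user wiring (TRUE).
 */
static void
example_wire_range(struct vmspace *vm, vm_offset_t addr, vm_size_t len)
{
	vm_offset_t start = trunc_page(addr);
	vm_offset_t end = round_page(addr + len);

	/* Was: vm_map_pageable(&vm->vm_map, start, end, FALSE); */
	(void) vm_map_wire(&vm->vm_map, start, end, FALSE);

	/* Was: vm_map_pageable(&vm->vm_map, start, end, TRUE); */
	(void) vm_map_unwire(&vm->vm_map, start, end, FALSE);
}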
@@ -261,7 +261,7 @@ contigmalloc1(
 			    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
 			tmp_addr += PAGE_SIZE;
 		}
-		vm_map_pageable(map, addr, addr + size, FALSE);
+		vm_map_wire(map, addr, addr + size, FALSE);
 
 		splx(s);
 		return ((void *)addr);
sys/vm/vm_glue.c
@@ -170,8 +170,7 @@ vslock(addr, len)
 	u_int len;
 {
 	GIANT_REQUIRED;
-	vm_map_pageable(&curproc->p_vmspace->vm_map,
-	    trunc_page((vm_offset_t)addr),
+	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
 	    round_page((vm_offset_t)addr + len), FALSE);
 }
 
@@ -181,9 +180,9 @@ vsunlock(addr, len)
 	u_int len;
 {
 	GIANT_REQUIRED;
-	vm_map_pageable(&curproc->p_vmspace->vm_map,
+	vm_map_unwire(&curproc->p_vmspace->vm_map,
 	    trunc_page((vm_offset_t)addr),
-	    round_page((vm_offset_t)addr + len), TRUE);
+	    round_page((vm_offset_t)addr + len), FALSE);
 }
 
 /*
sys/vm/vm_kern.c
@@ -212,7 +212,7 @@ kmem_alloc(map, size)
 	/*
 	 * And finally, mark the data as non-pageable.
 	 */
-	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
+	(void) vm_map_wire(map, addr, addr + size, FALSE);
 
 	return (addr);
 }
sys/vm/vm_map.c
@@ -413,16 +413,6 @@ _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
 	    ("%s: lock not held", __func__));
 }
 
-void
-_vm_map_set_recursive(vm_map_t map, const char *file, int line)
-{
-}
-
-void
-_vm_map_clear_recursive(vm_map_t map, const char *file, int line)
-{
-}
-
 /*
  *	vm_map_unlock_and_wait:
  */
@@ -1802,393 +1792,6 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	return (rv);
 }
 
-/*
- * Implement the semantics of mlock
- */
-int
-vm_map_user_pageable(
-	vm_map_t map,
-	vm_offset_t start,
-	vm_offset_t end,
-	boolean_t new_pageable)
-{
-	vm_map_entry_t entry;
-	vm_map_entry_t start_entry;
-	vm_offset_t estart;
-	vm_offset_t eend;
-	int rv;
-
-	vm_map_lock(map);
-	VM_MAP_RANGE_CHECK(map, start, end);
-
-	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
-		vm_map_unlock(map);
-		return (KERN_INVALID_ADDRESS);
-	}
-
-	if (new_pageable) {
-
-		entry = start_entry;
-		vm_map_clip_start(map, entry, start);
-
-		/*
-		 * Now decrement the wiring count for each region. If a region
-		 * becomes completely unwired, unwire its physical pages and
-		 * mappings.
-		 */
-		while ((entry != &map->header) && (entry->start < end)) {
-			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
-				vm_map_clip_end(map, entry, end);
-				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
-				entry->wired_count--;
-				if (entry->wired_count == 0)
-					vm_fault_unwire(map, entry->start, entry->end);
-			}
-			vm_map_simplify_entry(map,entry);
-			entry = entry->next;
-		}
-	} else {
-
-		entry = start_entry;
-
-		while ((entry != &map->header) && (entry->start < end)) {
-
-			if (entry->eflags & MAP_ENTRY_USER_WIRED) {
-				entry = entry->next;
-				continue;
-			}
-
-			if (entry->wired_count != 0) {
-				entry->wired_count++;
-				entry->eflags |= MAP_ENTRY_USER_WIRED;
-				entry = entry->next;
-				continue;
-			}
-
-			/* Here on entry being newly wired */
-
-			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
-				int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
-				if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
-
-					vm_object_shadow(&entry->object.vm_object,
-					    &entry->offset,
-					    atop(entry->end - entry->start));
-					entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
-
-				} else if (entry->object.vm_object == NULL &&
-				    !map->system_map) {
-
-					entry->object.vm_object =
-					    vm_object_allocate(OBJT_DEFAULT,
-						atop(entry->end - entry->start));
-					entry->offset = (vm_offset_t) 0;
-
-				}
-			}
-
-			vm_map_clip_start(map, entry, start);
-			vm_map_clip_end(map, entry, end);
-
-			entry->wired_count++;
-			entry->eflags |= MAP_ENTRY_USER_WIRED;
-			estart = entry->start;
-			eend = entry->end;
-
-			/* First we need to allow map modifications */
-			vm_map_set_recursive(map);
-			vm_map_lock_downgrade(map);
-			map->timestamp++;
-
-			rv = vm_fault_user_wire(map, entry->start, entry->end);
-			if (rv) {
-
-				entry->wired_count--;
-				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
-
-				vm_map_clear_recursive(map);
-				vm_map_unlock(map);
-
-				/*
-				 * At this point, the map is unlocked, and
-				 * entry might no longer be valid.  Use copy
-				 * of entry start value obtained while entry
-				 * was valid.
-				 */
-				(void) vm_map_user_pageable(map, start, estart,
-				    TRUE);
-				return rv;
-			}
-
-			vm_map_clear_recursive(map);
-			if (vm_map_lock_upgrade(map)) {
-				vm_map_lock(map);
-				if (vm_map_lookup_entry(map, estart, &entry)
-				    == FALSE) {
-					vm_map_unlock(map);
-					/*
-					 * vm_fault_user_wire succeded, thus
-					 * the area between start and eend
-					 * is wired and has to be unwired
-					 * here as part of the cleanup.
-					 */
-					(void) vm_map_user_pageable(map,
-					    start,
-					    eend,
-					    TRUE);
-					return (KERN_INVALID_ADDRESS);
-				}
-			}
-			vm_map_simplify_entry(map,entry);
-		}
-	}
-	map->timestamp++;
-	vm_map_unlock(map);
-	return KERN_SUCCESS;
-}
-
-/*
- *	vm_map_pageable:
- *
- *	Sets the pageability of the specified address
- *	range in the target map.  Regions specified
- *	as not pageable require locked-down physical
- *	memory and physical page maps.
- *
- *	The map must not be locked, but a reference
- *	must remain to the map throughout the call.
- */
-int
-vm_map_pageable(
-	vm_map_t map,
-	vm_offset_t start,
-	vm_offset_t end,
-	boolean_t new_pageable)
-{
-	vm_map_entry_t entry;
-	vm_map_entry_t start_entry;
-	vm_offset_t failed = 0;
-	int rv;
-
-	GIANT_REQUIRED;
-
-	vm_map_lock(map);
-
-	VM_MAP_RANGE_CHECK(map, start, end);
-
-	/*
-	 * Only one pageability change may take place at one time, since
-	 * vm_fault assumes it will be called only once for each
-	 * wiring/unwiring.  Therefore, we have to make sure we're actually
-	 * changing the pageability for the entire region.  We do so before
-	 * making any changes.
-	 */
-	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
-		vm_map_unlock(map);
-		return (KERN_INVALID_ADDRESS);
-	}
-	entry = start_entry;
-
-	/*
-	 * Actions are rather different for wiring and unwiring, so we have
-	 * two separate cases.
-	 */
-	if (new_pageable) {
-		vm_map_clip_start(map, entry, start);
-
-		/*
-		 * Unwiring.  First ensure that the range to be unwired is
-		 * really wired down and that there are no holes.
-		 */
-		while ((entry != &map->header) && (entry->start < end)) {
-			if (entry->wired_count == 0 ||
-			    (entry->end < end &&
-			    (entry->next == &map->header ||
-			    entry->next->start > entry->end))) {
-				vm_map_unlock(map);
-				return (KERN_INVALID_ARGUMENT);
-			}
-			entry = entry->next;
-		}
-
-		/*
-		 * Now decrement the wiring count for each region. If a region
-		 * becomes completely unwired, unwire its physical pages and
-		 * mappings.
-		 */
-		entry = start_entry;
-		while ((entry != &map->header) && (entry->start < end)) {
-			vm_map_clip_end(map, entry, end);
-
-			entry->wired_count--;
-			if (entry->wired_count == 0)
-				vm_fault_unwire(map, entry->start, entry->end);
-
-			vm_map_simplify_entry(map, entry);
-
-			entry = entry->next;
-		}
-	} else {
-		/*
-		 * Wiring.  We must do this in two passes:
-		 *
-		 * 1.  Holding the write lock, we create any shadow or zero-fill
-		 * objects that need to be created. Then we clip each map
-		 * entry to the region to be wired and increment its wiring
-		 * count.  We create objects before clipping the map entries
-		 * to avoid object proliferation.
-		 *
-		 * 2.  We downgrade to a read lock, and call vm_fault_wire to
-		 * fault in the pages for any newly wired area (wired_count is
-		 * 1).
-		 *
-		 * Downgrading to a read lock for vm_fault_wire avoids a possible
-		 * deadlock with another process that may have faulted on one
-		 * of the pages to be wired (it would mark the page busy,
-		 * blocking us, then in turn block on the map lock that we
-		 * hold).  Because of problems in the recursive lock package,
-		 * we cannot upgrade to a write lock in vm_map_lookup.  Thus,
-		 * any actions that require the write lock must be done
-		 * beforehand.  Because we keep the read lock on the map, the
-		 * copy-on-write status of the entries we modify here cannot
-		 * change.
-		 */
-
-		/*
-		 * Pass 1.
-		 */
-		while ((entry != &map->header) && (entry->start < end)) {
-			if (entry->wired_count == 0) {
-
-				/*
-				 * Perform actions of vm_map_lookup that need
-				 * the write lock on the map: create a shadow
-				 * object for a copy-on-write region, or an
-				 * object for a zero-fill region.
-				 *
-				 * We don't have to do this for entries that
-				 * point to sub maps, because we won't
-				 * hold the lock on the sub map.
-				 */
-				if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
-					int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
-					if (copyflag &&
-					    ((entry->protection & VM_PROT_WRITE) != 0)) {
-
-						vm_object_shadow(&entry->object.vm_object,
-						    &entry->offset,
-						    atop(entry->end - entry->start));
-						entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
-					} else if (entry->object.vm_object == NULL &&
-					    !map->system_map) {
-						entry->object.vm_object =
-						    vm_object_allocate(OBJT_DEFAULT,
-							atop(entry->end - entry->start));
-						entry->offset = (vm_offset_t) 0;
-					}
-				}
-			}
-			vm_map_clip_start(map, entry, start);
-			vm_map_clip_end(map, entry, end);
-			entry->wired_count++;
-
-			/*
-			 * Check for holes
-			 */
-			if (entry->end < end &&
-			    (entry->next == &map->header ||
-			    entry->next->start > entry->end)) {
-				/*
-				 * Found one.  Object creation actions do not
-				 * need to be undone, but the wired counts
-				 * need to be restored.
-				 */
-				while (entry != &map->header && entry->end > start) {
-					entry->wired_count--;
-					entry = entry->prev;
-				}
-				vm_map_unlock(map);
-				return (KERN_INVALID_ARGUMENT);
-			}
-			entry = entry->next;
-		}
-
-		/*
-		 * Pass 2.
-		 */
-
-		/*
-		 * HACK HACK HACK HACK
-		 *
-		 * If we are wiring in the kernel map or a submap of it,
-		 * unlock the map to avoid deadlocks.  We trust that the
-		 * kernel is well-behaved, and therefore will not do
-		 * anything destructive to this region of the map while
-		 * we have it unlocked.  We cannot trust user processes
-		 * to do the same.
-		 *
-		 * HACK HACK HACK HACK
-		 */
-		if (vm_map_pmap(map) == kernel_pmap) {
-			vm_map_unlock(map);	/* trust me ... */
-		} else {
-			vm_map_lock_downgrade(map);
-		}
-
-		rv = 0;
-		entry = start_entry;
-		while (entry != &map->header && entry->start < end) {
-			/*
-			 * If vm_fault_wire fails for any page we need to undo
-			 * what has been done.  We decrement the wiring count
-			 * for those pages which have not yet been wired (now)
-			 * and unwire those that have (later).
-			 *
-			 * XXX this violates the locking protocol on the map,
-			 * needs to be fixed.
-			 */
-			if (rv)
-				entry->wired_count--;
-			else if (entry->wired_count == 1) {
-				rv = vm_fault_wire(map, entry->start, entry->end);
-				if (rv) {
-					failed = entry->start;
-					entry->wired_count--;
-				}
-			}
-			entry = entry->next;
-		}
-
-		if (vm_map_pmap(map) == kernel_pmap) {
-			vm_map_lock(map);
-		}
-		if (rv) {
-			vm_map_unlock(map);
-			(void) vm_map_pageable(map, start, failed, TRUE);
-			return (rv);
-		}
-		/*
-		 * An exclusive lock on the map is needed in order to call
-		 * vm_map_simplify_entry().  If the current lock on the map
-		 * is only a shared lock, an upgrade is needed.
-		 */
-		if (vm_map_pmap(map) != kernel_pmap &&
-		    vm_map_lock_upgrade(map)) {
-			vm_map_lock(map);
-			if (vm_map_lookup_entry(map, start, &start_entry) ==
-			    FALSE) {
-				vm_map_unlock(map);
-				return KERN_SUCCESS;
-			}
-		}
-		vm_map_simplify_entry(map, start_entry);
-	}
-
-	vm_map_unlock(map);
-
-	return (KERN_SUCCESS);
-}
-
 /*
  *	vm_map_clean
  *
sys/vm/vm_map.h
@@ -245,8 +245,6 @@ void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
 int _vm_map_trylock(vm_map_t map, const char *file, int line);
 int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
 void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
-void _vm_map_set_recursive(vm_map_t map, const char *file, int line);
-void _vm_map_clear_recursive(vm_map_t map, const char *file, int line);
 
 #define	vm_map_lock(map)	_vm_map_lock(map, LOCK_FILE, LOCK_LINE)
 #define	vm_map_unlock(map)	_vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
@@ -257,10 +255,6 @@ void _vm_map_clear_recursive(vm_map_t map, const char *file, int line);
 	_vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
 #define	vm_map_lock_downgrade(map)	\
 	_vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE)
-#define	vm_map_set_recursive(map)	\
-	_vm_map_set_recursive(map, LOCK_FILE, LOCK_LINE)
-#define	vm_map_clear_recursive(map)	\
-	_vm_map_clear_recursive(map, LOCK_FILE, LOCK_LINE)
 
 long vmspace_resident_count(struct vmspace *vmspace);
 #endif /* _KERNEL */
@@ -306,8 +300,6 @@ int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_obje
     vm_pindex_t *, vm_prot_t *, boolean_t *);
 void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
 boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
-int vm_map_pageable (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
-int vm_map_user_pageable (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
 int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
 int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
 int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
sys/vm/vm_mmap.c
@@ -1012,8 +1012,8 @@ mlock(td, uap)
 #endif
 
 	mtx_lock(&Giant);
-	error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
-	    addr + size, FALSE);
+	error = vm_map_wire(&td->td_proc->p_vmspace->vm_map, addr,
+	    addr + size, TRUE);
 	mtx_unlock(&Giant);
 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
 }
@@ -1093,7 +1093,7 @@ munlock(td, uap)
 #endif
 
 	mtx_lock(&Giant);
-	error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
+	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
 	    addr + size, TRUE);
 	mtx_unlock(&Giant);
 	return (error == KERN_SUCCESS ? 0 : ENOMEM);