Change inlines back into mainline code in preparation for mutexing.  Also,
most of these inlines had been bloated in -current far beyond their
original intent.  Normalize prototypes and function declarations to be
ANSI only (half already were).  And do some general cleanup.

(kernel size also reduced by 50-100K, but that isn't the prime intent)
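The conversion is mechanical throughout: K&R-style parameter lists become ANSI
prototypes, and the locking/accessor macros and inlines in the headers become
plain functions in the .c files, which gives future mutex logic a single
function body to land in rather than an expansion at every call site. A
condensed before/after sketch of both patterns, drawn from the vm_map hunks
below (function bodies elided; the real vm_map_lock() also carries the
MAP_LOCK_DIAGNOSTIC vm_map_printf() tracing):

	/* Before: K&R definition in vm_map.c, lock macro in vm_map.h. */
	void
	vmspace_free(vm)
		struct vmspace *vm;
	{
		...
	}

	#define vm_map_lock(map) \
		do { \
			if (lockmgr(&(map)->lock, LK_EXCLUSIVE, \
			    NULL, curproc) != 0) \
				panic("vm_map_lock: failed to get lock"); \
			(map)->timestamp++; \
		} while(0)

	/* After: ANSI definition, and the macro is now a real function. */
	void
	vmspace_free(struct vmspace *vm)
	{
		...
	}

	void
	vm_map_lock(vm_map_t map)
	{
		if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curproc) != 0)
			panic("vm_map_lock: failed to get lock");
		map->timestamp++;
	}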
parent 617e358cdf
commit 1b40f8c036

sys/vm/vm_map.c (289 lines changed):

--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -138,18 +138,8 @@ static struct vm_map_entry map_entry_init[MAX_MAPENT];
 static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
 static struct vm_map map_init[MAX_KMAP];
 
-static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
-static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
-static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
-static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
-static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
-static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
-static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
-		vm_map_entry_t));
-static void vm_map_split __P((vm_map_entry_t));
-
 void
-vm_map_startup()
+vm_map_startup(void)
 {
 	mapzone = &mapzone_store;
 	zbootinit(mapzone, "MAP", sizeof (struct vm_map),
@@ -185,7 +175,8 @@ vmspace_alloc(min, max)
 }
 
 void
-vm_init2(void) {
+vm_init2(void)
+{
 	zinitna(kmapentzone, &kmapentobj,
 		NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
 	zinitna(mapentzone, &mapentobj,
@@ -198,8 +189,7 @@ vm_init2(void) {
 }
 
 void
-vmspace_free(vm)
-	struct vmspace *vm;
+vmspace_free(struct vmspace *vm)
 {
 	GIANT_REQUIRED;
 
@@ -259,6 +249,119 @@ vmspace_swap_count(struct vmspace *vmspace)
 	return(count);
 }
 
+u_char
+vm_map_entry_behavior(struct vm_map_entry *entry)
+{
+	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
+}
+
+void
+vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
+{
+	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
+		(behavior & MAP_ENTRY_BEHAV_MASK);
+}
+
+void
+vm_map_lock(vm_map_t map)
+{
+	vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
+	if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curproc) != 0)
+		panic("vm_map_lock: failed to get lock");
+	map->timestamp++;
+}
+
+void
+vm_map_unlock(vm_map_t map)
+{
+	vm_map_printf("locking map LK_RELEASE: %p\n", map);
+	lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc);
+}
+
+void
+vm_map_lock_read(vm_map_t map)
+{
+	vm_map_printf("locking map LK_SHARED: %p\n", map);
+	lockmgr(&(map)->lock, LK_SHARED, NULL, curproc);
+}
+
+void
+vm_map_unlock_read(vm_map_t map)
+{
+	vm_map_printf("locking map LK_RELEASE: %p\n", map);
+	lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc);
+}
+
+static __inline__ int
+_vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
+	int error;
+
+	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
+	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, p);
+	if (error == 0)
+		map->timestamp++;
+	return error;
+}
+
+int
+vm_map_lock_upgrade(vm_map_t map)
+{
+	return(_vm_map_lock_upgrade(map, curproc));
+}
+
+void
+vm_map_lock_downgrade(vm_map_t map)
+{
+	vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
+	lockmgr(&map->lock, LK_DOWNGRADE, NULL, curproc);
+}
+
+void
+vm_map_set_recursive(vm_map_t map)
+{
+	mtx_lock((map)->lock.lk_interlock);
+	map->lock.lk_flags |= LK_CANRECURSE;
+	mtx_unlock((map)->lock.lk_interlock);
+}
+
+void
+vm_map_clear_recursive(vm_map_t map)
+{
+	mtx_lock((map)->lock.lk_interlock);
+	map->lock.lk_flags &= ~LK_CANRECURSE;
+	mtx_unlock((map)->lock.lk_interlock);
+}
+
+vm_offset_t
+vm_map_min(vm_map_t map)
+{
+	return(map->min_offset);
+}
+
+vm_offset_t
+vm_map_max(vm_map_t map)
+{
+	return(map->max_offset);
+}
+
+struct pmap *
+vm_map_pmap(vm_map_t map)
+{
+	return(map->pmap);
+}
+
+struct pmap *
+vmspace_pmap(struct vmspace *vmspace)
+{
+	return &vmspace->vm_pmap;
+}
+
+long
+vmspace_resident_count(struct vmspace *vmspace)
+{
+	return pmap_resident_count(vmspace_pmap(vmspace));
+}
+
 /*
  * vm_map_create:
  *
@@ -267,9 +370,7 @@ vmspace_swap_count(struct vmspace *vmspace)
  * the given lower and upper address bounds.
  */
 vm_map_t
-vm_map_create(pmap, min, max)
-	pmap_t pmap;
-	vm_offset_t min, max;
+vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
 {
 	vm_map_t result;
 
@@ -288,9 +389,7 @@ vm_map_create(pmap, min, max)
  * The pmap is set elsewhere.
  */
 void
-vm_map_init(map, min, max)
-	struct vm_map *map;
-	vm_offset_t min, max;
+vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
 {
 	GIANT_REQUIRED;
 
@@ -321,9 +420,7 @@ vm_map_destroy(map)
  * Inverse of vm_map_entry_create.
  */
 static void
-vm_map_entry_dispose(map, entry)
-	vm_map_t map;
-	vm_map_entry_t entry;
+vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
 {
 	zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
 }
@@ -335,8 +432,7 @@ vm_map_entry_dispose(map, entry)
  * No entry fields are filled in.
  */
 static vm_map_entry_t
-vm_map_entry_create(map)
-	vm_map_t map;
+vm_map_entry_create(vm_map_t map)
 {
 	vm_map_entry_t new_entry;
 
@@ -402,10 +498,10 @@ vm_map_entry_unlink(vm_map_t map,
  * actually contained in the map.
  */
 boolean_t
-vm_map_lookup_entry(map, address, entry)
-	vm_map_t map;
-	vm_offset_t address;
-	vm_map_entry_t *entry;	/* OUT */
+vm_map_lookup_entry(
+	vm_map_t map,
+	vm_offset_t address,
+	vm_map_entry_t *entry)	/* OUT */
 {
 	vm_map_entry_t cur;
 	vm_map_entry_t last;
@@ -644,11 +740,11 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 * `start'. The map must be locked. Returns 0 on success, 1 on no space.
 */
 int
-vm_map_findspace(map, start, length, addr)
-	vm_map_t map;
-	vm_offset_t start;
-	vm_size_t length;
-	vm_offset_t *addr;
+vm_map_findspace(
+	vm_map_t map,
+	vm_offset_t start,
+	vm_size_t length,
+	vm_offset_t *addr)
 {
 	vm_map_entry_t entry, next;
 	vm_offset_t end;
@@ -762,9 +858,7 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 * both neighbors.
 */
 void
-vm_map_simplify_entry(map, entry)
-	vm_map_t map;
-	vm_map_entry_t entry;
+vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_map_entry_t next, prev;
 	vm_size_t prevsize, esize;
@@ -841,10 +935,7 @@ vm_map_simplify_entry(map, entry)
 * the entry must be split.
 */
 static void
-_vm_map_clip_start(map, entry, start)
-	vm_map_t map;
-	vm_map_entry_t entry;
-	vm_offset_t start;
+_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
 {
 	vm_map_entry_t new_entry;
 
@@ -905,10 +996,7 @@ _vm_map_clip_start(map, entry, start)
 * the entry must be split.
 */
 static void
-_vm_map_clip_end(map, entry, end)
-	vm_map_t map;
-	vm_map_entry_t entry;
-	vm_offset_t end;
+_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
 {
 	vm_map_entry_t new_entry;
 
@@ -980,11 +1068,11 @@ _vm_map_clip_end(map, entry, end)
 * submap (if desired). [Better yet, don't try it.]
 */
 int
-vm_map_submap(map, start, end, submap)
-	vm_map_t map;
-	vm_offset_t start;
-	vm_offset_t end;
-	vm_map_t submap;
+vm_map_submap(
+	vm_map_t map,
+	vm_offset_t start,
+	vm_offset_t end,
+	vm_map_t submap)
 {
 	vm_map_entry_t entry;
 	int result = KERN_INVALID_ARGUMENT;
@@ -1111,10 +1199,11 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 */
 
 int
-vm_map_madvise(map, start, end, behav)
-	vm_map_t map;
-	vm_offset_t start, end;
-	int behav;
+vm_map_madvise(
+	vm_map_t map,
+	vm_offset_t start,
+	vm_offset_t end,
+	int behav)
 {
 	vm_map_entry_t current, entry;
 	int modify_map = 0;
@@ -1313,11 +1402,11 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 * Implement the semantics of mlock
 */
 int
-vm_map_user_pageable(map, start, end, new_pageable)
-	vm_map_t map;
-	vm_offset_t start;
-	vm_offset_t end;
-	boolean_t new_pageable;
+vm_map_user_pageable(
+	vm_map_t map,
+	vm_offset_t start,
+	vm_offset_t end,
+	boolean_t new_pageable)
 {
 	vm_map_entry_t entry;
 	vm_map_entry_t start_entry;
@@ -1451,11 +1540,11 @@ vm_map_user_pageable(map, start, end, new_pageable)
 * must remain to the map throughout the call.
 */
 int
-vm_map_pageable(map, start, end, new_pageable)
-	vm_map_t map;
-	vm_offset_t start;
-	vm_offset_t end;
-	boolean_t new_pageable;
+vm_map_pageable(
	vm_map_t map,
+	vm_offset_t start,
+	vm_offset_t end,
+	boolean_t new_pageable)
 {
 	vm_map_entry_t entry;
 	vm_map_entry_t start_entry;
@@ -1681,12 +1770,12 @@ vm_map_pageable(map, start, end, new_pageable)
 * Returns an error if any part of the specified range is not mapped.
 */
 int
-vm_map_clean(map, start, end, syncio, invalidate)
-	vm_map_t map;
-	vm_offset_t start;
-	vm_offset_t end;
-	boolean_t syncio;
-	boolean_t invalidate;
+vm_map_clean(
+	vm_map_t map,
+	vm_offset_t start,
+	vm_offset_t end,
+	boolean_t syncio,
+	boolean_t invalidate)
 {
 	vm_map_entry_t current;
 	vm_map_entry_t entry;
@@ -1807,9 +1896,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
 * [This is the reason for this routine's existence.]
 */
 static void
-vm_map_entry_unwire(map, entry)
-	vm_map_t map;
-	vm_map_entry_t entry;
+vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_fault_unwire(map, entry->start, entry->end);
 	entry->wired_count = 0;
@@ -1821,9 +1908,7 @@ vm_map_entry_unwire(map, entry)
 * Deallocate the given entry from the target map.
 */
 static void
-vm_map_entry_delete(map, entry)
-	vm_map_t map;
-	vm_map_entry_t entry;
+vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_map_entry_unlink(map, entry);
 	map->size -= entry->end - entry->start;
@@ -1842,10 +1927,7 @@ vm_map_entry_delete(map, entry)
 * map.
 */
 int
-vm_map_delete(map, start, end)
-	vm_map_t map;
-	vm_offset_t start;
-	vm_offset_t end;
+vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 {
 	vm_object_t object;
 	vm_map_entry_t entry;
@@ -1947,10 +2029,7 @@ vm_map_delete(map, start, end)
 * This is the exported form of vm_map_delete.
 */
 int
-vm_map_remove(map, start, end)
-	vm_map_t map;
-	vm_offset_t start;
-	vm_offset_t end;
+vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
 {
 	int result, s = 0;
 
@@ -2023,8 +2102,7 @@ vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
 * being a negative impact on memory usage.
 */
 static void
-vm_map_split(entry)
-	vm_map_entry_t entry;
+vm_map_split(vm_map_entry_t entry)
 {
 	vm_page_t m;
 	vm_object_t orig_object, new_object, source;
@@ -2121,9 +2199,11 @@ vm_map_split(entry)
 * entry. The entries *must* be aligned properly.
 */
 static void
-vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
-	vm_map_t src_map, dst_map;
-	vm_map_entry_t src_entry, dst_entry;
+vm_map_copy_entry(
+	vm_map_t src_map,
+	vm_map_t dst_map,
+	vm_map_entry_t src_entry,
+	vm_map_entry_t dst_entry)
 {
 	vm_object_t src_object;
 
@@ -2191,8 +2271,7 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
 * The source map must not be locked.
 */
 struct vmspace *
-vmspace_fork(vm1)
-	struct vmspace *vm1;
+vmspace_fork(struct vmspace *vm1)
 {
 	struct vmspace *vm2;
 	vm_map_t old_map = &vm1->vm_map;
@@ -2520,7 +2599,8 @@ Retry:
 */
 
 void
-vmspace_exec(struct proc *p) {
+vmspace_exec(struct proc *p)
+{
 	struct vmspace *oldvmspace = p->p_vmspace;
 	struct vmspace *newvmspace;
 	vm_map_t map = &p->p_vmspace->vm_map;
@@ -2549,7 +2629,8 @@ vmspace_exec(struct proc *p) {
 */
 
 void
-vmspace_unshare(struct proc *p) {
+vmspace_unshare(struct proc *p)
+{
 	struct vmspace *oldvmspace = p->p_vmspace;
 	struct vmspace *newvmspace;
 
@@ -2767,9 +2848,7 @@ RetryLookup:;
 */
 
 void
-vm_map_lookup_done(map, entry)
-	vm_map_t map;
-	vm_map_entry_t entry;
+vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
 {
 	/*
	 * Unlock the main-level map
@@ -2784,13 +2863,13 @@ vm_map_lookup_done(map, entry)
 * operations.
 */
 int
-vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
-	vm_map_t mapa;
-	vm_object_t srcobject;
-	off_t cp;
-	int cnta;
-	vm_offset_t uaddra;
-	int *npages;
+vm_uiomove(
+	vm_map_t mapa,
+	vm_object_t srcobject,
+	off_t cp,
+	int cnta,
+	vm_offset_t uaddra,
+	int *npages)
 {
 	vm_map_t map;
 	vm_object_t first_object, oldobject, object;
@@ -3018,9 +3097,7 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
 * from other processes, file unlinking, and file size shrinkage.
 */
 void
-vm_freeze_copyopts(object, froma, toa)
-	vm_object_t object;
-	vm_pindex_t froma, toa;
+vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
 {
 	int rv;
 	vm_object_t robject;

sys/vm/vm_map.h (177 lines changed):

--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -73,6 +73,13 @@
 
 #include <sys/lockmgr.h>
 
+#ifdef MAP_LOCK_DIAGNOSTIC
+#include <sys/systm.h>
+#define	vm_map_printf(str, arg) printf(str,arg)
+#else
+#define	vm_map_printf(str, arg)
+#endif
+
 /*
  * Types defined:
  *
@@ -132,19 +139,6 @@ struct vm_map_entry {
 
 #define MAP_ENTRY_NOCOREDUMP 0x0400 /* don't include in a core */
 
-static __inline u_char
-vm_map_entry_behavior(struct vm_map_entry *entry)
-{
-	return entry->eflags & MAP_ENTRY_BEHAV_MASK;
-}
-
-static __inline void
-vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
-{
-	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
-		(behavior & MAP_ENTRY_BEHAV_MASK);
-}
-
 /*
  * Maps are doubly-linked lists of map entries, kept sorted
  * by address. A single hint is provided to start
@@ -197,6 +191,10 @@ struct vmspace {
 };
 
 #ifdef _KERNEL
+
+u_char vm_map_entry_behavior(struct vm_map_entry *entry);
+void vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior);
+
 /*
  * Macros: vm_map_lock, etc.
  * Function:
@@ -207,6 +205,7 @@ struct vmspace {
  * as unbraced elements in a higher level statement.
  */
 
+#if 0
 /* XXX This macro is not called anywhere, and (map)->ref_lock doesn't exist. */
 #define vm_map_lock_drain_interlock(map) \
 	do { \
@@ -214,95 +213,25 @@ struct vmspace {
 		    &(map)->ref_lock, curproc); \
 		(map)->timestamp++; \
 	} while(0)
 
-/* #define MAP_LOCK_DIAGNOSTIC 1 */
-#ifdef MAP_LOCK_DIAGNOSTIC
-#include <sys/systm.h>
-#define	vm_map_printf(str, arg) printf(str,arg)
-#else
-#define	vm_map_printf(str, arg)
 #endif
 
-#define	vm_map_lock(map) \
-	do { \
-		vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map); \
-		if (lockmgr(&(map)->lock, LK_EXCLUSIVE, \
-		    NULL, curproc) != 0) \
-			panic("vm_map_lock: failed to get lock"); \
-		(map)->timestamp++; \
-	} while(0)
+void vm_map_lock(vm_map_t map);
+void vm_map_unlock(vm_map_t map);
+void vm_map_lock_read(vm_map_t map);
+void vm_map_unlock_read(vm_map_t map);
+int vm_map_lock_upgrade(vm_map_t map);
+void vm_map_lock_downgrade(vm_map_t map);
+void vm_map_set_recursive(vm_map_t map);
+void vm_map_clear_recursive(vm_map_t map);
+vm_offset_t vm_map_min(vm_map_t map);
+vm_offset_t vm_map_max(vm_map_t map);
+struct pmap *vm_map_pmap(vm_map_t map);
 
-#define	vm_map_unlock(map) \
-	do { \
-		vm_map_printf("locking map LK_RELEASE: %p\n", map); \
-		lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc); \
-	} while (0)
-
-#define	vm_map_lock_read(map) \
-	do { \
-		vm_map_printf("locking map LK_SHARED: %p\n", map); \
-		lockmgr(&(map)->lock, LK_SHARED, \
-		    NULL, curproc); \
-	} while (0)
-
-#define	vm_map_unlock_read(map) \
-	do { \
-		vm_map_printf("locking map LK_RELEASE: %p\n", map); \
-		lockmgr(&(map)->lock, LK_RELEASE, NULL, curproc); \
-	} while (0)
-
-static __inline__ int
-_vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
-	int error;
-
-	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
-	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, p);
-	if (error == 0)
-		map->timestamp++;
-	return error;
-}
-
-#define	vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, curproc)
-
-#define	vm_map_lock_downgrade(map) \
-	do { \
-		vm_map_printf("locking map LK_DOWNGRADE: %p\n", map); \
-		lockmgr(&(map)->lock, LK_DOWNGRADE, NULL, curproc); \
-	} while (0)
-
-#define	vm_map_set_recursive(map) \
-	do { \
-		mtx_lock((map)->lock.lk_interlock); \
-		(map)->lock.lk_flags |= LK_CANRECURSE; \
-		mtx_unlock((map)->lock.lk_interlock); \
-	} while(0)
-
-#define	vm_map_clear_recursive(map) \
-	do { \
-		mtx_lock((map)->lock.lk_interlock); \
-		(map)->lock.lk_flags &= ~LK_CANRECURSE; \
-		mtx_unlock((map)->lock.lk_interlock); \
-	} while(0)
-
-/*
- * Functions implemented as macros
- */
-#define	vm_map_min(map)		((map)->min_offset)
-#define	vm_map_max(map)		((map)->max_offset)
-#define	vm_map_pmap(map)	((map)->pmap)
+struct pmap *vmspace_pmap(struct vmspace *vmspace);
+long vmspace_resident_count(struct vmspace *vmspace);
 #endif /* _KERNEL */
 
-static __inline struct pmap *
-vmspace_pmap(struct vmspace *vmspace)
-{
-	return &vmspace->vm_pmap;
-}
-
-static __inline long
-vmspace_resident_count(struct vmspace *vmspace)
-{
-	return pmap_resident_count(vmspace_pmap(vmspace));
-}
-
 /* XXX: number of kernel maps and entries to statically allocate */
 #define MAX_KMAP	10
@@ -331,35 +260,35 @@ vmspace_resident_count(struct vmspace *vmspace)
 #define VM_FAULT_DIRTY	8	/* Dirty the page */
 
 #ifdef _KERNEL
-boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
+boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
 struct pmap;
-vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t));
-int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
-int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
-int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
-int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
-void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
-void vm_map_destroy __P((struct vm_map *));
-int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
-int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
-    vm_pindex_t *, vm_prot_t *, boolean_t *));
-void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
-boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
-int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
-int vm_map_user_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
-int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
-int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
-int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
-void vm_map_startup __P((void));
-int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
-int vm_map_madvise __P((vm_map_t, vm_offset_t, vm_offset_t, int));
-void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
-void vm_init2 __P((void));
-int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
-void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
-int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
-int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
-int vmspace_swap_count __P((struct vmspace *vmspace));
+vm_map_t vm_map_create (struct pmap *, vm_offset_t, vm_offset_t);
+int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t);
+int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int);
+int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *);
+int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
+void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t);
+void vm_map_destroy (struct vm_map *);
+int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
+int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
+    vm_pindex_t *, vm_prot_t *, boolean_t *);
+void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
+boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
+int vm_map_pageable (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
+int vm_map_user_pageable (vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
+int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
+int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
+int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
+void vm_map_startup (void);
+int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
+int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
+void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
+void vm_init2 (void);
+int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
+void vm_freeze_copyopts (vm_object_t, vm_pindex_t, vm_pindex_t);
+int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
+int vm_map_growstack (struct proc *p, vm_offset_t addr);
+int vmspace_swap_count (struct vmspace *vmspace);
 
 #endif
 #endif /* _VM_MAP_ */

sys/vm/vm_object.c:

--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -140,10 +140,7 @@ static int object_hash_rand;
 static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
 
 void
-_vm_object_allocate(type, size, object)
-	objtype_t type;
-	vm_size_t size;
-	vm_object_t object;
+_vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
 {
 	int incr;
 
@@ -191,7 +188,7 @@ _vm_object_allocate(type, size, object)
 * Initialize the VM objects module.
 */
 void
-vm_object_init()
+vm_object_init(void)
 {
 	GIANT_REQUIRED;
 
@@ -213,11 +210,89 @@ vm_object_init()
 }
 
 void
-vm_object_init2()
+vm_object_init2(void)
 {
 	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
 }
 
+void
+vm_object_set_flag(vm_object_t object, u_short bits)
+{
+	GIANT_REQUIRED;
+	atomic_set_short(&object->flags, bits);
+	/* object->flags |= bits; */
+}
+
+void
+vm_object_clear_flag(vm_object_t object, u_short bits)
+{
+	GIANT_REQUIRED;
+	atomic_clear_short(&object->flags, bits);
+	/* object->flags &= ~bits; */
+}
+
+void
+vm_object_pip_add(vm_object_t object, short i)
+{
+	GIANT_REQUIRED;
+	atomic_add_short(&object->paging_in_progress, i);
+	/* object->paging_in_progress += i; */
+}
+
+void
+vm_object_pip_subtract(vm_object_t object, short i)
+{
+	GIANT_REQUIRED;
+	atomic_subtract_short(&object->paging_in_progress, i);
+	/* object->paging_in_progress -= i; */
+}
+
+void
+vm_object_pip_wakeup(vm_object_t object)
+{
+	GIANT_REQUIRED;
+	atomic_subtract_short(&object->paging_in_progress, 1);
+	/* object->paging_in_progress--; */
+	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
+		vm_object_clear_flag(object, OBJ_PIPWNT);
+		wakeup(object);
+	}
+}
+
+void
+vm_object_pip_wakeupn(vm_object_t object, short i)
+{
+	GIANT_REQUIRED;
+	if (i)
+		atomic_subtract_short(&object->paging_in_progress, i);
+	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
+		vm_object_clear_flag(object, OBJ_PIPWNT);
+		wakeup(object);
+	}
+}
+
+void
+vm_object_pip_sleep(vm_object_t object, char *waitid)
+{
+	GIANT_REQUIRED;
+	if (object->paging_in_progress) {
+		int s = splvm();
+		if (object->paging_in_progress) {
+			vm_object_set_flag(object, OBJ_PIPWNT);
+			tsleep(object, PVM, waitid, 0);
+		}
+		splx(s);
+	}
+}
+
+void
+vm_object_pip_wait(vm_object_t object, char *waitid)
+{
+	GIANT_REQUIRED;
+	while (object->paging_in_progress)
+		vm_object_pip_sleep(object, waitid);
+}
+
 /*
  * vm_object_allocate:
  *
@@ -225,9 +300,7 @@ vm_object_init2()
 */
 
 vm_object_t
-vm_object_allocate(type, size)
-	objtype_t type;
-	vm_size_t size;
+vm_object_allocate(objtype_t type, vm_size_t size)
 {
 	vm_object_t result;
 
@@ -246,8 +319,7 @@ vm_object_allocate(type, size)
 * Gets another reference to the given object.
 */
 void
-vm_object_reference(object)
-	vm_object_t object;
+vm_object_reference(vm_object_t object)
 {
 	GIANT_REQUIRED;
 
@@ -269,8 +341,7 @@ vm_object_reference(object)
 * handle deallocating a object of type OBJT_VNODE
 */
 void
-vm_object_vndeallocate(object)
-	vm_object_t object;
+vm_object_vndeallocate(vm_object_t object)
 {
 	struct vnode *vp = (struct vnode *) object->handle;
 
@@ -308,8 +379,7 @@ vm_object_vndeallocate(object)
 * No object may be locked.
 */
 void
-vm_object_deallocate(object)
-	vm_object_t object;
+vm_object_deallocate(vm_object_t object)
 {
 	vm_object_t temp;
 
@@ -403,8 +473,7 @@ doterm:
 * This routine may block.
 */
 void
-vm_object_terminate(object)
-	vm_object_t object;
+vm_object_terminate(vm_object_t object)
 {
 	vm_page_t p;
 	int s;
@@ -504,11 +573,7 @@ vm_object_terminate(object)
 */
 
 void
-vm_object_page_clean(object, start, end, flags)
-	vm_object_t object;
-	vm_pindex_t start;
-	vm_pindex_t end;
-	int flags;
+vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
 {
 	vm_page_t p, np, tp;
 	vm_offset_t tstart, tend;
@@ -692,29 +757,6 @@ rescan:
 	return;
 }
 
-#ifdef not_used
-/* XXX I cannot tell if this should be an exported symbol */
-/*
- * vm_object_deactivate_pages
- *
- * Deactivate all pages in the specified object. (Keep its pages
- * in memory even though it is no longer referenced.)
- *
- * The object must be locked.
- */
-static void
-vm_object_deactivate_pages(object)
-	vm_object_t object;
-{
-	vm_page_t p, next;
-
-	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
-		next = TAILQ_NEXT(p, listq);
-		vm_page_deactivate(p);
-	}
-}
-#endif
-
 /*
  * Same as vm_object_pmap_copy, except range checking really
  * works, and is meant for small sections of an object.
@@ -728,10 +770,7 @@ vm_object_deactivate_pages(object)
 */
 
 void
-vm_object_pmap_copy_1(object, start, end)
-	vm_object_t object;
-	vm_pindex_t start;
-	vm_pindex_t end;
+vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
 	vm_pindex_t idx;
 	vm_page_t p;
@@ -758,10 +797,7 @@ vm_object_pmap_copy_1(object, start, end)
 * The object must *not* be locked.
 */
 void
-vm_object_pmap_remove(object, start, end)
-	vm_object_t object;
-	vm_pindex_t start;
-	vm_pindex_t end;
+vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
 	vm_page_t p;
 
@@ -798,11 +834,7 @@ vm_object_pmap_remove(object, start, end)
 * without I/O.
 */
 void
-vm_object_madvise(object, pindex, count, advise)
-	vm_object_t object;
-	vm_pindex_t pindex;
-	int count;
-	int advise;
+vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
 {
 	vm_pindex_t end, tpindex;
 	vm_object_t tobject;
@@ -914,10 +946,10 @@ shadowlookup:
 */
 
 void
-vm_object_shadow(object, offset, length)
-	vm_object_t *object;	/* IN/OUT */
-	vm_ooffset_t *offset;	/* IN/OUT */
-	vm_size_t length;
+vm_object_shadow(
+	vm_object_t *object,	/* IN/OUT */
+	vm_ooffset_t *offset,	/* IN/OUT */
+	vm_size_t length)
 {
 	vm_object_t source;
 	vm_object_t result;
@@ -1177,11 +1209,12 @@ vm_object_backing_scan(vm_object_t object, int op)
 * operation, but should plug 99.9% of the rest of the leaks.
 */
 static void
-vm_object_qcollapse(object)
-	vm_object_t object;
+vm_object_qcollapse(vm_object_t object)
 {
 	vm_object_t backing_object = object->backing_object;
 
+	GIANT_REQUIRED;
+
 	if (backing_object->ref_count != 1)
 		return;
 
@@ -1200,8 +1233,7 @@ vm_object_qcollapse(object)
 * parent, and the backing object is deallocated.
 */
 void
-vm_object_collapse(object)
-	vm_object_t object;
+vm_object_collapse(vm_object_t object)
 {
 	GIANT_REQUIRED;
 
@@ -1406,11 +1438,7 @@ vm_object_collapse(object)
 * The object must be locked.
 */
 void
-vm_object_page_remove(object, start, end, clean_only)
-	vm_object_t object;
-	vm_pindex_t start;
-	vm_pindex_t end;
-	boolean_t clean_only;
+vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, boolean_t clean_only)
 {
 	vm_page_t p, next;
 	unsigned int size;
@@ -1527,10 +1555,7 @@ again:
 * The object must *not* be locked.
 */
 boolean_t
-vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
-	vm_object_t prev_object;
-	vm_pindex_t prev_pindex;
-	vm_size_t prev_size, next_size;
+vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex, vm_size_t prev_size, vm_size_t next_size)
 {
 	vm_pindex_t next_pindex;
 
@@ -1599,15 +1624,8 @@ vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
 
 #include <ddb/ddb.h>
 
-static int _vm_object_in_map __P((vm_map_t map, vm_object_t object,
-				  vm_map_entry_t entry));
-static int vm_object_in_map __P((vm_object_t object));
-
 static int
-_vm_object_in_map(map, object, entry)
-	vm_map_t map;
-	vm_object_t object;
-	vm_map_entry_t entry;
+_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
 {
 	vm_map_t tmpm;
 	vm_map_entry_t tmpe;
@@ -1646,8 +1664,7 @@ _vm_object_in_map(map, object, entry)
 }
 
 static int
-vm_object_in_map( object)
-	vm_object_t object;
+vm_object_in_map(vm_object_t object)
 {
 	struct proc *p;
 
@@ -1759,11 +1776,11 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
 
 /* XXX need this non-static entry for calling from vm_map_print. */
 void
-vm_object_print(addr, have_addr, count, modif)
-	/* db_expr_t */ long addr;
-	boolean_t have_addr;
-	/* db_expr_t */ long count;
-	char *modif;
+vm_object_print(
+	/* db_expr_t */ long addr,
+	boolean_t have_addr,
+	/* db_expr_t */ long count,
+	char *modif)
 {
 	vm_object_print_static(addr, have_addr, count, modif);
 }

sys/vm/vm_object.h:

--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -169,101 +169,32 @@ extern vm_object_t kmem_object;
 
 #ifdef _KERNEL
 
-static __inline void
-vm_object_set_flag(vm_object_t object, u_short bits)
-{
-	GIANT_REQUIRED;
-	atomic_set_short(&object->flags, bits);
-	/* object->flags |= bits; */
-}
-
-static __inline void
-vm_object_clear_flag(vm_object_t object, u_short bits)
-{
-	GIANT_REQUIRED;
-	atomic_clear_short(&object->flags, bits);
-	/* object->flags &= ~bits; */
-}
-
-static __inline void
-vm_object_pip_add(vm_object_t object, short i)
-{
-	GIANT_REQUIRED;
-	atomic_add_short(&object->paging_in_progress, i);
-	/* object->paging_in_progress += i; */
-}
-
-static __inline void
-vm_object_pip_subtract(vm_object_t object, short i)
-{
-	GIANT_REQUIRED;
-	atomic_subtract_short(&object->paging_in_progress, i);
-	/* object->paging_in_progress -= i; */
-}
-
-static __inline void
-vm_object_pip_wakeup(vm_object_t object)
-{
-	GIANT_REQUIRED;
-	atomic_subtract_short(&object->paging_in_progress, 1);
-	/* object->paging_in_progress--; */
-	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
-		vm_object_clear_flag(object, OBJ_PIPWNT);
-		wakeup(object);
-	}
-}
-
-static __inline void
-vm_object_pip_wakeupn(vm_object_t object, short i)
-{
-	GIANT_REQUIRED;
-	if (i)
-		atomic_subtract_short(&object->paging_in_progress, i);
-	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
-		vm_object_clear_flag(object, OBJ_PIPWNT);
-		wakeup(object);
-	}
-}
-
-static __inline void
-vm_object_pip_sleep(vm_object_t object, char *waitid)
-{
-	GIANT_REQUIRED;
-	if (object->paging_in_progress) {
-		int s = splvm();
-		if (object->paging_in_progress) {
-			vm_object_set_flag(object, OBJ_PIPWNT);
-			tsleep(object, PVM, waitid, 0);
-		}
-		splx(s);
-	}
-}
-
-static __inline void
-vm_object_pip_wait(vm_object_t object, char *waitid)
-{
-	GIANT_REQUIRED;
-	while (object->paging_in_progress)
-		vm_object_pip_sleep(object, waitid);
-}
-
-vm_object_t vm_object_allocate __P((objtype_t, vm_size_t));
-void _vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t));
-boolean_t vm_object_coalesce __P((vm_object_t, vm_pindex_t, vm_size_t, vm_size_t));
-void vm_object_collapse __P((vm_object_t));
-void vm_object_deallocate __P((vm_object_t));
-void vm_object_terminate __P((vm_object_t));
-void vm_object_vndeallocate __P((vm_object_t));
-void vm_object_init __P((void));
-void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
-void vm_object_page_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
-void vm_object_pmap_copy __P((vm_object_t, vm_pindex_t, vm_pindex_t));
-void vm_object_pmap_copy_1 __P((vm_object_t, vm_pindex_t, vm_pindex_t));
-void vm_object_pmap_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t));
-void vm_object_reference __P((vm_object_t));
-void vm_object_shadow __P((vm_object_t *, vm_ooffset_t *, vm_size_t));
-void vm_object_madvise __P((vm_object_t, vm_pindex_t, int, int));
-void vm_object_init2 __P((void));
+void vm_object_set_flag(vm_object_t object, u_short bits);
+void vm_object_clear_flag(vm_object_t object, u_short bits);
+void vm_object_pip_add(vm_object_t object, short i);
+void vm_object_pip_subtract(vm_object_t object, short i);
+void vm_object_pip_wakeup(vm_object_t object);
+void vm_object_pip_wakeupn(vm_object_t object, short i);
+void vm_object_pip_sleep(vm_object_t object, char *waitid);
+void vm_object_pip_wait(vm_object_t object, char *waitid);
+
+vm_object_t vm_object_allocate (objtype_t, vm_size_t);
+void _vm_object_allocate (objtype_t, vm_size_t, vm_object_t);
+boolean_t vm_object_coalesce (vm_object_t, vm_pindex_t, vm_size_t, vm_size_t);
+void vm_object_collapse (vm_object_t);
+void vm_object_deallocate (vm_object_t);
+void vm_object_terminate (vm_object_t);
+void vm_object_vndeallocate (vm_object_t);
+void vm_object_init (void);
+void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
+void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
+void vm_object_pmap_copy (vm_object_t, vm_pindex_t, vm_pindex_t);
+void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t);
+void vm_object_pmap_remove (vm_object_t, vm_pindex_t, vm_pindex_t);
+void vm_object_reference (vm_object_t);
+void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
+void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
+void vm_object_init2 (void);
 #endif /* _KERNEL */
 
 #endif /* _VM_OBJECT_ */

sys/vm/vm_page.c (424 lines changed):

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@ -125,8 +125,7 @@ int vm_page_array_size = 0;
|
|||||||
long first_page = 0;
|
long first_page = 0;
|
||||||
int vm_page_zero_count = 0;
|
int vm_page_zero_count = 0;
|
||||||
|
|
||||||
static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
|
static vm_page_t _vm_page_list_find(int basequeue, int index);
|
||||||
static void vm_page_free_wakeup __P((void));
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* vm_set_page_size:
|
* vm_set_page_size:
|
||||||
@ -136,7 +135,7 @@ static void vm_page_free_wakeup __P((void));
|
|||||||
* dependent functions.
|
* dependent functions.
|
||||||
*/
|
*/
|
||||||
void
|
void
|
||||||
vm_set_page_size()
|
vm_set_page_size(void)
|
||||||
{
|
{
|
||||||
if (cnt.v_page_size == 0)
|
if (cnt.v_page_size == 0)
|
||||||
cnt.v_page_size = PAGE_SIZE;
|
cnt.v_page_size = PAGE_SIZE;
|
||||||
@ -151,8 +150,7 @@ vm_set_page_size()
|
|||||||
* Must be called at splhigh().
|
* Must be called at splhigh().
|
||||||
*/
|
*/
|
||||||
vm_page_t
|
vm_page_t
|
||||||
vm_add_new_page(pa)
|
vm_add_new_page(vm_offset_t pa)
|
||||||
vm_offset_t pa;
|
|
||||||
{
|
{
|
||||||
vm_page_t m;
|
vm_page_t m;
|
||||||
|
|
||||||
@ -181,10 +179,7 @@ vm_add_new_page(pa)
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
vm_offset_t
|
vm_offset_t
|
||||||
vm_page_startup(starta, enda, vaddr)
|
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
|
||||||
vm_offset_t starta;
|
|
||||||
vm_offset_t enda;
|
|
||||||
vm_offset_t vaddr;
|
|
||||||
{
|
{
|
||||||
vm_offset_t mapped;
|
vm_offset_t mapped;
|
||||||
struct vm_page **bucket;
|
struct vm_page **bucket;
|
||||||
@ -333,15 +328,245 @@ vm_page_startup(starta, enda, vaddr)
|
|||||||
* out in the hash table without it costing us too much.
|
* out in the hash table without it costing us too much.
|
||||||
*/
|
*/
|
||||||
static __inline int
|
static __inline int
|
||||||
vm_page_hash(object, pindex)
|
vm_page_hash(vm_object_t object, vm_pindex_t pindex)
|
||||||
vm_object_t object;
|
|
||||||
vm_pindex_t pindex;
|
|
||||||
{
|
{
|
||||||
int i = ((uintptr_t)object + pindex) ^ object->hash_rand;
|
int i = ((uintptr_t)object + pindex) ^ object->hash_rand;
|
||||||
|
|
||||||
return(i & vm_page_hash_mask);
|
return(i & vm_page_hash_mask);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_flag_set(vm_page_t m, unsigned short bits)
|
||||||
|
{
|
||||||
|
GIANT_REQUIRED;
|
||||||
|
atomic_set_short(&(m)->flags, bits);
|
||||||
|
/* m->flags |= bits; */
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_flag_clear(vm_page_t m, unsigned short bits)
|
||||||
|
{
|
||||||
|
GIANT_REQUIRED;
|
||||||
|
atomic_clear_short(&(m)->flags, bits);
|
||||||
|
/* m->flags &= ~bits; */
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_busy(vm_page_t m)
|
||||||
|
{
|
||||||
|
KASSERT((m->flags & PG_BUSY) == 0,
|
||||||
|
("vm_page_busy: page already busy!!!"));
|
||||||
|
vm_page_flag_set(m, PG_BUSY);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* vm_page_flash:
|
||||||
|
*
|
||||||
|
* wakeup anyone waiting for the page.
|
||||||
|
*/
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_flash(vm_page_t m)
|
||||||
|
{
|
||||||
|
if (m->flags & PG_WANTED) {
|
||||||
|
vm_page_flag_clear(m, PG_WANTED);
|
||||||
|
wakeup(m);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* vm_page_wakeup:
|
||||||
|
*
|
||||||
|
* clear the PG_BUSY flag and wakeup anyone waiting for the
|
||||||
|
* page.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_wakeup(vm_page_t m)
|
||||||
|
{
|
||||||
|
KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
|
||||||
|
vm_page_flag_clear(m, PG_BUSY);
|
||||||
|
vm_page_flash(m);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_io_start(vm_page_t m)
|
||||||
|
{
|
||||||
|
GIANT_REQUIRED;
|
||||||
|
atomic_add_char(&(m)->busy, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_io_finish(vm_page_t m)
|
||||||
|
{
|
||||||
|
GIANT_REQUIRED;
|
||||||
|
atomic_subtract_char(&(m)->busy, 1);
|
||||||
|
if (m->busy == 0)
|
||||||
|
vm_page_flash(m);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Keep page from being freed by the page daemon
|
||||||
|
* much of the same effect as wiring, except much lower
|
||||||
|
* overhead and should be used only for *very* temporary
|
||||||
|
* holding ("wiring").
|
||||||
|
*/
|
||||||
|
void
|
||||||
|
vm_page_hold(vm_page_t mem)
|
||||||
|
{
|
||||||
|
GIANT_REQUIRED;
|
||||||
|
mem->hold_count++;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_unhold(vm_page_t mem)
|
||||||
|
{
|
||||||
|
GIANT_REQUIRED;
|
||||||
|
--mem->hold_count;
|
||||||
|
KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* vm_page_protect:
|
||||||
|
*
|
||||||
|
* Reduce the protection of a page. This routine never raises the
|
||||||
|
* protection and therefore can be safely called if the page is already
|
||||||
|
* at VM_PROT_NONE (it will be a NOP effectively ).
|
||||||
|
*/
|
||||||
|
|
||||||
|
void
|
||||||
|
vm_page_protect(vm_page_t mem, int prot)
|
||||||
|
{
|
||||||
|
if (prot == VM_PROT_NONE) {
|
||||||
|
if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
|
||||||
|
pmap_page_protect(mem, VM_PROT_NONE);
|
||||||
|
vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
|
||||||
|
}
|
||||||
|
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
|
||||||
|
pmap_page_protect(mem, VM_PROT_READ);
|
||||||
|
vm_page_flag_clear(mem, PG_WRITEABLE);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
+
+/*
+ *	vm_page_zero_fill:
+ *
+ *	Zero-fill the specified page.
+ *	Written as a standard pagein routine, to
+ *	be used by the zero-fill object.
+ */
+boolean_t
+vm_page_zero_fill(vm_page_t m)
+{
+	pmap_zero_page(VM_PAGE_TO_PHYS(m));
+	return (TRUE);
+}
+
+/*
+ *	vm_page_copy:
+ *
+ *	Copy one page to another
+ */
+void
+vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
+{
+	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
+	dest_m->valid = VM_PAGE_BITS_ALL;
+}
+
+/*
+ *	vm_page_free:
+ *
+ *	Free a page
+ *
+ *	The clearing of PG_ZERO is a temporary safety until the code can be
+ *	reviewed to determine that PG_ZERO is being properly cleared on
+ *	write faults or maps.  PG_ZERO was previously cleared in
+ *	vm_page_alloc().
+ */
+void
+vm_page_free(vm_page_t m)
+{
+	vm_page_flag_clear(m, PG_ZERO);
+	vm_page_free_toq(m);
+}
+
+/*
+ *	vm_page_free_zero:
+ *
+ *	Free a page to the zerod-pages queue
+ */
+void
+vm_page_free_zero(vm_page_t m)
+{
+	vm_page_flag_set(m, PG_ZERO);
+	vm_page_free_toq(m);
+}
+
+/*
+ *	vm_page_sleep_busy:
+ *
+ *	Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
+ *	m->busy is zero.  Returns TRUE if it had to sleep ( including if
+ *	it almost had to sleep and made temporary spl*() mods), FALSE
+ *	otherwise.
+ *
+ *	This routine assumes that interrupts can only remove the busy
+ *	status from a page, not set the busy status or change it from
+ *	PG_BUSY to m->busy or vise versa (which would create a timing
+ *	window).
+ */
+int
+vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
+{
+	GIANT_REQUIRED;
+	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
+		int s = splvm();
+		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
+			/*
+			 * Page is busy.  Wait and retry.
+			 */
+			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+			tsleep(m, PVM, msg, 0);
+		}
+		splx(s);
+		return(TRUE);
+		/* not reached */
+	}
+	return(FALSE);
+}
+
+/*
+ *	vm_page_dirty:
+ *
+ *	make page all dirty
+ */
+void
+vm_page_dirty(vm_page_t m)
+{
+	KASSERT(m->queue - m->pc != PQ_CACHE,
+	    ("vm_page_dirty: page in cache!"));
+	m->dirty = VM_PAGE_BITS_ALL;
+}
+
+/*
+ *	vm_page_undirty:
+ *
+ *	Set page to not be dirty.  Note: does not clear pmap modify bits
+ */
+void
+vm_page_undirty(vm_page_t m)
+{
+	m->dirty = 0;
+}
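
[Illustration, not part of the diff: the retry idiom the vm_page_sleep_busy() comment above implies. A lookup must restart from scratch whenever the routine reports that it slept, because the page's identity or the object's state may have changed while asleep; vm_page_grab() in this file follows the same structure. The wrapper function and the "pgwait" wmesg are invustrative assumptions, invented here.]

	/* Hypothetical caller sketch (not from this commit). */
	static vm_page_t
	example_lookup_and_busy(vm_object_t object, vm_pindex_t pindex)
	{
		vm_page_t m;

	retry:
		m = vm_page_lookup(object, pindex);
		if (m != NULL) {
			/* TRUE return: we slept, so state may have changed. */
			if (vm_page_sleep_busy(m, TRUE, "pgwait"))
				goto retry;
			vm_page_busy(m);	/* claim exclusive PG_BUSY access */
		}
		return (m);
	}
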
 /*
  *	vm_page_insert:		[ internal use only ]
  *
@@ -357,10 +582,7 @@ vm_page_hash(object, pindex)
  */
 
 void
-vm_page_insert(m, object, pindex)
-	vm_page_t m;
-	vm_object_t object;
-	vm_pindex_t pindex;
+vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	struct vm_page **bucket;
 
@@ -420,8 +642,7 @@ vm_page_insert(m, object, pindex)
  */
 
 void
-vm_page_remove(m)
-	vm_page_t m;
+vm_page_remove(vm_page_t m)
 {
 	vm_object_t object;
 
@@ -497,9 +718,7 @@ vm_page_remove(m)
  */
 
 vm_page_t
-vm_page_lookup(object, pindex)
-	vm_object_t object;
-	vm_pindex_t pindex;
+vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;
 	struct vm_page **bucket;
@@ -549,10 +768,7 @@ retry:
  */
 
 void
-vm_page_rename(m, new_object, new_pindex)
-	vm_page_t m;
-	vm_object_t new_object;
-	vm_pindex_t new_pindex;
+vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 {
 	int s;
 
@@ -575,8 +791,7 @@ vm_page_rename(m, new_object, new_pindex)
  */
 
 void
-vm_page_unqueue_nowakeup(m)
-	vm_page_t m;
+vm_page_unqueue_nowakeup(vm_page_t m)
 {
 	int queue = m->queue;
 	struct vpgqueues *pq;
@@ -599,8 +814,7 @@ vm_page_unqueue_nowakeup(m)
  */
 
 void
-vm_page_unqueue(m)
-	vm_page_t m;
+vm_page_unqueue(vm_page_t m)
 {
 	int queue = m->queue;
 	struct vpgqueues *pq;
@@ -619,6 +833,33 @@ vm_page_unqueue(m)
 	}
 }
 
+vm_page_t
+vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
+{
+	vm_page_t m;
+
+	GIANT_REQUIRED;
+
+#if PQ_L2_SIZE > 1
+	if (prefer_zero) {
+		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
+	} else {
+		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
+	}
+	if (m == NULL) {
+		m = _vm_page_list_find(basequeue, index);
+	}
+#else
+	if (prefer_zero) {
+		m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
+	} else {
+		m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
+	}
+#endif
+	return(m);
+}
+
 
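
[Illustration, not part of the diff: what the prefer_zero tail-scan above buys a caller. Pages freed via vm_page_free_zero() carry PG_ZERO and sit at the opposite end of the free queue from ordinary frees, so a VM_ALLOC_ZERO allocation can often skip the explicit zeroing. The wrapper function name is an invented assumption; the PG_ZERO check-and-zero pattern follows the style used elsewhere in this era of the VM code.]

	/* Hypothetical caller sketch (not from this commit). */
	static vm_page_t
	example_alloc_zeroed(vm_object_t object, vm_pindex_t pindex)
	{
		vm_page_t m;

		m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO);
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			/* No pre-zeroed frame was available; zero it now. */
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		}
		return (m);
	}
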
 #if PQ_L2_SIZE > 1
 
 /*
@@ -638,9 +879,8 @@ vm_page_unqueue(m)
  *	This routine may only be called from the vm_page_list_find() macro
  *	in vm_page.h
  */
-vm_page_t
-_vm_page_list_find(basequeue, index)
-	int basequeue, index;
+static vm_page_t
+_vm_page_list_find(int basequeue, int index)
 {
 	int i;
 	vm_page_t m = NULL;
@@ -678,9 +918,7 @@ _vm_page_list_find(basequeue, index)
  *	This routine may not block.
  */
 vm_page_t
-vm_page_select_cache(object, pindex)
-	vm_object_t object;
-	vm_pindex_t pindex;
+vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;
 
@@ -703,9 +941,7 @@ vm_page_select_cache(object, pindex)
 /*
  *	vm_page_select_free:
  *
- *	Find a free or zero page, with specified preference.  We attempt to
- *	inline the nominal case and fall back to _vm_page_select_free()
- *	otherwise.
+ *	Find a free or zero page, with specified preference.
  *
  *	This routine must be called at splvm().
  *	This routine may not block.
@@ -744,10 +980,7 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
  */
 
 vm_page_t
-vm_page_alloc(object, pindex, page_req)
-	vm_object_t object;
-	vm_pindex_t pindex;
-	int page_req;
+vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
 {
 	vm_page_t m = NULL;
 	int s;
@@ -879,7 +1112,7 @@ loop:
  */
 
 void
-vm_wait()
+vm_wait(void)
 {
 	int s;
 
@@ -905,7 +1138,7 @@ vm_wait()
  */
 
 void
-vm_await()
+vm_await(void)
 {
 	int s;
 
@@ -934,8 +1167,7 @@ vm_await()
  *	This routine may not block.
  */
 void
-vm_page_activate(m)
-	vm_page_t m;
+vm_page_activate(vm_page_t m)
 {
 	int s;
 
@@ -975,7 +1207,7 @@ vm_page_activate(m)
  *	This routine must be called at splvm()
  */
 static __inline void
-vm_page_free_wakeup()
+vm_page_free_wakeup(void)
 {
 	/*
 	 * if pageout daemon needs pages, then tell it that there are
@@ -1154,8 +1386,7 @@ vm_page_unmanage(vm_page_t m)
  *	This routine may not block.
  */
 void
-vm_page_wire(m)
-	vm_page_t m;
+vm_page_wire(vm_page_t m)
 {
 	int s;
 
@@ -1204,9 +1435,7 @@ vm_page_wire(m)
  *	This routine may not block.
  */
 void
-vm_page_unwire(m, activate)
-	vm_page_t m;
-	int activate;
+vm_page_unwire(vm_page_t m, int activate)
 {
 	int s;
 
@@ -1311,8 +1540,7 @@ vm_page_try_to_cache(vm_page_t m)
  *	1 is returned on success, 0 on failure.
  */
 int
-vm_page_try_to_free(m)
-	vm_page_t m;
+vm_page_try_to_free(vm_page_t m)
 {
 	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
 	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
@@ -1335,8 +1563,7 @@ vm_page_try_to_free(m)
  *	This routine may not block.
  */
 void
-vm_page_cache(m)
-	vm_page_t m;
+vm_page_cache(vm_page_t m)
 {
 	int s;
 
@@ -1391,8 +1618,7 @@ vm_page_cache(m)
  */
 
 void
-vm_page_dontneed(m)
-	vm_page_t m;
+vm_page_dontneed(vm_page_t m)
 {
 	static int dnweight;
 	int dnw;
@@ -1441,10 +1667,7 @@ vm_page_dontneed(m)
  *	This routine may block.
  */
 vm_page_t
-vm_page_grab(object, pindex, allocflags)
-	vm_object_t object;
-	vm_pindex_t pindex;
-	int allocflags;
+vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 	int s, generation;
@@ -1524,10 +1747,7 @@ vm_page_bits(int base, int size)
  *	(base + size) must be less then or equal to PAGE_SIZE.
  */
 void
-vm_page_set_validclean(m, base, size)
-	vm_page_t m;
-	int base;
-	int size;
+vm_page_set_validclean(vm_page_t m, int base, int size)
 {
 	int pagebits;
 	int frag;
@@ -1591,10 +1811,7 @@ vm_page_set_validclean(m, base, size)
 #if 0
 
 void
-vm_page_set_dirty(m, base, size)
-	vm_page_t m;
-	int base;
-	int size;
+vm_page_set_dirty(vm_page_t m, int base, int size)
 {
 	m->dirty |= vm_page_bits(base, size);
 }
@@ -1602,10 +1819,7 @@ vm_page_set_dirty(m, base, size)
 #endif
 
 void
-vm_page_clear_dirty(m, base, size)
-	vm_page_t m;
-	int base;
-	int size;
+vm_page_clear_dirty(vm_page_t m, int base, int size)
 {
 	GIANT_REQUIRED;
 	m->dirty &= ~vm_page_bits(base, size);
@@ -1620,10 +1834,7 @@ vm_page_clear_dirty(m, base, size)
  *	May not block.
  */
 void
-vm_page_set_invalid(m, base, size)
-	vm_page_t m;
-	int base;
-	int size;
+vm_page_set_invalid(vm_page_t m, int base, int size)
 {
 	int bits;
 
@@ -1695,10 +1906,7 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
  */
 
 int
-vm_page_is_valid(m, base, size)
-	vm_page_t m;
-	int base;
-	int size;
+vm_page_is_valid(vm_page_t m, int base, int size)
 {
 	int bits = vm_page_bits(base, size);
 
@@ -1713,8 +1921,7 @@ vm_page_is_valid(m, base, size)
  */
 
 void
-vm_page_test_dirty(m)
-	vm_page_t m;
+vm_page_test_dirty(vm_page_t m)
 {
 	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
 		vm_page_dirty(m);
@@ -1728,15 +1935,15 @@ vm_page_test_dirty(m)
  *	for statistics and for allocations of less than a page.
  */
 void *
-contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
-	unsigned long size;	/* should be size_t here and for malloc() */
-	struct malloc_type *type;
-	int flags;
-	unsigned long low;
-	unsigned long high;
-	unsigned long alignment;
-	unsigned long boundary;
-	vm_map_t map;
+contigmalloc1(
+	unsigned long size,	/* should be size_t here and for malloc() */
+	struct malloc_type *type,
+	int flags,
+	unsigned long low,
+	unsigned long high,
+	unsigned long alignment,
+	unsigned long boundary,
+	vm_map_t map)
 {
 	int i, s, start;
 	vm_offset_t addr, phys, tmp_addr;
@@ -1905,14 +2112,14 @@ again1:
 }
 
 void *
-contigmalloc(size, type, flags, low, high, alignment, boundary)
-	unsigned long size;	/* should be size_t here and for malloc() */
-	struct malloc_type *type;
-	int flags;
-	unsigned long low;
-	unsigned long high;
-	unsigned long alignment;
-	unsigned long boundary;
+contigmalloc(
+	unsigned long size,	/* should be size_t here and for malloc() */
+	struct malloc_type *type,
+	int flags,
+	unsigned long low,
+	unsigned long high,
+	unsigned long alignment,
+	unsigned long boundary)
 {
 	void * ret;
 
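
[Illustration, not part of the diff: the call style the re-declared contigmalloc() corresponds to. The low/high window, alignment, and boundary arguments map onto classic ISA DMA constraints; the wrapper function, the 64KB size, and the M_DEVBUF type are illustrative assumptions, not taken from this commit.]

	/* Hypothetical caller sketch (not from this commit). */
	static void *
	example_isa_dma_buffer(void)
	{
		/*
		 * 64KB of physically contiguous memory: below 16MB,
		 * page-aligned, and not crossing a 64KB physical
		 * boundary.  Freed later with contigfree(buf, 65536,
		 * M_DEVBUF).
		 */
		return (contigmalloc(65536, M_DEVBUF, M_NOWAIT,
		    0,			/* low physical address */
		    16 * 1024 * 1024,	/* high physical address */
		    PAGE_SIZE,		/* alignment */
		    65536));		/* boundary */
	}
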
@@ -1924,21 +2131,18 @@ contigmalloc(size, type, flags, low, high, alignment, boundary)
 }
 
 void
-contigfree(addr, size, type)
-	void *addr;
-	unsigned long size;
-	struct malloc_type *type;
+contigfree(void *addr, unsigned long size, struct malloc_type *type)
 {
 	GIANT_REQUIRED;
 	kmem_free(kernel_map, (vm_offset_t)addr, size);
 }
 
 vm_offset_t
-vm_page_alloc_contig(size, low, high, alignment)
-	vm_offset_t size;
-	vm_offset_t low;
-	vm_offset_t high;
-	vm_offset_t alignment;
+vm_page_alloc_contig(
+	vm_offset_t size,
+	vm_offset_t low,
+	vm_offset_t high,
+	vm_offset_t alignment)
 {
 	vm_offset_t ret;
 
359	sys/vm/vm_page.h
@@ -304,96 +304,6 @@ extern long first_page;	/* first physical page number */
 #define PHYS_TO_VM_PAGE(pa) \
 		(&vm_page_array[atop(pa) - first_page ])
 
-/*
- *	Functions implemented as macros
- */
-
-static __inline void
-vm_page_flag_set(vm_page_t m, unsigned short bits)
-{
-	GIANT_REQUIRED;
-	atomic_set_short(&(m)->flags, bits);
-	/* m->flags |= bits; */
-}
-
-static __inline void
-vm_page_flag_clear(vm_page_t m, unsigned short bits)
-{
-	GIANT_REQUIRED;
-	atomic_clear_short(&(m)->flags, bits);
-	/* m->flags &= ~bits; */
-}
-
-#if 0
-static __inline void
-vm_page_assert_wait(vm_page_t m, int interruptible)
-{
-	vm_page_flag_set(m, PG_WANTED);
-	assert_wait((int) m, interruptible);
-}
-#endif
-
-static __inline void
-vm_page_busy(vm_page_t m)
-{
-
-	KASSERT((m->flags & PG_BUSY) == 0,
-	    ("vm_page_busy: page already busy!!!"));
-	vm_page_flag_set(m, PG_BUSY);
-}
-
-/*
- *	vm_page_flash:
- *
- *	wakeup anyone waiting for the page.
- */
-static __inline void
-vm_page_flash(vm_page_t m)
-{
-	if (m->flags & PG_WANTED) {
-		vm_page_flag_clear(m, PG_WANTED);
-		wakeup(m);
-	}
-}
-
-/*
- *	vm_page_wakeup:
- *
- *	clear the PG_BUSY flag and wakeup anyone waiting for the
- *	page.
- *
- */
-static __inline void
-vm_page_wakeup(vm_page_t m)
-{
-	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
-	vm_page_flag_clear(m, PG_BUSY);
-	vm_page_flash(m);
-}
-
-/*
- *
- *
- */
-
-static __inline void
-vm_page_io_start(vm_page_t m)
-{
-	GIANT_REQUIRED;
-	atomic_add_char(&(m)->busy, 1);
-}
-
-static __inline void
-vm_page_io_finish(vm_page_t m)
-{
-	GIANT_REQUIRED;
-	atomic_subtract_char(&(m)->busy, 1);
-	if (m->busy == 0)
-		vm_page_flash(m);
-}
-
 #if PAGE_SIZE == 4096
 #define VM_PAGE_BITS_ALL 0xff
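
[Illustration, not part of the diff: why un-inlining the flag helpers above matters for the mutexing this commit prepares for. As inlines, their GIANT_REQUIRED/atomic bodies were compiled into every caller and every KLD module; as real functions, the locking discipline can later change in exactly one place. A purely speculative future shape, in which "vm_page_mtx" is an invented name not introduced by this commit:]

	/* Hypothetical future form (not from this commit). */
	extern struct mtx vm_page_mtx;	/* invented lock name */

	void
	vm_page_flag_set(vm_page_t m, unsigned short bits)
	{
		/* The lock policy now lives here, not in every caller. */
		mtx_assert(&vm_page_mtx, MA_OWNED);
		m->flags |= bits;
	}
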
@@ -409,37 +319,51 @@ vm_page_io_finish(vm_page_t m)
 #define VM_ALLOC_ZERO		3
 #define VM_ALLOC_RETRY		0x80
 
-void vm_page_activate __P((vm_page_t));
-vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
-vm_page_t vm_page_grab __P((vm_object_t, vm_pindex_t, int));
-void vm_page_cache __P((register vm_page_t));
-int vm_page_try_to_cache __P((vm_page_t));
-int vm_page_try_to_free __P((vm_page_t));
-void vm_page_dontneed __P((register vm_page_t));
-static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
-static __inline void vm_page_free __P((vm_page_t));
-static __inline void vm_page_free_zero __P((vm_page_t));
-void vm_page_deactivate __P((vm_page_t));
-void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
-vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
-void vm_page_remove __P((vm_page_t));
-void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
-vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
-vm_page_t vm_add_new_page __P((vm_offset_t pa));
-void vm_page_unmanage __P((vm_page_t));
-void vm_page_unwire __P((vm_page_t, int));
-void vm_page_wire __P((vm_page_t));
-void vm_page_unqueue __P((vm_page_t));
-void vm_page_unqueue_nowakeup __P((vm_page_t));
-void vm_page_set_validclean __P((vm_page_t, int, int));
-void vm_page_set_dirty __P((vm_page_t, int, int));
-void vm_page_clear_dirty __P((vm_page_t, int, int));
-void vm_page_set_invalid __P((vm_page_t, int, int));
-static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
-int vm_page_is_valid __P((vm_page_t, int, int));
-void vm_page_test_dirty __P((vm_page_t));
-int vm_page_bits __P((int, int));
-vm_page_t _vm_page_list_find __P((int, int));
+void vm_page_flag_set(vm_page_t m, unsigned short bits);
+void vm_page_flag_clear(vm_page_t m, unsigned short bits);
+void vm_page_busy(vm_page_t m);
+void vm_page_flash(vm_page_t m);
+void vm_page_io_start(vm_page_t m);
+void vm_page_io_finish(vm_page_t m);
+void vm_page_hold(vm_page_t mem);
+void vm_page_unhold(vm_page_t mem);
+void vm_page_protect(vm_page_t mem, int prot);
+boolean_t vm_page_zero_fill(vm_page_t m);
+void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
+void vm_page_free(vm_page_t m);
+void vm_page_free_zero(vm_page_t m);
+int vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
+void vm_page_dirty(vm_page_t m);
+void vm_page_undirty(vm_page_t m);
+vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
+void vm_page_wakeup(vm_page_t m);
+
+void vm_page_activate (vm_page_t);
+vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
+void vm_page_cache (register vm_page_t);
+int vm_page_try_to_cache (vm_page_t);
+int vm_page_try_to_free (vm_page_t);
+void vm_page_dontneed (register vm_page_t);
+void vm_page_deactivate (vm_page_t);
+void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
+vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
+void vm_page_remove (vm_page_t);
+void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
+vm_offset_t vm_page_startup (vm_offset_t, vm_offset_t, vm_offset_t);
+vm_page_t vm_add_new_page (vm_offset_t pa);
+void vm_page_unmanage (vm_page_t);
+void vm_page_unwire (vm_page_t, int);
+void vm_page_wire (vm_page_t);
+void vm_page_unqueue (vm_page_t);
+void vm_page_unqueue_nowakeup (vm_page_t);
+void vm_page_set_validclean (vm_page_t, int, int);
+void vm_page_set_dirty (vm_page_t, int, int);
+void vm_page_clear_dirty (vm_page_t, int, int);
+void vm_page_set_invalid (vm_page_t, int, int);
+int vm_page_is_valid (vm_page_t, int, int);
+void vm_page_test_dirty (vm_page_t);
+int vm_page_bits (int, int);
 #if 0
 int vm_page_sleep(vm_page_t m, char *msg, char *busy);
 int vm_page_asleep(vm_page_t m, char *msg, char *busy);
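
[Illustration, not part of the diff: the __P() wrapper being dropped throughout the hunk above is the traditional K&R-compatibility macro from <sys/cdefs.h>. With the tree ANSI-only, it no longer earns its keep. A simplified sketch of its classic definition, for context:]

	/* Simplified form of the traditional <sys/cdefs.h> definition. */
	#if defined(__STDC__) || defined(__cplusplus)
	#define	__P(protos)	protos		/* ANSI C: keep the prototype */
	#else
	#define	__P(protos)	()		/* K&R C: drop the parameter list */
	#endif
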
@@ -447,198 +371,5 @@ int vm_page_asleep(vm_page_t m, char *msg, char *busy);
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 void vm_page_free_toq(vm_page_t m);
 
-/*
- * Keep page from being freed by the page daemon
- * much of the same effect as wiring, except much lower
- * overhead and should be used only for *very* temporary
- * holding ("wiring").
- */
-static __inline void
-vm_page_hold(vm_page_t mem)
-{
-	GIANT_REQUIRED;
-	mem->hold_count++;
-}
-
-static __inline void
-vm_page_unhold(vm_page_t mem)
-{
-	GIANT_REQUIRED;
-	--mem->hold_count;
-	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
-}
-
-/*
- *	vm_page_protect:
- *
- *	Reduce the protection of a page.  This routine never raises the
- *	protection and therefore can be safely called if the page is already
- *	at VM_PROT_NONE (it will be a NOP effectively ).
- */
-static __inline void
-vm_page_protect(vm_page_t mem, int prot)
-{
-	if (prot == VM_PROT_NONE) {
-		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
-			pmap_page_protect(mem, VM_PROT_NONE);
-			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
-		}
-	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
-		pmap_page_protect(mem, VM_PROT_READ);
-		vm_page_flag_clear(mem, PG_WRITEABLE);
-	}
-}
-
-/*
- *	vm_page_zero_fill:
- *
- *	Zero-fill the specified page.
- *	Written as a standard pagein routine, to
- *	be used by the zero-fill object.
- */
-static __inline boolean_t
-vm_page_zero_fill(m)
-	vm_page_t m;
-{
-	pmap_zero_page(VM_PAGE_TO_PHYS(m));
-	return (TRUE);
-}
-
-/*
- *	vm_page_copy:
- *
- *	Copy one page to another
- */
-static __inline void
-vm_page_copy(src_m, dest_m)
-	vm_page_t src_m;
-	vm_page_t dest_m;
-{
-	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
-	dest_m->valid = VM_PAGE_BITS_ALL;
-}
-
-/*
- *	vm_page_free:
- *
- *	Free a page
- *
- *	The clearing of PG_ZERO is a temporary safety until the code can be
- *	reviewed to determine that PG_ZERO is being properly cleared on
- *	write faults or maps.  PG_ZERO was previously cleared in
- *	vm_page_alloc().
- */
-static __inline void
-vm_page_free(m)
-	vm_page_t m;
-{
-	vm_page_flag_clear(m, PG_ZERO);
-	vm_page_free_toq(m);
-}
-
-/*
- *	vm_page_free_zero:
- *
- *	Free a page to the zerod-pages queue
- */
-static __inline void
-vm_page_free_zero(m)
-	vm_page_t m;
-{
-	vm_page_flag_set(m, PG_ZERO);
-	vm_page_free_toq(m);
-}
-
-/*
- *	vm_page_sleep_busy:
- *
- *	Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
- *	m->busy is zero.  Returns TRUE if it had to sleep ( including if
- *	it almost had to sleep and made temporary spl*() mods), FALSE
- *	otherwise.
- *
- *	This routine assumes that interrupts can only remove the busy
- *	status from a page, not set the busy status or change it from
- *	PG_BUSY to m->busy or vise versa (which would create a timing
- *	window).
- *
- *	Note that being an inline, this code will be well optimized.
- */
-static __inline int
-vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
-{
-	GIANT_REQUIRED;
-	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
-		int s = splvm();
-		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
-			/*
-			 * Page is busy.  Wait and retry.
-			 */
-			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
-			tsleep(m, PVM, msg, 0);
-		}
-		splx(s);
-		return(TRUE);
-		/* not reached */
-	}
-	return(FALSE);
-}
-
-/*
- *	vm_page_dirty:
- *
- *	make page all dirty
- */
-static __inline void
-vm_page_dirty(vm_page_t m)
-{
-#if !defined(KLD_MODULE)
-	KASSERT(m->queue - m->pc != PQ_CACHE, ("vm_page_dirty: page in cache!"));
-#endif
-	m->dirty = VM_PAGE_BITS_ALL;
-}
-
-/*
- *	vm_page_undirty:
- *
- *	Set page to not be dirty.  Note: does not clear pmap modify bits
- */
-static __inline void
-vm_page_undirty(vm_page_t m)
-{
-	m->dirty = 0;
-}
-
-#if !defined(KLD_MODULE)
-
-static __inline vm_page_t
-vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
-{
-	vm_page_t m;
-
-#if PQ_L2_SIZE > 1
-	if (prefer_zero) {
-		m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
-	} else {
-		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
-	}
-	if (m == NULL)
-		m = _vm_page_list_find(basequeue, index);
-#else
-	if (prefer_zero) {
-		m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
-	} else {
-		m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
-	}
-#endif
-	return(m);
-}
-
-#endif
-
 #endif				/* _KERNEL */
 #endif				/* !_VM_PAGE_ */