Change the way that unmanaged pages are created. Specifically,

immediately flag any page that is allocated to an OBJT_PHYS object as
unmanaged in vm_page_alloc() rather than waiting for a later call to
vm_page_unmanage().  This allows for the elimination of some uses of
the page queues lock.
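
Condensed from the vm_page.c hunk below, the new path reduces to a single
flag test at allocation time:

	if (object != NULL && object->type == OBJT_PHYS)
		flags |= PG_UNMANAGED;

Because the flag is set before the page becomes visible and the page has
never been placed on a paging queue, no page queues lock is needed.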

Change the type of the kernel and kmem objects from OBJT_DEFAULT to
OBJT_PHYS.  This allows us to take advantage of the above change to
simplify the allocation of unmanaged pages in kmem_alloc() and
kmem_malloc().
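
As the vm_kern.c hunks below show, the per-page sequence

	vm_page_lock_queues();
	vm_page_unmanage(mem);
	vm_page_unlock_queues();

collapses into a lock-free assertion that vm_page_alloc() (called via
vm_page_grab() in kmem_alloc() and directly in kmem_malloc()) has already
marked the page:

	KASSERT((mem->flags & PG_UNMANAGED) != 0,
	    ("kmem_alloc: page %p is managed", mem));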

Remove vm_page_unmanage().  It is no longer used.
commit 9f5c801b94 (parent 3763835bfe)
Author: Alan Cox
Date:   2007-02-25 06:14:58 +00:00
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=166964
6 changed files with 11 additions and 48 deletions

sys/vm/phys_pager.c

@@ -150,11 +150,6 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 		}
 		KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
 		    ("phys_pager_getpages: partially valid page %p", m[i]));
-	}
-	vm_page_lock_queues();
-	for (i = 0; i < count; i++) {
-		/* Switch off pv_entries */
-		vm_page_unmanage(m[i]);
 		m[i]->dirty = 0;
 		/* The requested page must remain busy, the others not. */
 		if (reqpage != i) {
@@ -162,7 +157,6 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 			m[i]->busy = 0;
 		}
 	}
-	vm_page_unlock_queues();
 	return (VM_PAGER_OK);
 }

sys/vm/vm_kern.c

@@ -175,9 +175,8 @@ kmem_alloc(map, size)
 		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
 		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		mem->valid = VM_PAGE_BITS_ALL;
-		vm_page_lock_queues();
-		vm_page_unmanage(mem);
-		vm_page_unlock_queues();
+		KASSERT((mem->flags & PG_UNMANAGED) != 0,
+		    ("kmem_alloc: page %p is managed", mem));
 	}
 	VM_OBJECT_UNLOCK(kernel_object);
@@ -364,9 +363,8 @@ kmem_malloc(map, size, flags)
 		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		m->valid = VM_PAGE_BITS_ALL;
-		vm_page_lock_queues();
-		vm_page_unmanage(m);
-		vm_page_unlock_queues();
+		KASSERT((m->flags & PG_UNMANAGED) != 0,
+		    ("kmem_malloc: page %p is managed", m));
 	}
 	VM_OBJECT_UNLOCK(kmem_object);

sys/vm/vm_map.c

@@ -2269,8 +2269,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 		VM_OBJECT_LOCK(object);
 		if (object->ref_count != 1 &&
 		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
-		    object == kernel_object || object == kmem_object) &&
-		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
+		    object == kernel_object || object == kmem_object)) {
 			vm_object_collapse(object);
 			vm_object_page_remove(object, offidxstart, offidxend, FALSE);
 			if (object->type == OBJT_SWAP)

sys/vm/vm_object.c

@@ -249,11 +249,11 @@ vm_object_init(void)
 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

 	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
-	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
+	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
 	    kernel_object);

 	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
-	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
+	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
 	    kmem_object);

 	/*
@@ -1800,7 +1800,8 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 	 * remove pages from the object (we must instead remove the page
 	 * references, and then destroy the object).
 	 */
-	KASSERT(object->type != OBJT_PHYS,
+	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
+	    object == kmem_object,
 	    ("attempt to remove pages from a physical object"));
 	vm_object_pip_add(object, 1);

sys/vm/vm_page.c

@@ -938,6 +938,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		if (req & VM_ALLOC_ZERO)
 			flags = PG_ZERO;
 	}
+	if (object != NULL && object->type == OBJT_PHYS)
+		flags |= PG_UNMANAGED;
 	m->flags = flags;
 	if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
 		m->oflags = 0;
@@ -1168,36 +1170,6 @@ vm_page_free_toq(vm_page_t m)
 	mtx_unlock(&vm_page_queue_free_mtx);
 }

-/*
- * vm_page_unmanage:
- *
- *	Prevent PV management from being done on the page.  The page is
- *	removed from the paging queues as if it were wired, and as a
- *	consequence of no longer being managed the pageout daemon will not
- *	touch it (since there is no way to locate the pte mappings for the
- *	page).  madvise() calls that mess with the pmap will also no longer
- *	operate on the page.
- *
- *	Beyond that the page is still reasonably 'normal'.  Freeing the page
- *	will clear the flag.
- *
- *	This routine is used by OBJT_PHYS objects - objects using unswappable
- *	physical memory as backing store rather then swap-backed memory and
- *	will eventually be extended to support 4MB unmanaged physical
- *	mappings.
- */
-void
-vm_page_unmanage(vm_page_t m)
-{
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & PG_UNMANAGED) == 0) {
-		if (m->wire_count == 0)
-			vm_pageq_remove(m);
-	}
-	vm_page_flag_set(m, PG_UNMANAGED);
-}
-
 /*
  * vm_page_wire:
  *

sys/vm/vm_page.h

@@ -343,7 +343,6 @@ vm_page_t vm_page_select_cache(int);
 void vm_page_sleep(vm_page_t m, const char *msg);
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
-void vm_page_unmanage (vm_page_t);
 void vm_page_unwire (vm_page_t, int);
 void vm_page_wire (vm_page_t);
 void vm_page_set_validclean (vm_page_t, int, int);