In the past four years, we've added two new vm object types. Each time,
similar changes had to be made in various places throughout the
machine-independent virtual memory layer to support the new vm object
type. However, in most of these places, it's actually not the type of the
vm object that matters to us but instead certain attributes of its pages.
For example, OBJT_DEVICE, OBJT_MGTDEVICE, and OBJT_SG objects contain
fictitious pages.  In other words, in most of these places, we were
testing the vm object's type to determine if it contained fictitious (or
unmanaged) pages.

To both simplify the code in these places and make the addition of future
vm object types easier, this change introduces two new vm object flags
that describe attributes of the vm object's pages, specifically, whether
they are fictitious or unmanaged.
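
As a rough sketch of what this buys us: call sites now test the
page attribute they care about instead of enumerating object types.
The minimal, compilable C example below mirrors that idiom; the struct
is a simplified stand-in for the kernel's struct vm_object, and only the
flag names and values are taken from the vm_object.h hunk further down.

	#include <stdio.h>

	/* Flag bits as defined in the vm_object.h hunk below. */
	#define	OBJ_FICTITIOUS	0x0001	/* contains fictitious pages */
	#define	OBJ_UNMANAGED	0x0002	/* contains unmanaged pages */

	/* Simplified stand-in for the kernel's struct vm_object. */
	struct vm_object_sketch {
		int flags;
	};

	int
	main(void)
	{
		/* An OBJT_DEVICE-like object: fictitious and unmanaged. */
		struct vm_object_sketch dev = { OBJ_FICTITIOUS | OBJ_UNMANAGED };
		/* An OBJT_MGTDEVICE-like object: fictitious but managed. */
		struct vm_object_sketch mgtdev = { OBJ_FICTITIOUS };

		/*
		 * Callers test the attribute, not the type, so a future
		 * object type with the same page attributes needs no
		 * changes at sites like these.
		 */
		if ((dev.flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) != 0)
			printf("dev: not eligible for caching\n");
		if ((mgtdev.flags & OBJ_UNMANAGED) == 0)
			printf("mgtdev: pages are managed\n");
		return (0);
	}

A hypothetical future object type whose pages share these attributes
would only need the right flags set at allocation time, as the new
switch statement in _vm_object_allocate() below does.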

Reviewed and tested by:	kib
Author:	Alan Cox
Date:	2012-12-09 00:32:38 +00:00
Commit:	2863482058 (parent 1f60bfd822)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=244043

7 changed files with 47 additions and 30 deletions

sys/vm/vm_fault.c

@@ -968,8 +968,8 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
 			VM_OBJECT_LOCK(object);
 		}
 	}
-	if (first_object->type != OBJT_DEVICE &&
-	    first_object->type != OBJT_PHYS && first_object->type != OBJT_SG) {
+	/* Neither fictitious nor unmanaged pages can be cached. */
+	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
 		if (fs->first_pindex < distance)
 			pindex = 0;
 		else

sys/vm/vm_map.c

@@ -2324,8 +2324,8 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 			 */
 			vm_fault_unwire(map, entry->start, entry->end,
 			    entry->object.vm_object != NULL &&
-			    (entry->object.vm_object->type == OBJT_DEVICE ||
-			    entry->object.vm_object->type == OBJT_SG));
+			    (entry->object.vm_object->flags &
+			    OBJ_FICTITIOUS) != 0);
 		}
 	}
 	KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
@@ -2445,8 +2445,8 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 			saved_start = entry->start;
 			saved_end = entry->end;
 			fictitious = entry->object.vm_object != NULL &&
-			    (entry->object.vm_object->type == OBJT_DEVICE ||
-			    entry->object.vm_object->type == OBJT_SG);
+			    (entry->object.vm_object->flags &
+			    OBJ_FICTITIOUS) != 0;
 			/*
 			 * Release the map lock, relying on the in-transition
 			 * mark. Mark the map busy for fork.
@@ -2544,8 +2544,8 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 			 */
 			vm_fault_unwire(map, entry->start, entry->end,
 			    entry->object.vm_object != NULL &&
-			    (entry->object.vm_object->type == OBJT_DEVICE ||
-			    entry->object.vm_object->type == OBJT_SG));
+			    (entry->object.vm_object->flags &
+			    OBJ_FICTITIOUS) != 0);
 		}
 	}
 next_entry_done:
@@ -2681,8 +2681,7 @@ vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 {
 	vm_fault_unwire(map, entry->start, entry->end,
 	    entry->object.vm_object != NULL &&
-	    (entry->object.vm_object->type == OBJT_DEVICE ||
-	    entry->object.vm_object->type == OBJT_SG));
+	    (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
 	entry->wired_count = 0;
 }

sys/vm/vm_meter.c

@@ -200,7 +200,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		 * synchronization should not impair the accuracy of
 		 * the reported statistics.
 		 */
-		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
+		if ((object->flags & OBJ_FICTITIOUS) != 0) {
 			/*
 			 * Devices, like /dev/mem, will badly skew our totals.
 			 */

sys/vm/vm_object.c

@@ -212,15 +212,35 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
 	object->root = NULL;
 	object->type = type;
+	switch (type) {
+	case OBJT_DEAD:
+		panic("_vm_object_allocate: can't create OBJT_DEAD");
+	case OBJT_DEFAULT:
+	case OBJT_SWAP:
+		object->flags = OBJ_ONEMAPPING;
+		break;
+	case OBJT_DEVICE:
+	case OBJT_SG:
+		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
+		break;
+	case OBJT_MGTDEVICE:
+		object->flags = OBJ_FICTITIOUS;
+		break;
+	case OBJT_PHYS:
+		object->flags = OBJ_UNMANAGED;
+		break;
+	case OBJT_VNODE:
+		object->flags = 0;
+		break;
+	default:
+		panic("_vm_object_allocate: type %d is undefined", type);
+	}
 	object->size = size;
 	object->generation = 1;
 	object->ref_count = 1;
 	object->memattr = VM_MEMATTR_DEFAULT;
-	object->flags = 0;
 	object->cred = NULL;
 	object->charge = 0;
-	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
-		object->flags = OBJ_ONEMAPPING;
 	object->pg_color = 0;
 	object->handle = NULL;
 	object->backing_object = NULL;
@@ -1064,7 +1084,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
 			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
 				goto unlock_tobject;
 			}
-		} else if (tobject->type == OBJT_PHYS)
+		} else if ((tobject->flags & OBJ_UNMANAGED) != 0)
 			goto unlock_tobject;
 		m = vm_page_lookup(tobject, tpindex);
 		if (m == NULL && advise == MADV_WILLNEED) {
@@ -1834,7 +1854,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 	int wirings;

 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_PHYS) ||
+	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
 	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
 	    ("vm_object_page_remove: illegal options for object %p", object));
 	if (object->resident_page_count == 0)
@@ -1918,7 +1938,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
  *	pages are moved to the cache queue.
  *
  *	This operation should only be performed on objects that
- *	contain managed pages.
+ *	contain non-fictitious, managed pages.
  *
  *	The object must be locked.
  */
@@ -1929,8 +1949,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 	vm_page_t p, next;

 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_SG &&
-	    object->type != OBJT_PHYS),
+	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
 	    ("vm_object_page_cache: illegal object %p", object));
 	if (object->resident_page_count == 0)
 		return;

sys/vm/vm_object.h

@@ -165,6 +165,8 @@ struct vm_object {
 /*
  * Flags
  */
+#define	OBJ_FICTITIOUS	0x0001		/* (c) contains fictitious pages */
+#define	OBJ_UNMANAGED	0x0002		/* (c) contains unmanaged pages */
 #define	OBJ_ACTIVE	0x0004		/* active objects */
 #define	OBJ_DEAD	0x0008		/* dead objects (during rundown) */
 #define	OBJ_NOSPLIT	0x0010		/* dont split this object */

sys/vm/vm_page.c

@@ -1414,9 +1414,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		mtx_unlock(&vm_page_queue_free_mtx);
 		return (NULL);
 #if VM_NRESERVLEVEL > 0
-	} else if (object == NULL || object->type == OBJT_DEVICE ||
-	    object->type == OBJT_SG ||
-	    (object->flags & OBJ_COLORED) == 0 ||
+	} else if (object == NULL || (object->flags & (OBJ_COLORED |
+	    OBJ_FICTITIOUS)) != OBJ_COLORED ||
 	    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
 #else
 	} else {
@@ -1491,10 +1490,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	m->flags = flags;
 	mtx_unlock(&vm_page_queue_free_mtx);
 	m->aflags = 0;
-	if (object == NULL || object->type == OBJT_PHYS)
-		m->oflags = VPO_UNMANAGED;
-	else
-		m->oflags = 0;
+	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
+	    VPO_UNMANAGED : 0;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
 		m->oflags |= VPO_BUSY;
 	if (req & VM_ALLOC_WIRED) {
@@ -1510,7 +1507,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	if (object != NULL) {
 		/* Ignore device objects; the pager sets "memattr" for them. */
 		if (object->memattr != VM_MEMATTR_DEFAULT &&
-		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
+		    (object->flags & OBJ_FICTITIOUS) == 0)
 			pmap_page_set_memattr(m, object->memattr);
 		vm_page_insert(m, object, pindex);
 	} else

sys/vm/vm_pageout.c

@@ -705,14 +705,14 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
 	int actcount, remove_mode;

 	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
-	if (first_object->type == OBJT_DEVICE ||
-	    first_object->type == OBJT_SG)
+	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
 		return;
 	for (object = first_object;; object = backing_object) {
 		if (pmap_resident_count(pmap) <= desired)
 			goto unlock_return;
 		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-		if (object->type == OBJT_PHYS || object->paging_in_progress)
+		if ((object->flags & OBJ_UNMANAGED) != 0 ||
+		    object->paging_in_progress != 0)
 			goto unlock_return;
 		remove_mode = 0;