- Remove vm_object_init2(). It is unused.

- Add a mtx_destroy() to vm_object_collapse().  (This allows a bzero()
   to migrate from _vm_object_allocate() to vm_object_zinit(), where it
   will be performed less often; see the sketch below.)
Alan Cox 2002-12-29 21:01:14 +00:00
parent e4c3e988a5
commit e3a9e1b2a8
3 changed files with 3 additions and 8 deletions
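
Background for the bzero() migration: UMA invokes a zone's init callback only when it constructs a fresh item from slab memory, whereas per-allocation setup such as _vm_object_allocate() runs on every allocation, including the many that are satisfied from the zone's caches. The following is a minimal sketch of that split, not the actual vm_object code; the item type, zone, and function names are invented for illustration, and the init callback uses the two-argument form shown in the diff below.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/uma.h>

/* Hypothetical item type standing in for struct vm_object. */
struct item {
	struct mtx	it_mtx;
	int		it_busy;
};

static uma_zone_t item_zone;

/*
 * Zone init: runs only when UMA imports a fresh item from the slab
 * layer, not on every uma_zalloc() served from the caches, so the
 * bzero() is performed less often here.
 */
static void
item_zinit(void *mem, int size)
{
	struct item *it;

	it = (struct item *)mem;
	bzero(&it->it_mtx, sizeof(it->it_mtx));
	it->it_busy = 0;
}

/*
 * Per-allocation setup: runs for every item handed out, so only the
 * cheap mtx_init() remains here (as in _vm_object_allocate()).
 */
static void
item_setup(struct item *it)
{
	mtx_init(&it->it_mtx, "item", NULL, MTX_DEF);
}

/*
 * Free path: destroy the mutex before the item goes back to the zone;
 * the next consumer will mtx_init() it again, and an initialized mutex
 * must be destroyed before it may be re-initialized (the role of the
 * mtx_destroy() added to vm_object_collapse()).
 */
static void
item_free(struct item *it)
{
	mtx_destroy(&it->it_mtx);
	uma_zfree(item_zone, it);
}

static void
item_zone_create(void)
{
	item_zone = uma_zcreate("items", sizeof(struct item),
	    NULL, NULL, item_zinit, NULL, UMA_ALIGN_PTR, 0);
}

Because zone items typically cycle through the caches many times between slab imports, zeroing in the init callback amortizes the cost that the old per-allocation bzero() paid on every vm_object_allocate().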

sys/vm/vm_map.c

@@ -275,7 +275,6 @@ vm_init2(void)
 #endif
 	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	pmap_init2();
-	vm_object_init2();
 }
 
 static __inline void

sys/vm/vm_object.c

@@ -180,6 +180,7 @@ vm_object_zinit(void *mem, int size)
 	vm_object_t object;
 
 	object = (vm_object_t)mem;
+	bzero(&object->mtx, sizeof(object->mtx));
 
 	/* These are true for any object that has been freed */
 	object->paging_in_progress = 0;
@@ -192,7 +193,6 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
 {
 	int incr;
 
-	bzero(&object->mtx, sizeof(object->mtx));
 	mtx_init(&object->mtx, "vm object", NULL, MTX_DEF);
 	TAILQ_INIT(&object->memq);
@@ -252,11 +252,6 @@ vm_object_init(void)
 	uma_prealloc(obj_zone, VM_OBJECTS_INIT);
 }
 
-void
-vm_object_init2(void)
-{
-}
-
 void
 vm_object_set_flag(vm_object_t object, u_short bits)
 {
@@ -1630,6 +1625,8 @@ vm_object_collapse(vm_object_t object)
 		);
 		mtx_unlock(&vm_object_list_mtx);
 
+		mtx_destroy(&backing_object->mtx);
+
 		uma_zfree(obj_zone, backing_object);
 
 		object_collapses++;

sys/vm/vm_object.h

@@ -207,7 +207,6 @@ void vm_object_reference (vm_object_t);
 void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
 void vm_object_split(vm_map_entry_t);
 void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
-void vm_object_init2 (void);
 
 #endif /* _KERNEL */
 #endif /* _VM_OBJECT_ */