Add OBJT_SWAP_TMPFS pager

This is the OBJT_SWAP pager, specialized for tmpfs.  Right now, both the
swap pager and generic VM code have to explicitly handle swap objects
which are tmpfs vnode v_objects, in special ways.  Replace (almost) all
such places with proper pager methods.

Since VM still needs a notion of the 'swap object', regardless of its
use, add yet another type-classification flag OBJ_SWAP. Set it in
vm_object_allocate() where other type-class flags are set.

This change almost completely eliminates the knowledge of tmpfs from VM,
and opens a way to make OBJT_SWAP_TMPFS loadable from tmpfs.ko.

Reviewed by:	markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D30070
commit 4b8365d752
parent 0d2dfc6fed
Author: Konstantin Belousov
Date:   2021-05-01 04:18:00 +03:00

15 changed files with 115 additions and 95 deletions
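
Before the per-file diffs, a minimal compilable sketch of the idiom this
change introduces (simplified stand-in types, not the kernel's struct
vm_object): "swap-backed" becomes the OBJ_SWAP type-class flag, so a single
flag test replaces type comparisons that previously had to special-case
tmpfs.

/*
 * Sketch only: simplified stand-ins for vm_object fields, illustrating
 * the idiom the diffs below introduce.  The real code uses struct
 * vm_object, objtype_t, and the OBJ_* definitions from vm_object.h.
 */
#include <stdbool.h>

enum objtype { OBJT_DEFAULT, OBJT_SWAP, OBJT_SWAP_TMPFS };
#define	OBJ_SWAP	0x0200	/* type-class flag, set for both swap types */

struct object {
	enum objtype type;
	unsigned flags;
};

/* Old idiom: a bare type check, which cannot see OBJT_SWAP_TMPFS. */
static bool
swap_backed_old(const struct object *o)
{
	return (o->type == OBJT_SWAP);
}

/* New idiom: one flag test covers every swap-backed object type. */
static bool
swap_backed_new(const struct object *o)
{
	return ((o->flags & OBJ_SWAP) != 0);
}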

--- a/sys/fs/procfs/procfs_map.c
+++ b/sys/fs/procfs/procfs_map.c
@@ -166,17 +166,16 @@ procfs_doprocmap(PFS_FILL_ARGS)
 			vp = lobj->handle;
 			vref(vp);
 			break;
-		case OBJT_SWAP:
-			if ((lobj->flags & OBJ_TMPFS_NODE) != 0) {
-				type = "vnode";
-				if ((lobj->flags & OBJ_TMPFS) != 0) {
-					vp = lobj->un_pager.swp.swp_tmpfs;
-					vref(vp);
-				}
-			} else {
-				type = "swap";
+		case OBJT_SWAP_TMPFS:
+			type = "vnode";
+			if ((lobj->flags & OBJ_TMPFS) != 0) {
+				vp = lobj->un_pager.swp.swp_tmpfs;
+				vref(vp);
 			}
 			break;
+		case OBJT_SWAP:
+			type = "swap";
+			break;
 		case OBJT_SG:
 		case OBJT_DEVICE:
 			type = "device";

--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -364,12 +364,10 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
 	case VREG:
 		obj = nnode->tn_reg.tn_aobj =
-		    vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
+		    vm_pager_allocate(OBJT_SWAP_TMPFS, NULL, 0,
+		    VM_PROT_DEFAULT, 0,
 		    NULL /* XXXKIB - tmpfs needs swap reservation */);
-		VM_OBJECT_WLOCK(obj);
-		/* OBJ_TMPFS is set together with the setting of vp->v_object */
-		vm_object_set_flag(obj, OBJ_TMPFS_NODE);
-		VM_OBJECT_WUNLOCK(obj);
 		nnode->tn_reg.tn_tmp = tmp;
 		break;
@@ -1590,8 +1588,9 @@ tmpfs_check_mtime(struct vnode *vp)
 	if (vp->v_type != VREG)
 		return;
 	obj = vp->v_object;
-	KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
-	    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+	KASSERT(obj->type == OBJT_SWAP_TMPFS &&
+	    (obj->flags & (OBJ_SWAP | OBJ_TMPFS)) ==
+	    (OBJ_SWAP | OBJ_TMPFS), ("non-tmpfs obj"));
 	/* unlocked read */
 	if (obj->generation != obj->cleangeneration) {
 		VM_OBJECT_WLOCK(obj);

--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -120,8 +120,8 @@ tmpfs_update_mtime(struct mount *mp, bool lazy)
 			continue;
 		}
 		obj = vp->v_object;
-		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
-		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+		MPASS(obj->type == OBJT_SWAP_TMPFS);
+		MPASS((obj->flags & OBJ_TMPFS) != 0);
 		/*
 		 * In lazy case, do unlocked read, avoid taking vnode
@@ -225,8 +225,7 @@ tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
 			    (entry->max_protection & VM_PROT_WRITE) == 0)
 				continue;
 			object = entry->object.vm_object;
-			if (object == NULL || object->type != OBJT_SWAP ||
-			    (object->flags & OBJ_TMPFS_NODE) == 0)
+			if (object == NULL || object->type != OBJT_SWAP_TMPFS)
 				continue;
 			/*
 			 * No need to dig into shadow chain, mapping
@@ -239,8 +238,7 @@ tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
 				continue;
 			}
 			MPASS(object->ref_count > 1);
-			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
-			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
+			if ((object->flags & OBJ_TMPFS) == 0) {
 				VM_OBJECT_RUNLOCK(object);
 				continue;
 			}

--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -623,8 +623,9 @@ tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
 	if (object == NULL)
 		goto out_smr;
-	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_TMPFS_NODE)) ==
-	    OBJ_TMPFS_NODE);
+	MPASS(object->type == OBJT_SWAP_TMPFS);
+	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
+	    OBJ_SWAP);
 	if (!VN_IS_DOOMED(vp)) {
 		/* size cannot become shorter due to rangelock. */
 		size = node->tn_size;

--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -418,6 +418,9 @@ static uma_zone_t swpctrie_zone;
 static vm_object_t
 		swap_pager_alloc(void *handle, vm_ooffset_t size,
 		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
+static vm_object_t
+		swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size,
+		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
 static void swap_pager_dealloc(vm_object_t object);
 static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
     int *);
@@ -433,9 +436,7 @@ static void swap_pager_update_writecount(vm_object_t object,
     vm_offset_t start, vm_offset_t end);
 static void swap_pager_release_writecount(vm_object_t object,
     vm_offset_t start, vm_offset_t end);
-static void swap_pager_set_writeable_dirty(vm_object_t object);
-static bool swap_pager_mightbedirty(vm_object_t object);
-static void swap_pager_getvp(vm_object_t object, struct vnode **vpp,
+static void swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp,
     bool *vp_heldp);
 static void swap_pager_freespace(vm_object_t object, vm_pindex_t start,
     vm_size_t size);
@@ -451,9 +452,22 @@ struct pagerops swappagerops = {
 	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
 	.pgo_update_writecount = swap_pager_update_writecount,
 	.pgo_release_writecount = swap_pager_release_writecount,
-	.pgo_set_writeable_dirty = swap_pager_set_writeable_dirty,
-	.pgo_mightbedirty = swap_pager_mightbedirty,
-	.pgo_getvp = swap_pager_getvp,
 	.pgo_freespace = swap_pager_freespace,
 };
+
+struct pagerops swaptmpfspagerops = {
+	.pgo_alloc = swap_tmpfs_pager_alloc,
+	.pgo_dealloc = swap_pager_dealloc,
+	.pgo_getpages = swap_pager_getpages,
+	.pgo_getpages_async = swap_pager_getpages_async,
+	.pgo_putpages = swap_pager_putpages,
+	.pgo_haspage = swap_pager_haspage,
+	.pgo_pageunswapped = swap_pager_unswapped,
+	.pgo_update_writecount = swap_pager_update_writecount,
+	.pgo_release_writecount = swap_pager_release_writecount,
+	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
+	.pgo_mightbedirty = vm_object_mightbedirty_,
+	.pgo_getvp = swap_tmpfs_pager_getvp,
+	.pgo_freespace = swap_pager_freespace,
+};
@@ -665,8 +679,8 @@ swap_pager_swap_init(void)
 }
 
 static vm_object_t
-swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
-    vm_ooffset_t offset)
+swap_pager_alloc_init(objtype_t otype, void *handle, struct ucred *cred,
+    vm_ooffset_t size, vm_ooffset_t offset)
 {
 	vm_object_t object;
@@ -681,7 +695,7 @@ swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
 	 * vm_object_allocate() to ensure the correct order of
 	 * visibility to other threads.
 	 */
-	object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
+	object = vm_object_allocate(otype, OFF_TO_IDX(offset +
 	    PAGE_MASK + size));
 
 	object->un_pager.swp.writemappings = 0;
@@ -720,8 +734,8 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
 		sx_xlock(&sw_alloc_sx);
 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
 		if (object == NULL) {
-			object = swap_pager_alloc_init(handle, cred, size,
-			    offset);
+			object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
+			    size, offset);
 			if (object != NULL) {
 				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
 				    object, pager_object_list);
@@ -729,11 +743,24 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
 		}
 		sx_xunlock(&sw_alloc_sx);
 	} else {
-		object = swap_pager_alloc_init(handle, cred, size, offset);
+		object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
+		    size, offset);
 	}
 	return (object);
 }
 
+static vm_object_t
+swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
+    vm_ooffset_t offset, struct ucred *cred)
+{
+	vm_object_t object;
+
+	MPASS(handle == NULL);
+	object = swap_pager_alloc_init(OBJT_SWAP_TMPFS, handle, cred,
+	    size, offset);
+	return (object);
+}
+
 /*
  * SWAP_PAGER_DEALLOC() - remove swap metadata from object
  *
@@ -775,6 +802,7 @@ swap_pager_dealloc(vm_object_t object)
 	swp_pager_meta_free_all(object);
 	object->handle = NULL;
 	object->type = OBJT_DEAD;
+	vm_object_clear_flag(object, OBJ_SWAP);
 }
 
 /************************************************************************
@@ -1003,9 +1031,9 @@ swp_pager_xfer_source(vm_object_t srcobject, vm_object_t dstobject,
 {
 	daddr_t dstaddr;
 
-	KASSERT(srcobject->type == OBJT_SWAP,
+	KASSERT((srcobject->flags & OBJ_SWAP) != 0,
 	    ("%s: Srcobject not swappable", __func__));
-	if (dstobject->type == OBJT_SWAP &&
+	if ((dstobject->flags & OBJ_SWAP) != 0 &&
 	    swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) {
 		/* Caller should destroy the source block. */
 		return (false);
@@ -1086,6 +1114,7 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
 		 * queues.
 		 */
 		srcobject->type = OBJT_DEFAULT;
+		vm_object_clear_flag(srcobject, OBJ_SWAP);
 	}
 }
@@ -1107,7 +1136,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: object not swappable", __func__));
 
 	/*
@@ -1203,7 +1232,7 @@ swap_pager_unswapped(vm_page_t m)
 	 * The meta data only exists if the object is OBJT_SWAP
 	 * and even then might not be allocated yet.
 	 */
-	KASSERT(m->object->type == OBJT_SWAP,
+	KASSERT((m->object->flags & OBJ_SWAP) != 0,
 	    ("Free object not swappable"));
 
 	sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
@@ -1241,7 +1270,7 @@ swap_pager_getpages_locked(vm_object_t object, vm_page_t *ma, int count,
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	reqcount = count;
 
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: object not swappable", __func__));
 	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) {
 		VM_OBJECT_WUNLOCK(object);
@@ -1475,7 +1504,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
 	 *
 	 * Turn object into OBJT_SWAP. Force sync if not a pageout process.
 	 */
-	if (object->type != OBJT_SWAP) {
+	if ((object->flags & OBJ_SWAP) == 0) {
 		addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE);
 		KASSERT(addr == SWAPBLK_NONE,
 		    ("unexpected object swap block"));
@@ -1780,7 +1809,7 @@ swap_pager_swapped_pages(vm_object_t object)
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
-	if (object->type != OBJT_SWAP)
+	if ((object->flags & OBJ_SWAP) == 0)
 		return (0);
 
 	for (res = 0, pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
@@ -1809,7 +1838,7 @@ swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
 	daddr_t blk;
 	int i, nv, rahead, rv;
 
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: Object not swappable", __func__));
 
 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
@@ -1917,7 +1946,7 @@ swap_pager_swapoff(struct swdevt *sp)
 full_rescan:
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
-		if (object->type != OBJT_SWAP)
+		if ((object->flags & OBJ_SWAP) == 0)
 			continue;
 		mtx_unlock(&vm_object_list_mtx);
 		/* Depends on type-stability. */
@@ -1936,7 +1965,7 @@ swap_pager_swapoff(struct swdevt *sp)
 		 * dead.
 		 */
 		atomic_thread_fence_acq();
-		if (object->type != OBJT_SWAP)
+		if ((object->flags & OBJ_SWAP) == 0)
 			goto next_obj;
 
 		swap_pager_swapoff_object(sp, object);
@@ -2031,7 +2060,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
 	/*
 	 * Convert default object to swap object if necessary
 	 */
-	if (object->type != OBJT_SWAP) {
+	if ((object->flags & OBJ_SWAP) == 0) {
 		pctrie_init(&object->un_pager.swp.swp_blks);
 
 		/*
@@ -2041,6 +2070,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
 		atomic_thread_fence_rel();
 		object->type = OBJT_SWAP;
+		vm_object_set_flag(object, OBJ_SWAP);
 		object->un_pager.swp.writemappings = 0;
 		KASSERT((object->flags & OBJ_ANON) != 0 ||
 		    object->handle == NULL,
@@ -2149,7 +2179,7 @@ swp_pager_meta_transfer(vm_object_t srcobject, vm_object_t dstobject,
 	int i, limit, start;
 
 	VM_OBJECT_ASSERT_WLOCKED(srcobject);
-	if (srcobject->type != OBJT_SWAP || count == 0)
+	if ((srcobject->flags & OBJ_SWAP) == 0 || count == 0)
 		return;
 
 	swp_pager_init_freerange(&s_free, &n_free);
@@ -2216,7 +2246,7 @@ swp_pager_meta_free_all(vm_object_t object)
 	int i;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
-	if (object->type != OBJT_SWAP)
+	if ((object->flags & OBJ_SWAP) == 0)
 		return;
 
 	swp_pager_init_freerange(&s_free, &n_free);
@@ -2256,7 +2286,7 @@ swp_pager_meta_lookup(vm_object_t object, vm_pindex_t pindex)
 	 * The meta data only exists if the object is OBJT_SWAP
 	 * and even then might not be allocated yet.
 	 */
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("Lookup object not swappable"));
 
 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
@@ -2280,7 +2310,7 @@ swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
-	if (object->type != OBJT_SWAP)
+	if ((object->flags & OBJ_SWAP) == 0)
 		return (object->size);
 	sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
@@ -2735,10 +2765,10 @@ vmspace_swap_count(struct vmspace *vmspace)
 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
 			continue;
 		object = cur->object.vm_object;
-		if (object == NULL || object->type != OBJT_SWAP)
+		if (object == NULL || (object->flags & OBJ_SWAP) == 0)
 			continue;
 		VM_OBJECT_RLOCK(object);
-		if (object->type != OBJT_SWAP)
+		if ((object->flags & OBJ_SWAP) == 0)
 			goto unlock;
 		pi = OFF_TO_IDX(cur->offset);
 		e = pi + OFF_TO_IDX(cur->end - cur->start);
@@ -3133,32 +3163,14 @@ swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
 }
 
 static void
-swap_pager_set_writeable_dirty(vm_object_t object)
-{
-	if ((object->flags & OBJ_TMPFS_NODE) != 0)
-		vm_object_set_writeable_dirty_(object);
-}
-
-static bool
-swap_pager_mightbedirty(vm_object_t object)
-{
-	if ((object->flags & OBJ_TMPFS_NODE) != 0)
-		return (vm_object_mightbedirty_(object));
-	return (false);
-}
-
-static void
-swap_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
+swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
 {
 	struct vnode *vp;
 
-	KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
-	    ("swap_pager_getvp: swap and !TMPFS obj %p", object));
-
 	/*
-	 * Tmpfs VREG node, which was reclaimed, has
-	 * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS. In
-	 * this case there is no v_writecount to adjust.
+	 * Tmpfs VREG node, which was reclaimed, has OBJT_SWAP_TMPFS
+	 * type, but not OBJ_TMPFS flag. In this case there is no
+	 * v_writecount to adjust.
 	 */
 	if (vp_heldp != NULL)
 		VM_OBJECT_RLOCK(object);
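
Net effect of the swap_pager.c changes above: tmpfs objects get their own
pagerops table that reuses the swap pager's paging methods while overriding
the vnode-related hooks. A simplified sketch of that dispatch pattern
follows (hypothetical names, not the real vm_pager interfaces; the actual
table wiring is in the vm_pager.c hunk further below).

/*
 * Sketch only, with hypothetical simplified types: generic code
 * dispatches through a per-type method table instead of testing for
 * tmpfs at every call site.
 */
#include <stdbool.h>

enum objtype { OBJT_SWAP, OBJT_SWAP_TMPFS, OBJT_NTYPES };

struct object {
	enum objtype type;
	unsigned generation, cleangeneration;
};

struct pagerops {
	bool (*pgo_mightbedirty)(struct object *);
};

/* An anonymous swap object has no backing vnode, so it is never "dirty". */
static bool
never_dirty(struct object *o)
{
	(void)o;
	return (false);
}

/* A tmpfs-backed object reports dirtiness from its generation counters. */
static bool
tmpfs_mightbedirty(struct object *o)
{
	return (o->generation != o->cleangeneration);
}

static const struct pagerops swapops = {
	.pgo_mightbedirty = never_dirty,
};
static const struct pagerops swaptmpfsops = {
	.pgo_mightbedirty = tmpfs_mightbedirty,
};

static const struct pagerops *pagertab[OBJT_NTYPES] = {
	[OBJT_SWAP] = &swapops,
	[OBJT_SWAP_TMPFS] = &swaptmpfsops,
};

/* Callers go through the table; no tmpfs special-casing remains. */
static bool
object_mightbedirty(struct object *o)
{
	return (pagertab[o->type]->pgo_mightbedirty(o));
}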

--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -97,6 +97,7 @@ enum obj_type {
 	OBJT_DEAD,
 	OBJT_SG,
 	OBJT_MGTDEVICE,
+	OBJT_SWAP_TMPFS,
 };
 typedef u_char objtype_t;

--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1947,7 +1947,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
 		crhold(dst_object->cred);
 		*fork_charge += dst_object->charge;
 	} else if ((dst_object->type == OBJT_DEFAULT ||
-	    dst_object->type == OBJT_SWAP) &&
+	    (dst_object->flags & OBJ_SWAP) != 0) &&
 	    dst_object->cred == NULL) {
 		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
 		    dst_entry));

--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2826,10 +2826,12 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 			continue;
 		}
-		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP)
+		if (obj->type != OBJT_DEFAULT &&
+		    (obj->flags & OBJ_SWAP) == 0)
 			continue;
 		VM_OBJECT_WLOCK(obj);
-		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
+		if (obj->type != OBJT_DEFAULT &&
+		    (obj->flags & OBJ_SWAP) == 0) {
 			VM_OBJECT_WUNLOCK(obj);
 			continue;
 		}
@@ -4140,7 +4142,7 @@ vm_map_copy_entry(
 	size = src_entry->end - src_entry->start;
 	if ((src_object = src_entry->object.vm_object) != NULL) {
 		if (src_object->type == OBJT_DEFAULT ||
-		    src_object->type == OBJT_SWAP) {
+		    (src_object->flags & OBJ_SWAP) != 0) {
 			vm_map_copy_swap_object(src_entry, dst_entry,
 			    size, fork_charge);
 			/* May have split/collapsed, reload obj. */

--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -930,7 +930,7 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
 			VM_OBJECT_WLOCK(object);
 		}
 		if (object->type == OBJT_DEFAULT ||
-		    object->type == OBJT_SWAP ||
+		    (object->flags & OBJ_SWAP) != 0 ||
 		    object->type == OBJT_VNODE) {
 			pindex = OFF_TO_IDX(current->offset +
 			    (addr - current->start));
@@ -1357,7 +1357,8 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 			goto done;
 		}
 	} else {
-		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
+		KASSERT(obj->type == OBJT_DEFAULT ||
+		    (obj->flags & OBJ_SWAP) != 0,
 		    ("wrong object type"));
 		vm_object_reference(obj);
 #if VM_NRESERVLEVEL > 0

--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -240,7 +240,8 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
 	LIST_INIT(&object->shadow_head);
 
 	object->type = type;
-	if (type == OBJT_SWAP)
+	object->flags = flags;
+	if ((flags & OBJ_SWAP) != 0)
 		pctrie_init(&object->un_pager.swp.swp_blks);
 
 	/*
@@ -251,7 +252,6 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
 	atomic_thread_fence_rel();
 
 	object->pg_color = 0;
-	object->flags = flags;
 	object->size = size;
 	object->domain.dr_policy = NULL;
 	object->generation = 1;
@@ -337,6 +337,7 @@ vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
 	case OBJT_PHYS:
 	case OBJT_SG:
 	case OBJT_SWAP:
+	case OBJT_SWAP_TMPFS:
 	case OBJT_VNODE:
 		if (!TAILQ_EMPTY(&object->memq))
 			return (KERN_FAILURE);
@@ -421,9 +422,12 @@ vm_object_allocate(objtype_t type, vm_pindex_t size)
 	case OBJT_DEAD:
 		panic("vm_object_allocate: can't create OBJT_DEAD");
 	case OBJT_DEFAULT:
-	case OBJT_SWAP:
 		flags = OBJ_COLORED;
 		break;
+	case OBJT_SWAP:
+	case OBJT_SWAP_TMPFS:
+		flags = OBJ_COLORED | OBJ_SWAP;
+		break;
 	case OBJT_DEVICE:
 	case OBJT_SG:
 		flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
@@ -573,7 +577,7 @@ vm_object_deallocate_anon(vm_object_t backing_object)
 	KASSERT(object != NULL && backing_object->shadow_count == 1,
 	    ("vm_object_anon_deallocate: ref_count: %d, shadow_count: %d",
 	    backing_object->ref_count, backing_object->shadow_count));
-	KASSERT((object->flags & (OBJ_TMPFS_NODE | OBJ_ANON)) == OBJ_ANON,
+	KASSERT((object->flags & OBJ_ANON) != 0,
 	    ("invalid shadow object %p", object));
 
 	if (!VM_OBJECT_TRYWLOCK(object)) {
@@ -677,7 +681,7 @@ vm_object_deallocate(vm_object_t object)
 		umtx_shm_object_terminated(object);
 		temp = object->backing_object;
 		if (temp != NULL) {
-			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
+			KASSERT(object->type != OBJT_SWAP_TMPFS,
 			    ("shadowed tmpfs v_object 2 %p", object));
 			vm_object_backing_remove(object);
 		}
@@ -958,7 +962,7 @@ vm_object_terminate(vm_object_t object)
 #endif
 
 	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
-	    object->type == OBJT_SWAP,
+	    object->type == OBJT_SWAP || object->type == OBJT_SWAP_TMPFS,
 	    ("%s: non-swap obj %p has cred", __func__, object));
 
 	/*
@@ -1627,7 +1631,7 @@ vm_object_split(vm_map_entry_t entry)
 		else if (m_busy == NULL)
 			m_busy = m;
 	}
-	if (orig_object->type == OBJT_SWAP) {
+	if ((orig_object->flags & OBJ_SWAP) != 0) {
 		/*
 		 * swap_pager_copy() can sleep, in which case the orig_object's
 		 * and new_object's locks are released and reacquired.
@@ -1955,7 +1959,7 @@ vm_object_collapse(vm_object_t object)
 			/*
 			 * Move the pager from backing_object to object.
 			 */
-			if (backing_object->type == OBJT_SWAP) {
+			if ((backing_object->flags & OBJ_SWAP) != 0) {
 				/*
 				 * swap_pager_copy() can sleep, in which case
 				 * the backing_object's and object's locks are
@@ -2482,9 +2486,9 @@ vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
 	case OBJT_VNODE:
 		return (KVME_TYPE_VNODE);
 	case OBJT_SWAP:
-		if ((object->flags & OBJ_TMPFS_NODE) != 0)
-			return (KVME_TYPE_VNODE);
 		return (KVME_TYPE_SWAP);
+	case OBJT_SWAP_TMPFS:
+		return (KVME_TYPE_VNODE);
 	case OBJT_DEVICE:
 		return (KVME_TYPE_DEVICE);
 	case OBJT_PHYS:

--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -201,7 +201,7 @@ struct vm_object {
 #define	OBJ_UMTXDEAD	0x0020		/* umtx pshared was terminated */
 #define	OBJ_SIZEVNLOCK	0x0040		/* lock vnode to check obj size */
 #define	OBJ_PG_DTOR	0x0080		/* dont reset object, leave that for dtor */
-#define	OBJ_TMPFS_NODE	0x0200		/* object belongs to tmpfs VREG node */
+#define	OBJ_SWAP	0x0200		/* object swaps */
 #define	OBJ_SPLIT	0x0400		/* object is being split */
 #define	OBJ_COLLAPSING	0x0800		/* Parent of collapse. */
 #define	OBJ_COLORED	0x1000		/* pg_color is defined */

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2639,7 +2639,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
 			}
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if (object->type != OBJT_DEFAULT &&
-			    object->type != OBJT_SWAP &&
+			    (object->flags & OBJ_SWAP) == 0 &&
 			    object->type != OBJT_VNODE) {
 				run_ext = 0;
 #if VM_NRESERVLEVEL > 0
@@ -2777,7 +2777,7 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if (m->object != object ||
 			    (object->type != OBJT_DEFAULT &&
-			    object->type != OBJT_SWAP &&
+			    (object->flags & OBJ_SWAP) == 0 &&
 			    object->type != OBJT_VNODE))
 				error = EINVAL;
 			else if (object->memattr != VM_MEMATTR_DEFAULT)

--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -545,7 +545,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
 			 * clog the laundry and inactive queues. (We will try
 			 * paging it out again later.)
 			 */
-			if (object->type == OBJT_SWAP &&
+			if ((object->flags & OBJ_SWAP) != 0 &&
 			    pageout_status[i] == VM_PAGER_FAIL) {
 				vm_page_unswappable(mt);
 				numpagedout++;
@@ -897,7 +897,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
 				vm_page_free(m);
 				VM_CNT_INC(v_dfree);
 			} else if ((object->flags & OBJ_DEAD) == 0) {
-				if (object->type != OBJT_SWAP &&
+				if ((object->flags & OBJ_SWAP) == 0 &&
 				    object->type != OBJT_DEFAULT)
 					pageout_ok = true;
 				else if (disable_swap_pageouts)
@@ -1890,6 +1890,7 @@ vm_pageout_oom_pagecount(struct vmspace *vmspace)
 		switch (obj->type) {
 		case OBJT_DEFAULT:
 		case OBJT_SWAP:
+		case OBJT_SWAP_TMPFS:
 		case OBJT_PHYS:
 		case OBJT_VNODE:
 			res += obj->resident_page_count;

--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -172,6 +172,7 @@ struct pagerops *pagertab[] = {
 	[OBJT_DEAD] = &deadpagerops,
 	[OBJT_SG] = &sgpagerops,
 	[OBJT_MGTDEVICE] = &mgtdevicepagerops,
+	[OBJT_SWAP_TMPFS] = &swaptmpfspagerops,
 };
 
 void
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -95,6 +95,7 @@ extern struct pagerops devicepagerops;
 extern struct pagerops physpagerops;
 extern struct pagerops sgpagerops;
 extern struct pagerops mgtdevicepagerops;
+extern struct pagerops swaptmpfspagerops;
 
 /*
  * get/put return values