tmpfs: dynamically register tmpfs pager

Remove OBJT_SWAP_TMPFS. Move tmpfs-specific swap pager bits into
tmpfs_subr.c.

There is no longer any code in sys/vm that directly supports tmpfs;
most tmpfs knowledge is shared with the non-anonymous swap object type
implementation.  The tmpfs-specific methods are provided by the
dynamically registered tmpfs pager, which inherits from the swap pager.

Reviewed by:	markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D30168
commit 28bc23ab92 (parent b730fd30b7)
Author: Konstantin Belousov
Date:   2021-05-07 22:42:06 +03:00
10 changed files with 107 additions and 88 deletions
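For orientation, the change boils down to the pattern sketched below. This is condensed from the hunks that follow rather than copied verbatim: the full tmpfs_pager_ops also wires pgo_set_writeable_dirty and pgo_mightbedirty, and the UMA node-zone setup in tmpfs_subr_init() is omitted here.

/*
 * Condensed sketch of the pattern introduced below: tmpfs supplies the
 * pager methods it overrides, and the dynamic registration inherits
 * the rest from OBJT_SWAP.
 */
struct pagerops tmpfs_pager_ops = {
    .pgo_kvme_type = KVME_TYPE_VNODE,
    .pgo_alloc = tmpfs_pager_alloc,     /* swap-backed object allocation */
    .pgo_getvp = tmpfs_pager_getvp,     /* map an object back to its vnode */
    /* pgo_* hooks left NULL are inherited from the swap pager. */
};

int tmpfs_pager_type = -1;

int
tmpfs_subr_init(void)
{
    /* Claim a dynamic object type slot, inheriting from OBJT_SWAP. */
    tmpfs_pager_type = vm_pager_alloc_dyn_type(&tmpfs_pager_ops, OBJT_SWAP);
    if (tmpfs_pager_type == -1)
        return (EINVAL);
    return (0);
}

void
tmpfs_subr_uninit(void)
{
    /* Release the slot again on module unload. */
    if (tmpfs_pager_type != -1)
        vm_pager_free_dyn_type(tmpfs_pager_type);
    tmpfs_pager_type = -1;
}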

View File

@@ -45,6 +45,8 @@
MALLOC_DECLARE(M_TMPFSNAME);
#endif
#define OBJ_TMPFS OBJ_PAGERPRIV /* has tmpfs vnode allocated */
/*
* Internal representation of a tmpfs directory entry.
*/
@@ -514,9 +516,11 @@ tmpfs_update(struct vnode *vp)
size_t tmpfs_mem_avail(void);
size_t tmpfs_pages_used(struct tmpfs_mount *tmp);
void tmpfs_subr_init(void);
int tmpfs_subr_init(void);
void tmpfs_subr_uninit(void);
extern int tmpfs_pager_type;
/*
* Macros/functions to convert from generic data structures to tmpfs
* specific ones.

View File

@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
#include <sys/smr.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
@@ -79,6 +80,61 @@ MALLOC_DEFINE(M_TMPFSDIR, "tmpfs dir", "tmpfs dirent structure");
static uma_zone_t tmpfs_node_pool;
VFS_SMR_DECLARE;
int tmpfs_pager_type = -1;
static vm_object_t
tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
MPASS(handle == NULL);
MPASS(offset == 0);
object = vm_object_allocate_dyn(tmpfs_pager_type, size,
OBJ_COLORED | OBJ_SWAP);
if (!swap_pager_init_object(object, NULL, NULL, size, 0)) {
vm_object_deallocate(object);
object = NULL;
}
return (object);
}
static void
tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
struct vnode *vp;
/*
* Tmpfs VREG node, which was reclaimed, has tmpfs_pager_type
* type, but not OBJ_TMPFS flag. In this case there is no
* v_writecount to adjust.
*/
if (vp_heldp != NULL)
VM_OBJECT_RLOCK(object);
else
VM_OBJECT_ASSERT_LOCKED(object);
if ((object->flags & OBJ_TMPFS) != 0) {
vp = object->un_pager.swp.swp_tmpfs;
if (vp != NULL) {
*vpp = vp;
if (vp_heldp != NULL) {
vhold(vp);
*vp_heldp = true;
}
}
}
if (vp_heldp != NULL)
VM_OBJECT_RUNLOCK(object);
}
struct pagerops tmpfs_pager_ops = {
.pgo_kvme_type = KVME_TYPE_VNODE,
.pgo_alloc = tmpfs_pager_alloc,
.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
.pgo_mightbedirty = vm_object_mightbedirty_,
.pgo_getvp = tmpfs_pager_getvp,
};
static int
tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
{
@@ -126,18 +182,26 @@ tmpfs_node_fini(void *mem, int size)
mtx_destroy(&node->tn_interlock);
}
void
int
tmpfs_subr_init(void)
{
tmpfs_pager_type = vm_pager_alloc_dyn_type(&tmpfs_pager_ops,
OBJT_SWAP);
if (tmpfs_pager_type == -1)
return (EINVAL);
tmpfs_node_pool = uma_zcreate("TMPFS node",
sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
VFS_SMR_ZONE_SET(tmpfs_node_pool);
return (0);
}
void
tmpfs_subr_uninit(void)
{
if (tmpfs_pager_type != -1)
vm_pager_free_dyn_type(tmpfs_pager_type);
tmpfs_pager_type = -1;
uma_zdestroy(tmpfs_node_pool);
}
@@ -364,7 +428,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
case VREG:
obj = nnode->tn_reg.tn_aobj =
vm_pager_allocate(OBJT_SWAP_TMPFS, NULL, 0,
vm_pager_allocate(tmpfs_pager_type, NULL, 0,
VM_PROT_DEFAULT, 0,
NULL /* XXXKIB - tmpfs needs swap reservation */);
/* OBJ_TMPFS is set together with the setting of vp->v_object */
@@ -1588,7 +1652,7 @@ tmpfs_check_mtime(struct vnode *vp)
if (vp->v_type != VREG)
return;
obj = vp->v_object;
KASSERT(obj->type == OBJT_SWAP_TMPFS &&
KASSERT(obj->type == tmpfs_pager_type &&
(obj->flags & (OBJ_SWAP | OBJ_TMPFS)) ==
(OBJ_SWAP | OBJ_TMPFS), ("non-tmpfs obj"));
/* unlocked read */
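The tmpfs_pager_getvp() method added above is intended to be reached generically, so that callers in sys/vm need no tmpfs-specific knowledge. Below is a minimal sketch of such a dispatch; the object_getvp() helper name is hypothetical, and it assumes pagertab[] is visible to the caller, which these hunks do not show.

/*
 * Hypothetical helper (not part of this commit): resolve the vnode, if
 * any, behind a VM object without compiling in tmpfs knowledge.  A
 * tmpfs object answers through tmpfs_pager_getvp() above; pagers that
 * do not implement pgo_getvp are simply skipped.
 */
static void
object_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
    const struct pagerops *ops;

    *vpp = NULL;
    if (vp_heldp != NULL)
        *vp_heldp = false;
    ops = pagertab[object->type];
    if (ops != NULL && ops->pgo_getvp != NULL)
        ops->pgo_getvp(object, vpp, vp_heldp);
}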

View File

@@ -103,8 +103,8 @@ static const char *tmpfs_updateopts[] = {
* Handle updates of time from writes to mmaped regions, if allowed.
* Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
* unmap of the tmpfs-backed vnode does not call vinactive(), due to
* vm object type is OBJT_SWAP. If lazy, only handle delayed update
* of mtime due to the writes to mapped files.
* vm object type is basically OBJT_SWAP. If lazy, only handle
* delayed update of mtime due to the writes to mapped files.
*/
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
@@ -120,7 +120,7 @@ tmpfs_update_mtime(struct mount *mp, bool lazy)
continue;
}
obj = vp->v_object;
MPASS(obj->type == OBJT_SWAP_TMPFS);
MPASS(obj->type == tmpfs_pager_type);
MPASS((obj->flags & OBJ_TMPFS) != 0);
/*
@@ -225,7 +225,7 @@ tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
(entry->max_protection & VM_PROT_WRITE) == 0)
continue;
object = entry->object.vm_object;
if (object == NULL || object->type != OBJT_SWAP_TMPFS)
if (object == NULL || object->type != tmpfs_pager_type)
continue;
/*
* No need to dig into shadow chain, mapping
@@ -661,7 +661,11 @@ tmpfs_sync(struct mount *mp, int waitfor)
static int
tmpfs_init(struct vfsconf *conf)
{
tmpfs_subr_init();
int res;
res = tmpfs_subr_init();
if (res != 0)
return (res);
memcpy(&tmpfs_fnops, &vnops, sizeof(struct fileops));
tmpfs_fnops.fo_close = tmpfs_fo_close;
return (0);

View File

@@ -623,7 +623,7 @@ tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
if (object == NULL)
goto out_smr;
MPASS(object->type == OBJT_SWAP_TMPFS);
MPASS(object->type == tmpfs_pager_type);
MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
OBJ_SWAP);
if (!VN_IS_DOOMED(vp)) {

View File

@@ -419,9 +419,6 @@ static uma_zone_t swpctrie_zone;
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size,
vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static vm_object_t
swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size,
vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void swap_pager_dealloc(vm_object_t object);
static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
int *);
@@ -437,8 +434,6 @@ static void swap_pager_update_writecount(vm_object_t object,
vm_offset_t start, vm_offset_t end);
static void swap_pager_release_writecount(vm_object_t object,
vm_offset_t start, vm_offset_t end);
static void swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp,
bool *vp_heldp);
static void swap_pager_freespace(vm_object_t object, vm_pindex_t start,
vm_size_t size);
@@ -457,23 +452,6 @@ const struct pagerops swappagerops = {
.pgo_freespace = swap_pager_freespace,
};
const struct pagerops swaptmpfspagerops = {
.pgo_kvme_type = KVME_TYPE_VNODE,
.pgo_alloc = swap_tmpfs_pager_alloc,
.pgo_dealloc = swap_pager_dealloc,
.pgo_getpages = swap_pager_getpages,
.pgo_getpages_async = swap_pager_getpages_async,
.pgo_putpages = swap_pager_putpages,
.pgo_haspage = swap_pager_haspage,
.pgo_pageunswapped = swap_pager_unswapped,
.pgo_update_writecount = swap_pager_update_writecount,
.pgo_release_writecount = swap_pager_release_writecount,
.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
.pgo_mightbedirty = vm_object_mightbedirty_,
.pgo_getvp = swap_tmpfs_pager_getvp,
.pgo_freespace = swap_pager_freespace,
};
/*
* swap_*() routines are externally accessible. swp_*() routines are
* internal.
@@ -681,18 +659,31 @@ swap_pager_swap_init(void)
"reduce kern.maxswzone.\n");
}
bool
swap_pager_init_object(vm_object_t object, void *handle, struct ucred *cred,
vm_ooffset_t size, vm_ooffset_t offset)
{
if (cred != NULL) {
if (!swap_reserve_by_cred(size, cred))
return (false);
crhold(cred);
}
object->un_pager.swp.writemappings = 0;
object->handle = handle;
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
return (true);
}
static vm_object_t
swap_pager_alloc_init(objtype_t otype, void *handle, struct ucred *cred,
vm_ooffset_t size, vm_ooffset_t offset)
{
vm_object_t object;
if (cred != NULL) {
if (!swap_reserve_by_cred(size, cred))
return (NULL);
crhold(cred);
}
/*
* The un_pager.swp.swp_blks trie is initialized by
* vm_object_allocate() to ensure the correct order of
@@ -701,11 +692,9 @@ swap_pager_alloc_init(objtype_t otype, void *handle, struct ucred *cred,
object = vm_object_allocate(otype, OFF_TO_IDX(offset +
PAGE_MASK + size));
object->un_pager.swp.writemappings = 0;
object->handle = handle;
if (cred != NULL) {
object->cred = cred;
object->charge = size;
if (!swap_pager_init_object(object, handle, cred, size, offset)) {
vm_object_deallocate(object);
return (NULL);
}
return (object);
}
@@ -752,18 +741,6 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
return (object);
}
static vm_object_t
swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset, struct ucred *cred)
{
vm_object_t object;
MPASS(handle == NULL);
object = swap_pager_alloc_init(OBJT_SWAP_TMPFS, handle, cred,
size, offset);
return (object);
}
/*
* SWAP_PAGER_DEALLOC() - remove swap metadata from object
*
@@ -3168,31 +3145,3 @@ swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
VM_OBJECT_WUNLOCK(object);
}
static void
swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
struct vnode *vp;
/*
* Tmpfs VREG node, which was reclaimed, has OBJT_SWAP_TMPFS
* type, but not OBJ_TMPFS flag. In this case there is no
* v_writecount to adjust.
*/
if (vp_heldp != NULL)
VM_OBJECT_RLOCK(object);
else
VM_OBJECT_ASSERT_LOCKED(object);
if ((object->flags & OBJ_TMPFS) != 0) {
vp = object->un_pager.swp.swp_tmpfs;
if (vp != NULL) {
*vpp = vp;
if (vp_heldp != NULL) {
vhold(vp);
*vp_heldp = true;
}
}
}
if (vp_heldp != NULL)
VM_OBJECT_RUNLOCK(object);
}
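swap_pager_init_object() is the hook that lets a derived pager reuse the swap pager's handle and credential setup. Below is a sketch, not code from this commit, of how another dynamically registered, swap-derived pager could charge its backing store to a credential; mypager_type is a hypothetical type obtained from vm_pager_alloc_dyn_type(). On reservation failure the half-constructed object is dropped, mirroring tmpfs_pager_alloc() above.

/*
 * Sketch of a pgo_alloc method for a hypothetical swap-derived pager
 * ("mypager") that charges the allocation to the supplied credential.
 */
static int mypager_type = -1;   /* filled in by vm_pager_alloc_dyn_type() */

static vm_object_t
mypager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
    vm_object_t object;

    object = vm_object_allocate_dyn(mypager_type, OFF_TO_IDX(offset +
        PAGE_MASK + size), OBJ_COLORED | OBJ_SWAP);
    if (!swap_pager_init_object(object, handle, cred, size, offset)) {
        /* swap_reserve_by_cred() failed; drop the fresh object. */
        vm_object_deallocate(object);
        object = NULL;
    }
    return (object);
}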

View File

@@ -82,6 +82,7 @@ int swap_pager_reserve(vm_object_t, vm_pindex_t, vm_size_t);
void swap_pager_status(int *total, int *used);
u_long swap_pager_swapped_pages(vm_object_t object);
void swapoff_all(void);
bool swap_pager_init_object(vm_object_t object, void *handle,
struct ucred *cred, vm_ooffset_t size, vm_ooffset_t offset);
#endif /* _KERNEL */
#endif /* _VM_SWAP_PAGER_H_ */

View File

@@ -97,7 +97,6 @@ enum obj_type {
OBJT_DEAD,
OBJT_SG,
OBJT_MGTDEVICE,
OBJT_SWAP_TMPFS,
OBJT_FIRST_DYN,
};
typedef u_char objtype_t;

View File

@@ -413,7 +413,6 @@ vm_object_allocate(objtype_t type, vm_pindex_t size)
flags = OBJ_COLORED;
break;
case OBJT_SWAP:
case OBJT_SWAP_TMPFS:
flags = OBJ_COLORED | OBJ_SWAP;
break;
case OBJT_DEVICE:

View File

@@ -207,7 +207,7 @@ struct vm_object {
#define OBJ_COLORED 0x1000 /* pg_color is defined */
#define OBJ_ONEMAPPING 0x2000 /* One USE (a single, non-forked) mapping flag */
#define OBJ_SHADOWLIST 0x4000 /* Object is on the shadow list. */
#define OBJ_TMPFS 0x8000 /* has tmpfs vnode allocated */
#define OBJ_PAGERPRIV 0x8000 /* Pager private */
/*
* Helpers to perform conversion between vm_object page indexes and offsets.

View File

@@ -174,7 +174,6 @@ const struct pagerops *pagertab[16] __read_mostly = {
[OBJT_DEAD] = &deadpagerops,
[OBJT_SG] = &sgpagerops,
[OBJT_MGTDEVICE] = &mgtdevicepagerops,
[OBJT_SWAP_TMPFS] = &swaptmpfspagerops,
};
static struct mtx pagertab_lock;
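The registration machinery that consumes this table is not part of the shown hunks. As an assumption about its shape rather than the committed code, vm_pager_alloc_dyn_type() can be pictured as claiming a free pagertab[] slot at or above OBJT_FIRST_DYN under pagertab_lock and copying any pgo_* methods left NULL from the base type, which is how the tmpfs pager ends up inheriting the swap pager's paging paths.

/*
 * Sketch (an assumption about the routine's shape, not the committed
 * code): register a pagerops under a free dynamic slot and inherit
 * unset methods from base_type, e.g. OBJT_SWAP.
 */
int
vm_pager_alloc_dyn_type(struct pagerops *ops, int base_type)
{
    int res;

    mtx_lock(&pagertab_lock);
    for (res = OBJT_FIRST_DYN; res < nitems(pagertab); res++) {
        if (pagertab[res] == NULL)
            break;
    }
    if (res == nitems(pagertab)) {
        /* All dynamic slots are taken. */
        mtx_unlock(&pagertab_lock);
        return (-1);
    }
    if (base_type != -1) {
        /* Inherit anything the caller did not override. */
        if (ops->pgo_dealloc == NULL)
            ops->pgo_dealloc = pagertab[base_type]->pgo_dealloc;
        if (ops->pgo_getpages == NULL)
            ops->pgo_getpages = pagertab[base_type]->pgo_getpages;
        if (ops->pgo_putpages == NULL)
            ops->pgo_putpages = pagertab[base_type]->pgo_putpages;
        if (ops->pgo_haspage == NULL)
            ops->pgo_haspage = pagertab[base_type]->pgo_haspage;
        /* ...likewise for the remaining pgo_* hooks. */
    }
    pagertab[res] = ops;
    mtx_unlock(&pagertab_lock);
    return (res);
}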