Stop using per-mount tmpfs zones.

Requested and reviewed by:	jeff
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D22643
Konstantin Belousov 2019-12-05 00:03:17 +00:00
parent 348d9b567e
commit a51c8071a3
3 changed files with 89 additions and 65 deletions
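Before this change, every tmpfs mount created and destroyed its own pair of UMA zones for directory entries and nodes. The commit removes the tm_dirent_pool/tm_node_pool members of struct tmpfs_mount and replaces them with two file-local zones in tmpfs_subr.c that are shared by all mounts and set up once through new vfs_init/vfs_uninit hooks. A minimal sketch of that shared-zone pattern, not code from the commit itself (example_zone, struct example_item, and the function names below are hypothetical):

/*
 * Minimal sketch of the shared-zone pattern adopted by this commit.
 * "example_zone", "struct example_item", and the function names are
 * hypothetical; only the uma(9) calls are real KPIs.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <vm/uma.h>

struct example_item {
	int	ei_refs;
};

/* One global zone for every mount, instead of one zone per mount. */
static uma_zone_t example_zone;

static void
example_subr_init(void)		/* run once, e.g. from a vfs_init hook */
{
	example_zone = uma_zcreate("example item",
	    sizeof(struct example_item), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}

static void
example_subr_uninit(void)	/* run once, from the vfs_uninit hook */
{
	uma_zdestroy(example_zone);
}

/* Per-object allocation and free remain cheap per-zone operations. */
static struct example_item *
example_alloc(void)
{
	return (uma_zalloc(example_zone, M_WAITOK | M_ZERO));
}

static void
example_free(struct example_item *ei)
{
	uma_zfree(example_zone, ei);
}

uma_zcreate() is comparatively heavyweight (it sets up a keg and per-CPU caches), so doing it once per filesystem type instead of once per mount takes that cost out of the mount and unmount paths and lets all tmpfs instances share slab memory.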

sys/fs/tmpfs/tmpfs.h

@@ -378,10 +378,6 @@ struct tmpfs_mount {
 	/* All node lock to protect the node list and tmp_pages_used. */
 	struct mtx		tm_allnode_lock;
 
-	/* Zones used to store file system meta data, per tmpfs mount. */
-	uma_zone_t		tm_dirent_pool;
-	uma_zone_t		tm_node_pool;
-
 	/* Read-only status. */
 	bool			tm_ronly;
 	/* Do not use namecache. */
@@ -493,8 +489,9 @@ struct tmpfs_dirent *tmpfs_dir_next(struct tmpfs_node *dnode,
 #endif
 
 size_t tmpfs_mem_avail(void);
 size_t tmpfs_pages_used(struct tmpfs_mount *tmp);
+void tmpfs_subr_init(void);
+void tmpfs_subr_uninit(void);
 
 #endif

sys/fs/tmpfs/tmpfs_subr.c

@@ -72,6 +72,73 @@ SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmpfs file system");
 
 static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;
 
+static uma_zone_t tmpfs_dirent_pool;
+static uma_zone_t tmpfs_node_pool;
+
+static int
+tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
+{
+	struct tmpfs_node *node;
+
+	node = mem;
+	node->tn_gen++;
+	node->tn_size = 0;
+	node->tn_status = 0;
+	node->tn_flags = 0;
+	node->tn_links = 0;
+	node->tn_vnode = NULL;
+	node->tn_vpstate = 0;
+	return (0);
+}
+
+static void
+tmpfs_node_dtor(void *mem, int size, void *arg)
+{
+	struct tmpfs_node *node;
+
+	node = mem;
+	node->tn_type = VNON;
+}
+
+static int
+tmpfs_node_init(void *mem, int size, int flags)
+{
+	struct tmpfs_node *node;
+
+	node = mem;
+	node->tn_id = 0;
+	mtx_init(&node->tn_interlock, "tmpfsni", NULL, MTX_DEF);
+	node->tn_gen = arc4random();
+	return (0);
+}
+
+static void
+tmpfs_node_fini(void *mem, int size)
+{
+	struct tmpfs_node *node;
+
+	node = mem;
+	mtx_destroy(&node->tn_interlock);
+}
+
+void
+tmpfs_subr_init(void)
+{
+	tmpfs_dirent_pool = uma_zcreate("TMPFS dirent",
+	    sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, 0);
+	tmpfs_node_pool = uma_zcreate("TMPFS node",
+	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
+	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
+}
+
+void
+tmpfs_subr_uninit(void)
+{
+	uma_zdestroy(tmpfs_node_pool);
+	uma_zdestroy(tmpfs_dirent_pool);
+}
+
 static int
 sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
 {
@@ -219,8 +286,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
 	if ((mp->mnt_kern_flag & MNT_RDONLY) != 0)
 		return (EROFS);
 
-	nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
-	    M_WAITOK);
+	nnode = uma_zalloc_arg(tmpfs_node_pool, tmp, M_WAITOK);
 
 	/* Generic initialization. */
 	nnode->tn_type = type;
@@ -367,7 +433,7 @@ tmpfs_free_node_locked(struct tmpfs_mount *tmp, struct tmpfs_node *node,
 		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
 	}
 
-	uma_zfree(tmp->tm_node_pool, node);
+	uma_zfree(tmpfs_node_pool, node);
 	TMPFS_LOCK(tmp);
 	tmpfs_free_tmp(tmp);
 	return (true);
@@ -434,7 +500,7 @@ tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
 {
 	struct tmpfs_dirent *nde;
 
-	nde = uma_zalloc(tmp->tm_dirent_pool, M_WAITOK);
+	nde = uma_zalloc(tmpfs_dirent_pool, M_WAITOK);
 	nde->td_node = node;
 	if (name != NULL) {
 		nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
@@ -470,7 +536,7 @@ tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
 	}
 	if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
 		free(de->ud.td_name, M_TMPFSNAME);
-	uma_zfree(tmp->tm_dirent_pool, de);
+	uma_zfree(tmpfs_dirent_pool, de);
 }
 
 void
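A note on the callback split above, following uma(9) semantics: tmpfs_node_init() and tmpfs_node_fini() run only when UMA imports or releases the backing slab memory, so tn_interlock stays initialized and tn_gen keeps incrementing across uma_zalloc()/uma_zfree() cycles, while tmpfs_node_ctor() and tmpfs_node_dtor() run on every allocation and free and reset just the per-use fields. Bumping tn_gen in the ctor gives a recycled node a fresh generation number, which is what keeps a stale file handle (checked against tn_gen in tmpfs_fhtovp()) from matching a node that has since been reused.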
sys/fs/tmpfs/tmpfs_vfsops.c

@@ -99,49 +99,6 @@ static const char *tmpfs_updateopts[] = {
 	"from", "export", "size", NULL
 };
 
-static int
-tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
-{
-	struct tmpfs_node *node = (struct tmpfs_node *)mem;
-
-	node->tn_gen++;
-	node->tn_size = 0;
-	node->tn_status = 0;
-	node->tn_flags = 0;
-	node->tn_links = 0;
-	node->tn_vnode = NULL;
-	node->tn_vpstate = 0;
-
-	return (0);
-}
-
-static void
-tmpfs_node_dtor(void *mem, int size, void *arg)
-{
-	struct tmpfs_node *node = (struct tmpfs_node *)mem;
-
-	node->tn_type = VNON;
-}
-
-static int
-tmpfs_node_init(void *mem, int size, int flags)
-{
-	struct tmpfs_node *node = (struct tmpfs_node *)mem;
-
-	node->tn_id = 0;
-	mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
-	node->tn_gen = arc4random();
-
-	return (0);
-}
-
-static void
-tmpfs_node_fini(void *mem, int size)
-{
-	struct tmpfs_node *node = (struct tmpfs_node *)mem;
-
-	mtx_destroy(&node->tn_interlock);
-}
-
 /*
  * Handle updates of time from writes to mmaped regions. Use
  * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
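The four callbacks deleted here are the same ones added to tmpfs_subr.c above; the only changes in the move are stylistic (the cast of mem becomes a plain assignment) plus a shorter mutex name, "tmpfsni" in place of "tmpfs node interlock".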
@@ -481,12 +438,6 @@ tmpfs_mount(struct mount *mp)
 	tmp->tm_pages_max = pages;
 	tmp->tm_pages_used = 0;
 	new_unrhdr64(&tmp->tm_ino_unr, 2);
-	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
-	    sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR, 0);
-	tmp->tm_node_pool = uma_zcreate("TMPFS node",
-	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
-	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
 	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
 	tmp->tm_nonc = nonc;
@@ -495,8 +446,6 @@ tmpfs_mount(struct mount *mp)
 	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);
 
 	if (error != 0 || root == NULL) {
-		uma_zdestroy(tmp->tm_node_pool);
-		uma_zdestroy(tmp->tm_dirent_pool);
 		free(tmp, M_TMPFSMNT);
 		return (error);
 	}
@@ -590,9 +539,6 @@ tmpfs_free_tmp(struct tmpfs_mount *tmp)
 	}
 	TMPFS_UNLOCK(tmp);
 
-	uma_zdestroy(tmp->tm_dirent_pool);
-	uma_zdestroy(tmp->tm_node_pool);
-
 	mtx_destroy(&tmp->tm_allnode_lock);
 	MPASS(tmp->tm_pages_used == 0);
 	MPASS(tmp->tm_nodes_inuse == 0);
@@ -702,10 +648,23 @@ tmpfs_susp_clean(struct mount *mp __unused)
 {
 }
 
+static int
+tmpfs_init(struct vfsconf *conf)
+{
+	tmpfs_subr_init();
+	return (0);
+}
+
+static int
+tmpfs_uninit(struct vfsconf *conf)
+{
+	tmpfs_subr_uninit();
+	return (0);
+}
+
 /*
  * tmpfs vfs operations.
  */
 struct vfsops tmpfs_vfsops = {
 	.vfs_mount =	tmpfs_mount,
 	.vfs_unmount =	tmpfs_unmount,
@@ -715,5 +674,7 @@ struct vfsops tmpfs_vfsops = {
 	.vfs_fhtovp =	tmpfs_fhtovp,
 	.vfs_sync =	tmpfs_sync,
 	.vfs_susp_clean = tmpfs_susp_clean,
+	.vfs_init =	tmpfs_init,
+	.vfs_uninit =	tmpfs_uninit,
 };
 VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);
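Two ordering properties make the shared zones safe here: vfs_init runs when the filesystem type is registered, before any tmpfs mount can exist, and vfs_uninit runs only when the module can be unloaded, i.e. after the last mount is gone. One observable effect of the change: vmstat -z should now report a single "TMPFS dirent" and "TMPFS node" zone regardless of how many tmpfs instances are mounted, where previously each mount contributed its own pair.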