vfs: prealloc vnodes in getnewvnode_reserve
Having a reserved vnode count does not guarantee that getnewvnode() won't block later. Said blocking partially defeats the purpose of reserving in the first place. Preallocate instead.

The only consumer was always passing "1" as the count and never nesting reservations.
commit d9c6cac9c3
parent 85edb793f6
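As an illustration of the new contract, here is a minimal hypothetical consumer following the same pattern as the ZFS call sites below (the function name, tag, and surrounding comments are invented for this sketch; the getnewvnode*() signatures are taken from the patch):

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * With this change, getnewvnode_reserve() stashes a fully constructed
 * vnode in curthread->td_vp_reserved, so the later getnewvnode() call
 * consumes it directly and cannot sleep waiting for vnode reclamation.
 * Previously the reservation only bumped td_vp_reserv, and getnewvnode()
 * could still block in getnewvnode_wait().
 */
static int
example_alloc(struct mount *mp, struct vop_vector *vops, struct vnode **vpp)
{
	int error;

	getnewvnode_reserve();		/* was: getnewvnode_reserve(1) */
	/* ... enter a region where sleeping for a vnode is unsafe ... */
	error = getnewvnode("example", mp, vops, vpp);
	/* ... leave that region ... */
	getnewvnode_drop_reserve();	/* frees the vnode if still unused */
	return (error);
}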
@@ -815,7 +815,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
 		return (SET_ERROR(EDQUOT));
 	}
 
-	getnewvnode_reserve(1);
+	getnewvnode_reserve();
 
 	tx = dmu_tx_create(zfsvfs->z_os);
 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
@@ -1800,7 +1800,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
 		goto out;
 	}
 
-	getnewvnode_reserve(1);
+	getnewvnode_reserve();
 
 	tx = dmu_tx_create(os);
 
@@ -2092,7 +2092,7 @@ zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr)
 	/*
 	 * Add a new entry to the directory.
 	 */
-	getnewvnode_reserve(1);
+	getnewvnode_reserve();
 	tx = dmu_tx_create(zfsvfs->z_os);
 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
 	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
@@ -4003,7 +4003,7 @@ zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link,
 		return (SET_ERROR(EDQUOT));
 	}
 
-	getnewvnode_reserve(1);
+	getnewvnode_reserve();
 	tx = dmu_tx_create(zfsvfs->z_os);
 	fuid_dirtied = zfsvfs->z_fuid_dirty;
 	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
@@ -644,8 +644,8 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 
 	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
 
-	KASSERT(curthread->td_vp_reserv > 0,
-	    ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
+	KASSERT(curthread->td_vp_reserved != NULL,
+	    ("zfs_znode_alloc: getnewvnode without preallocated vnode"));
 	error = getnewvnode("zfs", zfsvfs->z_parent->z_vfs, &zfs_vnodeops, &vp);
 	if (error != 0) {
 		kmem_cache_free(znode_cache, zp);
@@ -1157,7 +1157,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
 	int err;
 
 	td = curthread;
-	getnewvnode_reserve(1);
+	getnewvnode_reserve();
 again:
 	*zpp = NULL;
 	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
@@ -82,7 +82,7 @@ _Static_assert(offsetof(struct thread, td_flags) == 0xfc,
     "struct thread KBI td_flags");
 _Static_assert(offsetof(struct thread, td_pflags) == 0x104,
     "struct thread KBI td_pflags");
-_Static_assert(offsetof(struct thread, td_frame) == 0x478,
+_Static_assert(offsetof(struct thread, td_frame) == 0x480,
     "struct thread KBI td_frame");
 _Static_assert(offsetof(struct thread, td_emuldata) == 0x690,
     "struct thread KBI td_emuldata");
@@ -187,8 +187,8 @@ userret(struct thread *td, struct trapframe *frame)
 	}
 	KASSERT(td->td_pinned == 0 || (td->td_pflags & TDP_CALLCHAIN) != 0,
 	    ("userret: Returning with with pinned thread"));
-	KASSERT(td->td_vp_reserv == 0,
-	    ("userret: Returning while holding vnode reservation"));
+	KASSERT(td->td_vp_reserved == NULL,
+	    ("userret: Returning with preallocated vnode"));
 	KASSERT((td->td_flags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
 	    ("userret: Returning with stop signals deferred"));
 	KASSERT(td->td_su == NULL,
@@ -1527,40 +1527,29 @@ getnewvnode_wait(int suspended)
  * watermark handling works.
  */
 void
-getnewvnode_reserve(u_int count)
+getnewvnode_reserve(void)
 {
 	u_long rnumvnodes, rfreevnodes;
 	struct thread *td;
 
 	/* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
 	/* XXX no longer so quick, but this part is not racy. */
+	td = curthread;
+	MPASS(td->td_vp_reserved == NULL);
 	mtx_lock(&vnode_free_list_mtx);
 	rnumvnodes = atomic_load_long(&numvnodes);
 	rfreevnodes = atomic_load_long(&freevnodes);
-	if (rnumvnodes + count > desiredvnodes && rfreevnodes > wantfreevnodes)
-		vnlru_free_locked(ulmin(rnumvnodes + count - desiredvnodes,
+	if (rnumvnodes + 1 > desiredvnodes && rfreevnodes > wantfreevnodes)
+		vnlru_free_locked(ulmin(rnumvnodes + 1 - desiredvnodes,
 		    rfreevnodes - wantfreevnodes), NULL);
-	mtx_unlock(&vnode_free_list_mtx);
-
-	td = curthread;
-	/* First try to be quick and racy. */
-	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
-		td->td_vp_reserv += count;
-		vcheckspace();	/* XXX no longer so quick, but more racy */
-		return;
-	} else
-		atomic_subtract_long(&numvnodes, count);
-
-	mtx_lock(&vnode_free_list_mtx);
-	while (count > 0) {
-		if (getnewvnode_wait(0) == 0) {
-			count--;
-			td->td_vp_reserv++;
-			atomic_add_long(&numvnodes, 1);
-		}
+	if (rnumvnodes + 1 > desiredvnodes) {
+		while (getnewvnode_wait(0) != 0)
+			continue;
 	}
 	vcheckspace();
+	atomic_add_long(&numvnodes, 1);
 	mtx_unlock(&vnode_free_list_mtx);
+	td->td_vp_reserved = uma_zalloc(vnode_zone, M_WAITOK);
 }
 
 /*
@@ -1577,8 +1566,11 @@ getnewvnode_drop_reserve(void)
 	struct thread *td;
 
 	td = curthread;
-	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
-	td->td_vp_reserv = 0;
+	if (td->td_vp_reserved != NULL) {
+		atomic_subtract_long(&numvnodes, 1);
+		uma_zfree(vnode_zone, td->td_vp_reserved);
+		td->td_vp_reserved = NULL;
+	}
 }
 
 /*
@@ -1599,11 +1591,11 @@ getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
 	KASSERT(vops->registered,
 	    ("%s: not registered vector op %p\n", __func__, vops));
 
-	vp = NULL;
 	td = curthread;
-	if (td->td_vp_reserv > 0) {
-		td->td_vp_reserv -= 1;
-		goto alloc;
+	if (td->td_vp_reserved != NULL) {
+		vp = td->td_vp_reserved;
+		td->td_vp_reserved = NULL;
+		goto init;
 	}
 	mtx_lock(&vnode_free_list_mtx);
 	if (numvnodes < desiredvnodes)
@@ -1639,9 +1631,9 @@ getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
 	vcheckspace();
 	atomic_add_long(&numvnodes, 1);
 	mtx_unlock(&vnode_free_list_mtx);
-alloc:
-	counter_u64_add(vnodes_created, 1);
 	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
+init:
+	counter_u64_add(vnodes_created, 1);
 	/*
 	 * Locks are given the generic name "vnode" when created.
 	 * Follow the historic practice of using the filesystem
@@ -297,7 +297,7 @@ struct thread {
 	struct osd	td_osd;		/* (k) Object specific data. */
 	struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
 	pid_t		td_dbg_forked;	/* (c) Child pid for debugger. */
-	u_int		td_vp_reserv;	/* (k) Count of reserved vnodes. */
+	struct vnode	*td_vp_reserved; /* (k) Preallocated vnode. */
 	u_int		td_no_sleeping;	/* (k) Sleeping disabled count. */
 	void		*td_su;		/* (k) FFS SU private */
 	sbintime_t	td_sleeptimo;	/* (t) Sleep timeout. */
@@ -624,7 +624,7 @@ void freebsd11_cvtnstat(struct stat *sb, struct nstat *nsb);
 int	freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost);
 int	getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
 	    struct vnode **vpp);
-void	getnewvnode_reserve(u_int count);
+void	getnewvnode_reserve(void);
 void	getnewvnode_drop_reserve(void);
 int	insmntque1(struct vnode *vp, struct mount *mp,
 	    void (*dtr)(struct vnode *, void *), void *dtr_arg);