Add a KPI to reserve some amount of space in the numvnodes counter,
without actually allocating the vnodes.

The intended use of getnewvnode_reserve(9) is to reclaim enough free
vnodes while the code does not yet hold any resources that might be
needed during the reclamation, and then to consume the slack for
getnewvnode() calls made deeper in the call chain. After the critical
block is finished, the caller shall release any remaining reserve with
getnewvnode_drop_reserve(9).

Reviewed by:	avg
Tested by:	pho
MFC after:	1 week
Konstantin Belousov 2012-10-14 19:43:37 +00:00
parent da016e84de
commit 9b233e2307
4 changed files with 78 additions and 25 deletions
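
For illustration, a minimal sketch of how a hypothetical filesystem operation might use the new KPI. The my_fs_* names, the header list, and the reserve count of 2 are invented for this example; only getnewvnode_reserve(), getnewvnode(), and getnewvnode_drop_reserve() come from this commit. Because the reserve is accounted per thread in td->td_vp_reserv, the reserve and drop calls must be made on the same thread, and the reserve must be dropped before returning to user mode (the new assertion in userret() below checks this).

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/* Placeholder vop table for the sketch. */
static struct vop_vector my_fs_vnodeops;

static int
my_fs_mkpair(struct mount *mp, struct vnode **dvpp, struct vnode **vpp)
{
        int error;

        /*
         * Prepay for the vnodes before any filesystem resources are
         * acquired, so that any waiting for vnode reclamation happens
         * while nothing that could deadlock is held.
         */
        getnewvnode_reserve(2);

        /* ... lock filesystem metadata, start a transaction, etc. ... */

        /* These consume the per-thread reserve instead of sleeping. */
        error = getnewvnode("my_fs", mp, &my_fs_vnodeops, dvpp);
        if (error == 0)
                error = getnewvnode("my_fs", mp, &my_fs_vnodeops, vpp);

        /* ... finish the operation and release the resources ... */

        /* Return whatever part of the reserve was not consumed. */
        getnewvnode_drop_reserve();
        return (error);
}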

sys/kern/subr_trap.c

@@ -154,6 +154,8 @@ userret(struct thread *td, struct trapframe *frame)
             ("userret: Returning with sleep disabled"));
         KASSERT(td->td_pinned == 0,
             ("userret: Returning with pinned thread"));
+        KASSERT(td->td_vp_reserv == 0,
+            ("userret: Returning while holding vnode reservation"));
 #ifdef VIMAGE
         /* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
         VNET_ASSERT(curvnet == NULL,

sys/kern/vfs_subr.c

@@ -935,34 +935,22 @@ vtryrecycle(struct vnode *vp)
 }
 
 /*
- * Return the next vnode from the free list.
+ * Wait for available vnodes.
  */
-int
-getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
-    struct vnode **vpp)
+static int
+getnewvnode_wait(int suspended)
 {
-        struct vnode *vp = NULL;
-        struct bufobj *bo;
 
-        CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
-        mtx_lock(&vnode_free_list_mtx);
-        /*
-         * Lend our context to reclaim vnodes if they've exceeded the max.
-         */
-        if (freevnodes > wantfreevnodes)
-                vnlru_free(1);
-        /*
-         * Wait for available vnodes.
-         */
+        mtx_assert(&vnode_free_list_mtx, MA_OWNED);
         if (numvnodes > desiredvnodes) {
-                if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
+                if (suspended) {
                         /*
                          * File system is being suspended, we cannot risk a
                          * deadlock here, so allocate new vnode anyway.
                          */
                         if (freevnodes > wantfreevnodes)
                                 vnlru_free(freevnodes - wantfreevnodes);
-                        goto alloc;
+                        return (0);
                 }
                 if (vnlruproc_sig == 0) {
                         vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
@@ -970,16 +958,76 @@ getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
                 }
                 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
                     "vlruwk", hz);
-#if 0   /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
-                if (numvnodes > desiredvnodes) {
-                        mtx_unlock(&vnode_free_list_mtx);
-                        return (ENFILE);
-                }
-#endif
         }
-alloc:
+        return (numvnodes > desiredvnodes ? ENFILE : 0);
+}
+
+void
+getnewvnode_reserve(u_int count)
+{
+        struct thread *td;
+
+        td = curthread;
+        mtx_lock(&vnode_free_list_mtx);
+        while (count > 0) {
+                if (getnewvnode_wait(0) == 0) {
+                        count--;
+                        td->td_vp_reserv++;
+                        numvnodes++;
+                }
+        }
+        mtx_unlock(&vnode_free_list_mtx);
+}
+
+void
+getnewvnode_drop_reserve(void)
+{
+        struct thread *td;
+
+        td = curthread;
+        mtx_lock(&vnode_free_list_mtx);
+        KASSERT(numvnodes >= td->td_vp_reserv, ("reserve too large"));
+        numvnodes -= td->td_vp_reserv;
+        mtx_unlock(&vnode_free_list_mtx);
+        td->td_vp_reserv = 0;
+}
+
+/*
+ * Return the next vnode from the free list.
+ */
+int
+getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
+    struct vnode **vpp)
+{
+        struct vnode *vp;
+        struct bufobj *bo;
+        struct thread *td;
+        int error;
+
+        CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
+        vp = NULL;
+        td = curthread;
+        if (td->td_vp_reserv > 0) {
+                td->td_vp_reserv -= 1;
+                goto alloc;
+        }
+        mtx_lock(&vnode_free_list_mtx);
+        /*
+         * Lend our context to reclaim vnodes if they've exceeded the max.
+         */
+        if (freevnodes > wantfreevnodes)
+                vnlru_free(1);
+        error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
+            MNTK_SUSPEND));
+#if 0   /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
+        if (error != 0) {
+                mtx_unlock(&vnode_free_list_mtx);
+                return (error);
+        }
+#endif
         numvnodes++;
         mtx_unlock(&vnode_free_list_mtx);
+alloc:
         vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
         /*
          * Setup locks.

sys/sys/proc.h

@@ -272,6 +272,7 @@ struct thread {
         struct osd      td_osd;         /* (k) Object specific data. */
         struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
         pid_t           td_dbg_forked;  /* (c) Child pid for debugger. */
+        u_int           td_vp_reserv;   /* (k) Count of reserved vnodes. */
 #define td_endzero      td_sigmask
 
 /* Copied during fork1() or create_thread(). */

sys/sys/vnode.h

@@ -600,6 +600,8 @@ void cvtstat(struct stat *st, struct ostat *ost);
 void    cvtnstat(struct stat *sb, struct nstat *nsb);
 int     getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
             struct vnode **vpp);
+void    getnewvnode_reserve(u_int count);
+void    getnewvnode_drop_reserve(void);
 int     insmntque1(struct vnode *vp, struct mount *mp,
             void (*dtr)(struct vnode *, void *), void *dtr_arg);
 int     insmntque(struct vnode *vp, struct mount *mp);