By extending a file quickly, it is possible to create an excess supply
of the D_NEWBLK kinds of dependencies (i.e. D_ALLOCDIRECT and
D_ALLOCINDIR), which can exhaust kmem.

Handle an excess of D_NEWBLK in the same way as an excess of D_INODEDEP
and D_DIRREM: by scheduling an AST to flush dependencies after the
thread that created the new dependency has left the VFS/FFS internals.
For D_NEWBLK, the only way to get rid of the items is to do a full
sync, since they are attached to data blocks of arbitrary vnodes.  The
check for D_NEWBLK excess in softdep_ast_cleanup_proc() is performed
without the lock held.

For 32-bit arches, reduce the total number of allowed dependencies by
a factor of two.  Increasing the limit for 64-bit platforms with
direct maps could be considered in the future.

Reported and tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
This commit is contained in:
Konstantin Belousov 2015-09-01 13:07:27 +00:00
parent 3cc31a047e
commit 76cae067b8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=287361

View File

@ -923,8 +923,7 @@ static int journal_unsuspend(struct ufsmount *ump);
static void softdep_prelink(struct vnode *, struct vnode *);
static void add_to_journal(struct worklist *);
static void remove_from_journal(struct worklist *);
static bool softdep_excess_inodes(struct ufsmount *);
static bool softdep_excess_dirrem(struct ufsmount *);
static bool softdep_excess_items(struct ufsmount *, int);
static void softdep_process_journal(struct mount *, struct worklist *, int);
static struct jremref *newjremref(struct dirrem *, struct inode *,
struct inode *ip, off_t, nlink_t);
@ -2212,7 +2211,7 @@ inodedep_lookup(mp, inum, flags, inodedeppp)
* responsible for more than our share of that usage and
* we are not in a rush, request some inodedep cleanup.
*/
if (softdep_excess_inodes(ump))
if (softdep_excess_items(ump, D_INODEDEP))
schedule_cleanup(mp);
else
FREE_LOCK(ump);
@ -2307,7 +2306,12 @@ newblk_lookup(mp, newblkno, flags, newblkpp)
return (1);
if ((flags & DEPALLOC) == 0)
return (0);
FREE_LOCK(ump);
if (softdep_excess_items(ump, D_NEWBLK) ||
softdep_excess_items(ump, D_ALLOCDIRECT) ||
softdep_excess_items(ump, D_ALLOCINDIR))
schedule_cleanup(mp);
else
FREE_LOCK(ump);
newblk = malloc(sizeof(union allblk), M_NEWBLK,
M_SOFTDEP_FLAGS | M_ZERO);
workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
@ -2406,7 +2410,11 @@ softdep_initialize()
{
TAILQ_INIT(&softdepmounts);
#ifdef __LP64__
max_softdeps = desiredvnodes * 4;
#else
max_softdeps = desiredvnodes * 2;
#endif
/* initialise bioops hack */
bioops.io_start = softdep_disk_io_initiation;
@ -9106,7 +9114,7 @@ newdirrem(bp, dp, ip, isrmdir, prevdirremp)
* the number of freefile and freeblks structures.
*/
ACQUIRE_LOCK(ip->i_ump);
if (!IS_SNAPSHOT(ip) && softdep_excess_dirrem(ip->i_ump))
if (!IS_SNAPSHOT(ip) && softdep_excess_items(ip->i_ump, D_DIRREM))
schedule_cleanup(ITOV(dp)->v_mount);
else
FREE_LOCK(ip->i_ump);
@ -13244,20 +13252,12 @@ softdep_request_cleanup(fs, vp, cred, resource)
}
static bool
softdep_excess_inodes(struct ufsmount *ump)
softdep_excess_items(struct ufsmount *ump, int item)
{
return (dep_current[D_INODEDEP] > max_softdeps &&
ump->softdep_curdeps[D_INODEDEP] > max_softdeps /
stat_flush_threads);
}
static bool
softdep_excess_dirrem(struct ufsmount *ump)
{
return (dep_current[D_DIRREM] > max_softdeps / 2 &&
ump->softdep_curdeps[D_DIRREM] > (max_softdeps / 2) /
KASSERT(item >= 0 && item < D_LAST, ("item %d", item));
return (dep_current[item] > max_softdeps &&
ump->softdep_curdeps[item] > max_softdeps /
stat_flush_threads);
}
@ -13313,15 +13313,21 @@ softdep_ast_cleanup_proc(void)
for (;;) {
req = false;
ACQUIRE_LOCK(ump);
if (softdep_excess_inodes(ump)) {
if (softdep_excess_items(ump, D_INODEDEP)) {
req = true;
request_cleanup(mp, FLUSH_INODES);
}
if (softdep_excess_dirrem(ump)) {
if (softdep_excess_items(ump, D_DIRREM)) {
req = true;
request_cleanup(mp, FLUSH_BLOCKS);
}
FREE_LOCK(ump);
if (softdep_excess_items(ump, D_NEWBLK) ||
softdep_excess_items(ump, D_ALLOCDIRECT) ||
softdep_excess_items(ump, D_ALLOCINDIR)) {
req = true;
VFS_SYNC(mp, MNT_WAIT);
}
if ((td->td_pflags & TDP_KTHREAD) != 0 || !req)
break;
}