cache: remove incomplete lockless lockout support during resize

This is already properly handled thanks to the 2-step hash replacement.
This commit is contained in:
Mateusz Guzik 2021-04-10 19:20:28 +02:00
parent 164d49a584
commit 074abaccfa

View File

@@ -580,9 +580,6 @@ static long cache_lock_vnodes_cel_3_failures;
DEBUGNODE_ULONG(vnodes_cel_3_failures, cache_lock_vnodes_cel_3_failures,
"Number of times 3-way vnode locking failed");
static void cache_fplookup_lockout(void);
static void cache_fplookup_restore(void);
static void cache_zap_locked(struct namecache *ncp);
static int vn_fullpath_hardlink(struct nameidata *ndp, char **retbuf,
char **freebuf, size_t *buflen);
@@ -2771,7 +2768,6 @@ cache_changesize(u_long newmaxvnodes)
* None of the namecache entries in the table can be removed
* because to do so, they have to be removed from the hash table.
*/
cache_fplookup_lockout();
cache_lock_all_vnodes();
cache_lock_all_buckets();
old_nchashtbl = nchashtbl;
@@ -2790,7 +2786,6 @@ cache_changesize(u_long newmaxvnodes)
cache_changesize_set_new(new_nchashtbl, new_nchash);
cache_unlock_all_buckets();
cache_unlock_all_vnodes();
cache_fplookup_restore();
ncfreetbl(old_nchashtbl);
ncfreetbl(temptbl);
}
@@ -3865,33 +3860,6 @@ syscal_vfs_cache_fast_lookup(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_vfs, OID_AUTO, cache_fast_lookup, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE,
&cache_fast_lookup, 0, syscal_vfs_cache_fast_lookup, "IU", "");
/*
* Disable lockless lookup and observe all CPUs not executing it.
*
* Used when resizing the hash table.
*
* TODO: no provisions are made to handle tweaking of the knob at the same time
*/
static void
cache_fplookup_lockout(void)
{
bool on;
on = atomic_load_char(&cache_fast_lookup_enabled);
if (on) {
atomic_store_char(&cache_fast_lookup_enabled, false);
atomic_thread_fence_rel();
vfs_smr_synchronize();
}
}
static void
cache_fplookup_restore(void)
{
cache_fast_lookup_enabled_recalc();
}
/*
* Components of nameidata (or objects it can point to) which may
* need restoring in case fast path lookup fails.