2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1997-02-10 02:22:35 +00:00
|
|
|
* Copyright (c) 1989, 1993, 1995
|
1994-05-24 10:09:53 +00:00
|
|
|
* The Regents of the University of California. All rights reserved.
|
1997-02-10 02:22:35 +00:00
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* Poul-Henning Kamp of the FreeBSD Project.
|
1994-05-24 10:09:53 +00:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2016-09-15 13:16:20 +00:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1994-05-24 10:09:53 +00:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1997-03-08 15:22:14 +00:00
|
|
|
* @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2009-03-20 10:47:16 +00:00
|
|
|
#include "opt_ktrace.h"
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
2011-12-12 10:05:13 +00:00
|
|
|
#include <sys/systm.h>
|
2016-01-21 01:04:03 +00:00
|
|
|
#include <sys/counter.h>
|
2008-09-18 20:04:22 +00:00
|
|
|
#include <sys/filedesc.h>
|
|
|
|
#include <sys/fnv_hash.h>
|
1995-12-14 09:55:16 +00:00
|
|
|
#include <sys/kernel.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/lock.h>
|
2008-09-18 20:04:22 +00:00
|
|
|
#include <sys/malloc.h>
|
2012-01-15 12:08:20 +00:00
|
|
|
#include <sys/fcntl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/mount.h>
|
|
|
|
#include <sys/namei.h>
|
2008-09-18 20:04:22 +00:00
|
|
|
#include <sys/proc.h>
|
2009-01-28 19:05:18 +00:00
|
|
|
#include <sys/rwlock.h>
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
#include <sys/sdt.h>
|
2016-09-10 16:29:53 +00:00
|
|
|
#include <sys/smp.h>
|
2002-09-02 22:40:30 +00:00
|
|
|
#include <sys/syscallsubr.h>
|
2008-09-18 20:04:22 +00:00
|
|
|
#include <sys/sysctl.h>
|
1999-10-03 12:18:29 +00:00
|
|
|
#include <sys/sysproto.h>
|
2008-09-18 20:04:22 +00:00
|
|
|
#include <sys/vnode.h>
|
2009-03-20 10:47:16 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
#include <sys/ktrace.h>
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2003-06-13 08:46:13 +00:00
|
|
|
#include <vm/uma.h>
|
|
|
|
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
SDT_PROVIDER_DECLARE(vfs);
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
"struct vnode *");
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
"char *");
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
|
|
|
|
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
|
2013-07-09 08:58:34 +00:00
|
|
|
"char *", "struct vnode *");
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
|
|
|
|
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
|
2013-07-09 08:58:34 +00:00
|
|
|
"struct vnode *", "char *");
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
"struct vnode *");
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
|
2010-08-22 11:18:57 +00:00
|
|
|
"struct vnode *", "char *");
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
"char *");
|
2013-11-26 08:46:27 +00:00
|
|
|
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
|
|
|
|
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
|
|
|
|
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
|
|
|
|
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
"struct vnode *");
|
2016-10-19 18:29:52 +00:00
|
|
|
SDT_PROBE_DEFINE3(vfs, namecache, zap_negative, done, "struct vnode *",
|
|
|
|
"char *", "int");
|
|
|
|
SDT_PROBE_DEFINE3(vfs, namecache, shrink_negative, done, "struct vnode *",
|
|
|
|
"char *", "int");
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
|
2000-04-26 11:57:45 +00:00
|
|
|
/*
|
|
|
|
* This structure describes the elements in the cache of recent
|
|
|
|
* names looked up by namei.
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct namecache {
|
2000-05-26 02:09:24 +00:00
|
|
|
LIST_ENTRY(namecache) nc_hash; /* hash chain */
|
|
|
|
LIST_ENTRY(namecache) nc_src; /* source vnode list */
|
|
|
|
TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
|
2000-04-26 11:57:45 +00:00
|
|
|
struct vnode *nc_dvp; /* vnode of parent of name */
|
2016-10-19 18:29:52 +00:00
|
|
|
union {
|
|
|
|
struct vnode *nu_vp; /* vnode the name refers to */
|
|
|
|
u_int nu_neghits; /* negative entry hits */
|
|
|
|
} n_un;
|
2000-04-26 11:57:45 +00:00
|
|
|
u_char nc_flag; /* flag bits */
|
|
|
|
u_char nc_nlen; /* length of name */
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
char nc_name[0]; /* segment name + nul */
|
2000-04-26 11:57:45 +00:00
|
|
|
};
|
|
|
|
|
2012-01-22 01:11:06 +00:00
|
|
|
/*
|
|
|
|
* struct namecache_ts repeats struct namecache layout up to the
|
|
|
|
* nc_nlen member.
|
2012-03-03 01:06:54 +00:00
|
|
|
* struct namecache_ts is used in place of struct namecache when time(s) need
|
|
|
|
* to be stored. The nc_dotdottime field is used when a cache entry is mapping
|
|
|
|
* both a non-dotdot directory name plus dotdot for the directory's
|
|
|
|
* parent.
|
2012-01-22 01:11:06 +00:00
|
|
|
*/
|
|
|
|
struct namecache_ts {
|
|
|
|
struct timespec nc_time; /* timespec provided by fs */
|
2012-03-03 01:06:54 +00:00
|
|
|
struct timespec nc_dotdottime; /* dotdot timespec provided by fs */
|
2012-01-22 01:11:06 +00:00
|
|
|
int nc_ticks; /* ticks value when entry was added */
|
2017-09-10 11:17:32 +00:00
|
|
|
struct namecache nc_nc;
|
2012-01-22 01:11:06 +00:00
|
|
|
};
|
|
|
|
|
2016-10-19 18:29:52 +00:00
|
|
|
#define nc_vp n_un.nu_vp
|
|
|
|
#define nc_neghits n_un.nu_neghits
|
|
|
|
|
2012-01-22 01:11:06 +00:00
|
|
|
/*
|
|
|
|
* Flags in namecache.nc_flag
|
|
|
|
*/
|
|
|
|
#define NCF_WHITE 0x01
|
|
|
|
#define NCF_ISDOTDOT 0x02
|
|
|
|
#define NCF_TS 0x04
|
2012-03-03 01:06:54 +00:00
|
|
|
#define NCF_DTS 0x08
|
2016-09-04 16:52:14 +00:00
|
|
|
#define NCF_DVDROP 0x10
|
2016-10-19 18:29:52 +00:00
|
|
|
#define NCF_NEGATIVE 0x20
|
|
|
|
#define NCF_HOTNEGATIVE 0x40
|
2012-01-22 01:11:06 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Name caching works as follows:
|
|
|
|
*
|
|
|
|
* Names found by directory scans are retained in a cache
|
|
|
|
* for future reference. It is managed LRU, so frequently
|
|
|
|
* used names will hang around. Cache is indexed by hash value
|
|
|
|
* obtained from (vp, name) where vp refers to the directory
|
|
|
|
* containing name.
|
|
|
|
*
|
1997-02-10 02:22:35 +00:00
|
|
|
* If it is a "negative" entry, (i.e. for a name that is known NOT to
|
|
|
|
* exist) the vnode pointer will be NULL.
|
1995-03-09 20:23:45 +00:00
|
|
|
*
|
1994-05-24 10:09:53 +00:00
|
|
|
* Upon reaching the last segment of a path, if the reference
|
|
|
|
* is for DELETE, or NOCACHE is set (rewrite), and the
|
|
|
|
* name is located in the cache, it will be dropped.
|
2016-09-10 16:29:53 +00:00
|
|
|
*
|
|
|
|
* These locks are used (in the order in which they can be taken):
|
2016-09-23 04:45:11 +00:00
|
|
|
* NAME TYPE ROLE
|
|
|
|
* vnodelock mtx vnode lists and v_cache_dd field protection
|
|
|
|
* bucketlock rwlock for access to given set of hash buckets
|
2016-10-19 18:29:52 +00:00
|
|
|
* neglist mtx negative entry LRU management
|
2016-09-10 16:29:53 +00:00
|
|
|
*
|
2016-09-23 04:45:11 +00:00
|
|
|
* Additionally, ncneg_shrink_lock mtx is used to have at most one thread
|
|
|
|
* shrinking the LRU list.
|
2016-09-10 16:29:53 +00:00
|
|
|
*
|
2016-09-23 04:45:11 +00:00
|
|
|
* It is legal to take multiple vnodelock and bucketlock locks. The locking
|
|
|
|
* order is lower address first. Both are recursive.
|
2016-09-10 16:29:53 +00:00
|
|
|
*
|
2016-09-23 04:45:11 +00:00
|
|
|
* "." lookups are lockless.
|
2016-09-10 16:29:53 +00:00
|
|
|
*
|
2016-09-23 04:45:11 +00:00
|
|
|
* ".." and vnode -> name lookups require vnodelock.
|
|
|
|
*
|
|
|
|
* name -> vnode lookup requires the relevant bucketlock to be held for reading.
|
|
|
|
*
|
|
|
|
* Insertions and removals of entries require involved vnodes and bucketlocks
|
|
|
|
* to be write-locked to prevent other threads from seeing the entry.
|
|
|
|
*
|
|
|
|
* Some lookups result in removal of the found entry (e.g. getting rid of a
|
|
|
|
* negative entry with the intent to create a positive one), which poses a
|
|
|
|
* problem when multiple threads reach the state. Similarly, two different
|
|
|
|
* threads can purge two different vnodes and try to remove the same name.
|
|
|
|
*
|
|
|
|
* If the already held vnode lock is lower than the second required lock, we
|
|
|
|
* can just take the other lock. However, in the opposite case, this could
|
|
|
|
* deadlock. As such, this is resolved by trylocking and if that fails unlocking
|
|
|
|
* the first node, locking everything in order and revalidating the state.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
2016-04-29 22:15:33 +00:00
|
|
|
* Structures associated with name caching.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2001-03-20 02:10:18 +00:00
|
|
|
#define NCHHASH(hash) \
|
|
|
|
(&nchashtbl[(hash) & nchash])
|
2017-01-27 14:56:36 +00:00
|
|
|
static __read_mostly LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
|
|
|
|
static u_long __read_mostly nchash; /* size of hash table */
|
2010-10-16 09:44:31 +00:00
|
|
|
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
|
|
|
|
"Size of namecache hash table");
|
2017-11-01 06:45:41 +00:00
|
|
|
static u_long __read_mostly ncnegfactor = 12; /* ratio of negative entries */
|
2010-10-16 09:44:31 +00:00
|
|
|
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
|
|
|
|
"Ratio of negative namecache entries");
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_long __exclusive_cache_line numneg; /* number of negative entries allocated */
|
2010-10-16 09:44:31 +00:00
|
|
|
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
|
|
|
|
"Number of negative entries in namecache");
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_long __exclusive_cache_line numcache;/* number of cache entries allocated */
|
2010-10-16 09:44:31 +00:00
|
|
|
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
|
|
|
|
"Number of namecache entries");
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_long __exclusive_cache_line numcachehv;/* number of cache entries with vnodes held */
|
2010-10-16 09:44:31 +00:00
|
|
|
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
|
|
|
|
"Number of namecache entries with vnodes held");
|
2017-01-27 14:56:36 +00:00
|
|
|
u_int __read_mostly ncsizefactor = 2;
|
2010-10-16 09:44:31 +00:00
|
|
|
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
|
|
|
|
"Size factor for namecache");
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_int __read_mostly ncpurgeminvnodes;
|
2016-10-03 00:02:32 +00:00
|
|
|
SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
|
|
|
|
"Number of vnodes below which purgevfs ignores the request");
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_int __read_mostly ncneghitsrequeue = 8;
|
2016-10-19 18:29:52 +00:00
|
|
|
SYSCTL_UINT(_vfs, OID_AUTO, ncneghitsrequeue, CTLFLAG_RW, &ncneghitsrequeue, 0,
|
|
|
|
"Number of hits to requeue a negative entry in the LRU list");
|
2010-10-16 09:44:31 +00:00
|
|
|
|
|
|
|
struct nchstats nchstats; /* cache effectiveness statistics */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
static struct mtx ncneg_shrink_lock;
|
2017-01-27 14:56:36 +00:00
|
|
|
static int shrink_list_turn;
|
2003-10-05 07:13:50 +00:00
|
|
|
|
2016-10-19 18:29:52 +00:00
|
|
|
struct neglist {
|
|
|
|
struct mtx nl_lock;
|
|
|
|
TAILQ_HEAD(, namecache) nl_list;
|
|
|
|
} __aligned(CACHE_LINE_SIZE);
|
|
|
|
|
2017-01-27 14:56:36 +00:00
|
|
|
static struct neglist __read_mostly *neglists;
|
2016-10-19 18:29:52 +00:00
|
|
|
static struct neglist ncneg_hot;
|
|
|
|
|
2016-12-29 08:41:25 +00:00
|
|
|
#define numneglists (ncneghash + 1)
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_int __read_mostly ncneghash;
|
2016-10-19 18:29:52 +00:00
|
|
|
static inline struct neglist *
|
|
|
|
NCP2NEGLIST(struct namecache *ncp)
|
|
|
|
{
|
|
|
|
|
2016-12-29 08:41:25 +00:00
|
|
|
return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
|
2016-10-19 18:29:52 +00:00
|
|
|
}
|
2016-09-10 16:29:53 +00:00
|
|
|
|
2016-12-29 08:41:25 +00:00
|
|
|
#define numbucketlocks (ncbuckethash + 1)
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_int __read_mostly ncbuckethash;
|
|
|
|
static struct rwlock_padalign __read_mostly *bucketlocks;
|
2016-09-10 16:29:53 +00:00
|
|
|
#define HASH2BUCKETLOCK(hash) \
|
2016-12-29 08:41:25 +00:00
|
|
|
((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))
|
2016-09-04 08:58:35 +00:00
|
|
|
|
2016-12-29 08:41:25 +00:00
|
|
|
#define numvnodelocks (ncvnodehash + 1)
|
2017-01-27 14:56:36 +00:00
|
|
|
static u_int __read_mostly ncvnodehash;
|
|
|
|
static struct mtx __read_mostly *vnodelocks;
|
2016-09-23 04:45:11 +00:00
|
|
|
static inline struct mtx *
|
|
|
|
VP2VNODELOCK(struct vnode *vp)
|
|
|
|
{
|
|
|
|
|
2016-12-29 08:41:25 +00:00
|
|
|
return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
|
2016-09-23 04:45:11 +00:00
|
|
|
}
|
|
|
|
|
2003-06-13 08:46:13 +00:00
|
|
|
/*
|
|
|
|
* UMA zones for the VFS cache.
|
|
|
|
*
|
|
|
|
* The small cache is used for entries with short names, which are the
|
|
|
|
* most common. The large cache is used for entries which are too big to
|
|
|
|
* fit in the small cache.
|
|
|
|
*/
|
2017-01-27 14:56:36 +00:00
|
|
|
static uma_zone_t __read_mostly cache_zone_small;
|
|
|
|
static uma_zone_t __read_mostly cache_zone_small_ts;
|
|
|
|
static uma_zone_t __read_mostly cache_zone_large;
|
|
|
|
static uma_zone_t __read_mostly cache_zone_large_ts;
|
2003-06-13 08:46:13 +00:00
|
|
|
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - positive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zap),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
#define CACHE_PATH_CUTOFF 35
|
2012-01-22 01:11:06 +00:00
|
|
|
|
|
|
|
static struct namecache *
|
|
|
|
cache_alloc(int len, int ts)
|
|
|
|
{
|
2017-09-10 11:17:32 +00:00
|
|
|
struct namecache_ts *ncp_ts;
|
|
|
|
struct namecache *ncp;
|
2012-01-22 01:11:06 +00:00
|
|
|
|
2017-09-10 11:17:32 +00:00
|
|
|
if (__predict_false(ts)) {
|
|
|
|
if (len <= CACHE_PATH_CUTOFF)
|
|
|
|
ncp_ts = uma_zalloc(cache_zone_small_ts, M_WAITOK);
|
|
|
|
else
|
|
|
|
ncp_ts = uma_zalloc(cache_zone_large_ts, M_WAITOK);
|
|
|
|
ncp = &ncp_ts->nc_nc;
|
|
|
|
} else {
|
|
|
|
if (len <= CACHE_PATH_CUTOFF)
|
|
|
|
ncp = uma_zalloc(cache_zone_small, M_WAITOK);
|
2012-03-03 01:06:54 +00:00
|
|
|
else
|
2017-09-10 11:17:32 +00:00
|
|
|
ncp = uma_zalloc(cache_zone_large, M_WAITOK);
|
2012-03-03 01:06:54 +00:00
|
|
|
}
|
2017-09-10 11:17:32 +00:00
|
|
|
return (ncp);
|
2012-01-22 01:11:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_free(struct namecache *ncp)
|
|
|
|
{
|
2017-09-10 11:17:32 +00:00
|
|
|
struct namecache_ts *ncp_ts;
|
2012-01-22 01:11:06 +00:00
|
|
|
|
|
|
|
if (ncp == NULL)
|
|
|
|
return;
|
2016-09-04 16:52:14 +00:00
|
|
|
if ((ncp->nc_flag & NCF_DVDROP) != 0)
|
|
|
|
vdrop(ncp->nc_dvp);
|
2017-09-10 11:17:32 +00:00
|
|
|
if (__predict_false(ncp->nc_flag & NCF_TS)) {
|
|
|
|
ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
|
|
|
|
if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
|
|
|
|
uma_zfree(cache_zone_small_ts, ncp_ts);
|
2012-01-22 01:11:06 +00:00
|
|
|
else
|
2017-09-10 11:17:32 +00:00
|
|
|
uma_zfree(cache_zone_large_ts, ncp_ts);
|
|
|
|
} else {
|
|
|
|
if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
|
2012-01-22 01:11:06 +00:00
|
|
|
uma_zfree(cache_zone_small, ncp);
|
2017-09-10 11:17:32 +00:00
|
|
|
else
|
|
|
|
uma_zfree(cache_zone_large, ncp);
|
|
|
|
}
|
2012-01-22 01:11:06 +00:00
|
|
|
}
|
2003-06-13 08:46:13 +00:00
|
|
|
|
2012-01-23 17:09:23 +00:00
|
|
|
static void
|
|
|
|
cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
|
|
|
|
{
|
2017-09-10 11:17:32 +00:00
|
|
|
struct namecache_ts *ncp_ts;
|
2012-01-23 17:09:23 +00:00
|
|
|
|
2012-01-25 20:48:20 +00:00
|
|
|
KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
|
|
|
|
(tsp == NULL && ticksp == NULL),
|
|
|
|
("No NCF_TS"));
|
2012-01-23 17:09:23 +00:00
|
|
|
|
2017-09-10 11:17:32 +00:00
|
|
|
if (tsp == NULL && ticksp == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
|
2012-01-23 17:09:23 +00:00
|
|
|
if (tsp != NULL)
|
2017-09-10 11:17:32 +00:00
|
|
|
*tsp = ncp_ts->nc_time;
|
2012-01-23 17:09:23 +00:00
|
|
|
if (ticksp != NULL)
|
2017-09-10 11:17:32 +00:00
|
|
|
*ticksp = ncp_ts->nc_ticks;
|
2012-01-23 17:09:23 +00:00
|
|
|
}
|
|
|
|
|
2017-01-27 14:56:36 +00:00
|
|
|
static int __read_mostly doingcache = 1; /* 1 => enable the cache */
|
2010-10-16 09:44:31 +00:00
|
|
|
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
|
|
|
|
"VFS namecache enabled");
|
2002-03-05 15:38:49 +00:00
|
|
|
|
|
|
|
/* Export size information to userland */
|
2014-10-21 07:31:21 +00:00
|
|
|
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
|
2010-11-14 16:10:15 +00:00
|
|
|
sizeof(struct namecache), "sizeof(struct namecache)");
|
1997-03-08 15:22:14 +00:00
|
|
|
|
1997-09-24 07:46:54 +00:00
|
|
|
/*
|
|
|
|
* The new name cache statistics
|
|
|
|
*/
|
2010-11-14 08:06:29 +00:00
|
|
|
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
|
2010-11-14 16:10:15 +00:00
|
|
|
"Name cache statistics");
|
2016-01-21 01:04:03 +00:00
|
|
|
#define STATNODE_ULONG(name, descr) \
|
|
|
|
SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
|
|
|
|
#define STATNODE_COUNTER(name, descr) \
|
2017-01-27 14:56:36 +00:00
|
|
|
static counter_u64_t __read_mostly name; \
|
2016-01-21 01:04:03 +00:00
|
|
|
SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
|
|
|
|
STATNODE_ULONG(numneg, "Number of negative cache entries");
|
|
|
|
STATNODE_ULONG(numcache, "Number of cache entries");
|
|
|
|
STATNODE_COUNTER(numcalls, "Number of cache lookups");
|
|
|
|
STATNODE_COUNTER(dothits, "Number of '.' hits");
|
|
|
|
STATNODE_COUNTER(dotdothits, "Number of '..' hits");
|
|
|
|
STATNODE_COUNTER(numchecks, "Number of checks in lookup");
|
|
|
|
STATNODE_COUNTER(nummiss, "Number of cache misses");
|
|
|
|
STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
|
|
|
|
STATNODE_COUNTER(numposzaps,
|
2010-11-14 16:10:15 +00:00
|
|
|
"Number of cache hits (positive) we do not want to cache");
|
2016-01-21 01:04:03 +00:00
|
|
|
STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
|
|
|
|
STATNODE_COUNTER(numnegzaps,
|
2010-11-14 16:10:15 +00:00
|
|
|
"Number of cache hits (negative) we do not want to cache");
|
2016-01-21 01:04:03 +00:00
|
|
|
STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
|
|
|
|
/* These count for kern___getcwd(), too. */
|
|
|
|
STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
|
|
|
|
STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
|
|
|
|
STATNODE_COUNTER(numfullpathfail2,
|
|
|
|
"Number of fullpath search errors (VOP_VPTOCNP failures)");
|
|
|
|
STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
|
|
|
|
STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
|
2016-09-10 16:29:53 +00:00
|
|
|
static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail,
|
2016-09-23 04:45:11 +00:00
|
|
|
"Number of times zap_and_exit failed to lock");
|
|
|
|
static long cache_lock_vnodes_cel_3_failures;
|
|
|
|
STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
|
|
|
|
"Number of times 3-way vnode locking failed");
|
1997-09-24 07:46:54 +00:00
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
static void cache_zap_locked(struct namecache *ncp, bool neg_locked);
|
2005-03-30 02:59:32 +00:00
|
|
|
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
|
|
|
|
char *buf, char **retbuf, u_int buflen);
|
1995-03-09 20:23:45 +00:00
|
|
|
|
2000-12-08 20:09:00 +00:00
|
|
|
static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
|
1999-10-03 12:18:29 +00:00
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
static int cache_yield;
|
|
|
|
SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
|
|
|
|
"Number of times cache called yield");
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_maybe_yield(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (should_yield()) {
|
|
|
|
cache_yield++;
|
|
|
|
kern_yield(PRI_USER);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
cache_assert_vlp_locked(struct mtx *vlp)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (vlp != NULL)
|
|
|
|
mtx_assert(vlp, MA_OWNED);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Assert that the namecache mutex covering the given vnode is held.
 */
static inline void
cache_assert_vnode_locked(struct vnode *vp)
{

	cache_assert_vlp_locked(VP2VNODELOCK(vp));
}
|
|
|
|
|
2016-01-21 01:05:41 +00:00
|
|
|
static uint32_t
|
|
|
|
cache_get_hash(char *name, u_char len, struct vnode *dvp)
|
|
|
|
{
|
|
|
|
uint32_t hash;
|
|
|
|
|
|
|
|
hash = fnv_32_buf(name, len, FNV1_32_INIT);
|
|
|
|
hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
|
|
|
|
return (hash);
|
|
|
|
}
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
static inline struct rwlock *
|
|
|
|
NCP2BUCKETLOCK(struct namecache *ncp)
|
|
|
|
{
|
|
|
|
uint32_t hash;
|
|
|
|
|
2017-09-10 11:17:32 +00:00
|
|
|
hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
|
2016-09-23 04:45:11 +00:00
|
|
|
return (HASH2BUCKETLOCK(hash));
|
|
|
|
}
|
|
|
|
|
2016-09-10 16:29:53 +00:00
|
|
|
#ifdef INVARIANTS
/*
 * Assert that the bucket lock covering ncp's hash chain is held in the
 * requested mode (RA_RLOCKED/RA_WLOCKED).  Compiles to nothing in
 * non-INVARIANTS kernels via the #else branch below.
 */
static void
cache_assert_bucket_locked(struct namecache *ncp, int mode)
{
	struct rwlock *blp;

	blp = NCP2BUCKETLOCK(ncp);
	rw_assert(blp, mode);
}
#else
#define cache_assert_bucket_locked(x, y) do { } while (0)
#endif
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
#define cache_sort(x, y)	_cache_sort((void **)(x), (void **)(y))
/*
 * Order two pointers so that *p1 <= *p2 on return, swapping them when
 * needed.  This establishes the lower-address-first lock acquisition
 * order used throughout the cache (see the locking comment above).
 */
static void
_cache_sort(void **p1, void **p2)
{
	void *swap;

	if (*p1 <= *p2)
		return;
	swap = *p1;
	*p1 = *p2;
	*p2 = swap;
}
|
|
|
|
|
2016-09-10 16:29:53 +00:00
|
|
|
static void
|
|
|
|
cache_lock_all_buckets(void)
|
|
|
|
{
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
for (i = 0; i < numbucketlocks; i++)
|
|
|
|
rw_wlock(&bucketlocks[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_unlock_all_buckets(void)
|
|
|
|
{
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
for (i = 0; i < numbucketlocks; i++)
|
|
|
|
rw_wunlock(&bucketlocks[i]);
|
|
|
|
}
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
static void
|
|
|
|
cache_lock_all_vnodes(void)
|
|
|
|
{
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
for (i = 0; i < numvnodelocks; i++)
|
|
|
|
mtx_lock(&vnodelocks[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_unlock_all_vnodes(void)
|
|
|
|
{
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
for (i = 0; i < numvnodelocks; i++)
|
|
|
|
mtx_unlock(&vnodelocks[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
|
|
|
|
{
|
|
|
|
|
|
|
|
cache_sort(&vlp1, &vlp2);
|
|
|
|
MPASS(vlp2 != NULL);
|
|
|
|
|
|
|
|
if (vlp1 != NULL) {
|
|
|
|
if (!mtx_trylock(vlp1))
|
|
|
|
return (EAGAIN);
|
|
|
|
}
|
|
|
|
if (!mtx_trylock(vlp2)) {
|
|
|
|
if (vlp1 != NULL)
|
|
|
|
mtx_unlock(vlp1);
|
|
|
|
return (EAGAIN);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
|
|
|
|
{
|
|
|
|
|
|
|
|
MPASS(vlp1 != NULL || vlp2 != NULL);
|
|
|
|
|
|
|
|
if (vlp1 != NULL)
|
|
|
|
mtx_unlock(vlp1);
|
|
|
|
if (vlp2 != NULL)
|
|
|
|
mtx_unlock(vlp2);
|
|
|
|
}
|
|
|
|
|
2016-01-21 01:04:03 +00:00
|
|
|
static int
|
|
|
|
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct nchstats snap;
|
|
|
|
|
|
|
|
if (req->oldptr == NULL)
|
|
|
|
return (SYSCTL_OUT(req, 0, sizeof(snap)));
|
|
|
|
|
|
|
|
snap = nchstats;
|
|
|
|
snap.ncs_goodhits = counter_u64_fetch(numposhits);
|
|
|
|
snap.ncs_neghits = counter_u64_fetch(numneghits);
|
|
|
|
snap.ncs_badhits = counter_u64_fetch(numposzaps) +
|
|
|
|
counter_u64_fetch(numnegzaps);
|
|
|
|
snap.ncs_miss = counter_u64_fetch(nummisszap) +
|
|
|
|
counter_u64_fetch(nummiss);
|
|
|
|
|
|
|
|
return (SYSCTL_OUT(req, &snap, sizeof(snap)));
|
|
|
|
}
|
|
|
|
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
|
|
|
|
CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
|
|
|
|
"VFS cache effectiveness statistics");
|
|
|
|
|
2009-03-09 19:04:53 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
2001-04-11 00:39:20 +00:00
|
|
|
/*
|
|
|
|
* Grab an atomic snapshot of the name cache hash chain lengths
|
|
|
|
*/
|
2011-11-07 15:43:11 +00:00
|
|
|
static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
|
|
|
|
"hash table stats");
|
2001-04-11 00:39:20 +00:00
|
|
|
|
|
|
|
/*
 * Export the raw per-bucket chain lengths of the namecache hash table.
 *
 * The table may be resized between sizing the output buffer and locking
 * the buckets, so the size is re-checked under the locks and the whole
 * operation restarted if it changed.
 */
static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int i, error, n_nchash, *cntbuf;

retry:
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (req->oldptr == NULL)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
	cache_lock_all_buckets();
	/* Table was resized while we were allocating; start over. */
	if (n_nchash != nchash + 1) {
		cache_unlock_all_buckets();
		free(cntbuf, M_TEMP);
		goto retry;
	}
	/* Scan hash tables counting entries */
	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
		LIST_FOREACH(ncp, ncpp, nc_hash)
			cntbuf[i]++;
	cache_unlock_all_buckets();
	/* Copy out after dropping the locks; SYSCTL_OUT may sleep. */
	for (error = 0, i = 0; i < n_nchash; i++)
		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
			break;
	free(cntbuf, M_TEMP);
	return (error);
}
|
2009-01-23 22:49:23 +00:00
|
|
|
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
|
2010-11-14 16:10:15 +00:00
|
|
|
CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
|
|
|
|
"nchash chain lengths");
|
2001-04-11 00:39:20 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct nchashhead *ncpp;
|
|
|
|
struct namecache *ncp;
|
|
|
|
int n_nchash;
|
|
|
|
int count, maxlength, used, pct;
|
|
|
|
|
|
|
|
if (!req->oldptr)
|
|
|
|
return SYSCTL_OUT(req, 0, 4 * sizeof(int));
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_lock_all_buckets();
|
2001-04-11 00:39:20 +00:00
|
|
|
n_nchash = nchash + 1; /* nchash is max index, not count */
|
|
|
|
used = 0;
|
|
|
|
maxlength = 0;
|
|
|
|
|
|
|
|
/* Scan hash tables for applicable entries */
|
|
|
|
for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
|
|
|
|
count = 0;
|
|
|
|
LIST_FOREACH(ncp, ncpp, nc_hash) {
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
if (count)
|
|
|
|
used++;
|
|
|
|
if (maxlength < count)
|
|
|
|
maxlength = count;
|
|
|
|
}
|
|
|
|
n_nchash = nchash + 1;
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_unlock_all_buckets();
|
2012-02-25 12:06:40 +00:00
|
|
|
pct = (used * 100) / (n_nchash / 100);
|
2002-06-28 23:17:36 +00:00
|
|
|
error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
|
2001-04-11 00:39:20 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2002-06-28 23:17:36 +00:00
|
|
|
error = SYSCTL_OUT(req, &used, sizeof(used));
|
2001-04-11 00:39:20 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2002-06-28 23:17:36 +00:00
|
|
|
error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
|
2001-04-11 00:39:20 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2002-06-28 23:17:36 +00:00
|
|
|
error = SYSCTL_OUT(req, &pct, sizeof(pct));
|
2001-04-11 00:39:20 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
return (0);
|
|
|
|
}
|
2009-01-23 22:49:23 +00:00
|
|
|
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
|
2010-11-14 16:10:15 +00:00
|
|
|
CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
|
2012-02-25 12:06:40 +00:00
|
|
|
"nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
|
2009-03-09 19:04:53 +00:00
|
|
|
#endif
|
2001-04-11 00:39:20 +00:00
|
|
|
|
2016-09-04 08:55:15 +00:00
|
|
|
/*
|
|
|
|
* Negative entries management
|
2016-10-19 18:29:52 +00:00
|
|
|
*
|
|
|
|
* A variation of LRU scheme is used. New entries are hashed into one of
|
|
|
|
* numneglists cold lists. Entries get promoted to the hot list on first hit.
|
|
|
|
* Partial LRU for the hot list is maintained by requeueing them every
|
|
|
|
* ncneghitsrequeue hits.
|
|
|
|
*
|
|
|
|
* The shrinker will demote hot list head and evict from the cold list in a
|
|
|
|
* round-robin manner.
|
2016-09-04 08:55:15 +00:00
|
|
|
*/
|
|
|
|
/*
 * Register a hit on a negative entry and maintain its hot/cold LRU state.
 *
 * A hit on a cold entry promotes it to the shared hot list.  A hit on an
 * already-hot entry only requeues it to the hot list tail on every
 * ncneghitsrequeue-th hit, to limit contention on ncneg_hot.nl_lock.
 */
static void
cache_negative_hit(struct namecache *ncp)
{
	struct neglist *neglist;
	u_int hits;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	hits = atomic_fetchadd_int(&ncp->nc_neghits, 1);
	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
		/* Only requeue every ncneghitsrequeue hits. */
		if ((hits % ncneghitsrequeue) != 0)
			return;
		mtx_lock(&ncneg_hot.nl_lock);
		/* Re-check: the flag was read without the lock above. */
		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
			TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
			TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
			mtx_unlock(&ncneg_hot.nl_lock);
			return;
		}
		/*
		 * The shrinker cleared the flag and removed the entry from
		 * the hot list. Put it back.
		 */
	} else {
		mtx_lock(&ncneg_hot.nl_lock);
	}
	neglist = NCP2NEGLIST(ncp);
	mtx_lock(&neglist->nl_lock);
	/* Promote from the cold list to the hot list, if still cold. */
	if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
		ncp->nc_flag |= NCF_HOTNEGATIVE;
	}
	mtx_unlock(&neglist->nl_lock);
	mtx_unlock(&ncneg_hot.nl_lock);
}
|
|
|
|
|
|
|
|
/*
 * Insert a negative entry onto the tail of its cold LRU list.
 *
 * The caller must hold the entry's bucket lock write-locked.  If
 * neg_locked is true the caller already holds the relevant neglist lock
 * (asserted); otherwise it is taken here.
 */
static void
cache_negative_insert(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		mtx_lock(&neglist->nl_lock);
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
	}
	TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
	if (!neg_locked)
		mtx_unlock(&neglist->nl_lock);
	atomic_add_rel_long(&numneg, 1);
}
|
|
|
|
|
|
|
|
/*
 * Remove a negative entry from whichever LRU list (hot or cold) it is on.
 *
 * The caller must hold the entry's bucket lock write-locked.  If
 * neg_locked is false the needed list lock(s) are taken here: the
 * NCF_HOTNEGATIVE flag is re-checked after acquiring the hot-list lock
 * because the shrinker may demote the entry concurrently, in which case
 * the cold-list lock is taken as well.
 */
static void
cache_negative_remove(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;
	bool hot_locked = false;
	bool list_locked = false;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
			hot_locked = true;
			mtx_lock(&ncneg_hot.nl_lock);
			/* Demoted while we waited for the lock? */
			if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
				list_locked = true;
				mtx_lock(&neglist->nl_lock);
			}
		} else {
			list_locked = true;
			mtx_lock(&neglist->nl_lock);
		}
	}
	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
		mtx_assert(&ncneg_hot.nl_lock, MA_OWNED);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
	}
	if (list_locked)
		mtx_unlock(&neglist->nl_lock);
	if (hot_locked)
		mtx_unlock(&ncneg_hot.nl_lock);
	atomic_subtract_rel_long(&numneg, 1);
}
|
|
|
|
|
|
|
|
/*
 * Find an eviction candidate for the negative-entry shrinker.
 *
 * Scans the cold lists starting at index 'start' and returns the first
 * non-empty list's head entry.  On success (*ncpp != NULL) the selected
 * list's lock is returned HELD; the caller must drop it.  If every list
 * from 'start' on is empty, *ncpp is NULL and no lock is held.
 * NOTE(review): *neglistpp is only meaningful when the loop body ran at
 * least once — callers appear to pass start < numneglists; confirm.
 */
static void
cache_negative_shrink_select(int start, struct namecache **ncpp,
    struct neglist **neglistpp)
{
	struct neglist *neglist;
	struct namecache *ncp;
	int i;

	*ncpp = ncp = NULL;

	for (i = start; i < numneglists; i++) {
		neglist = &neglists[i];
		/* Unlocked emptiness pre-check to avoid taking the lock. */
		if (TAILQ_FIRST(&neglist->nl_list) == NULL)
			continue;
		mtx_lock(&neglist->nl_lock);
		ncp = TAILQ_FIRST(&neglist->nl_list);
		if (ncp != NULL)
			break;	/* Lock stays held for the caller. */
		mtx_unlock(&neglist->nl_lock);
	}

	*neglistpp = neglist;
	*ncpp = ncp;
}
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
/*
 * Evict one negative entry to bound the size of the negative cache.
 *
 * At most one thread shrinks at a time (ncneg_shrink_lock, trylocked so
 * callers never block here).  The hot list head, if any, is first demoted
 * to its cold list; then a cold-list victim is selected round-robin via
 * shrink_list_turn.  Because the victim's vnode and bucket locks must be
 * taken in front of the list locks, the list locks are dropped and the
 * victim is revalidated after relocking; if anything changed the eviction
 * is abandoned for this round.
 */
static void
cache_negative_zap_one(void)
{
	struct namecache *ncp, *ncp2;
	struct neglist *neglist;
	struct mtx *dvlp;
	struct rwlock *blp;

	/* Only one shrinker at a time; never block waiting to shrink. */
	if (!mtx_trylock(&ncneg_shrink_lock))
		return;

	mtx_lock(&ncneg_hot.nl_lock);
	ncp = TAILQ_FIRST(&ncneg_hot.nl_list);
	if (ncp != NULL) {
		/* Demote the hot list head back to its cold list. */
		neglist = NCP2NEGLIST(ncp);
		mtx_lock(&neglist->nl_lock);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
		ncp->nc_flag &= ~NCF_HOTNEGATIVE;
		mtx_unlock(&neglist->nl_lock);
	}

	/* Pick a cold-list victim round-robin; wrap and retry once. */
	cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
	shrink_list_turn++;
	if (shrink_list_turn == numneglists)
		shrink_list_turn = 0;
	if (ncp == NULL && shrink_list_turn == 0)
		cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
	if (ncp == NULL) {
		mtx_unlock(&ncneg_hot.nl_lock);
		goto out;
	}

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	blp = NCP2BUCKETLOCK(ncp);
	/*
	 * Drop the list locks so the vnode and bucket locks can be taken
	 * in the required order, then revalidate the victim.
	 */
	mtx_unlock(&neglist->nl_lock);
	mtx_unlock(&ncneg_hot.nl_lock);
	mtx_lock(dvlp);
	rw_wlock(blp);
	mtx_lock(&neglist->nl_lock);
	ncp2 = TAILQ_FIRST(&neglist->nl_list);
	/* The entry may have been freed or requeued while unlocked. */
	if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
	    blp != NCP2BUCKETLOCK(ncp2) || !(ncp2->nc_flag & NCF_NEGATIVE)) {
		ncp = NULL;
		goto out_unlock_all;
	}
	SDT_PROBE3(vfs, namecache, shrink_negative, done, ncp->nc_dvp,
	    ncp->nc_name, ncp->nc_neghits);

	cache_zap_locked(ncp, true);
out_unlock_all:
	mtx_unlock(&neglist->nl_lock);
	rw_wunlock(blp);
	mtx_unlock(dvlp);
out:
	mtx_unlock(&ncneg_shrink_lock);
	/* cache_free(NULL) is a no-op when the eviction was abandoned. */
	cache_free(ncp);
}
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
2016-09-23 04:45:11 +00:00
|
|
|
* cache_zap_locked():
|
2003-02-15 18:58:06 +00:00
|
|
|
*
|
|
|
|
* Removes a namecache entry from cache, whether it contains an actual
|
|
|
|
* pointer to a vnode or if it is just a negative cache entry.
|
1997-02-10 02:22:35 +00:00
|
|
|
*/
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache entries, malloc new ones when we need them, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
static void
cache_zap_locked(struct namecache *ncp, bool neg_locked)
{

	/*
	 * Caller must hold the relevant vnode lock(s) and the entry's hash
	 * bucket lock exclusively.
	 */
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		cache_assert_vnode_locked(ncp->nc_vp);
	cache_assert_vnode_locked(ncp->nc_dvp);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
	    (ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_vp);
	} else {
		SDT_PROBE3(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_neghits);
	}
	/* Unhook the entry from the global hash table. */
	LIST_REMOVE(ncp, nc_hash);
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		/*
		 * Positive entry: detach it from the target vnode and clear
		 * the target's ".." shortcut if this entry provided it.
		 */
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd)
			ncp->nc_vp->v_cache_dd = NULL;
	} else {
		/*
		 * Negative entry: remove from the negative list;
		 * neg_locked tells the callee whether the caller already
		 * holds the relevant negative-list lock.
		 */
		cache_negative_remove(ncp, neg_locked);
	}
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		/* ".." entries hang off the directory's v_cache_dd only. */
		if (ncp == ncp->nc_dvp->v_cache_dd)
			ncp->nc_dvp->v_cache_dd = NULL;
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			/*
			 * Last entry sourced from this directory: flag the
			 * entry so the caller drops the directory's hold
			 * reference once locks are released.
			 */
			ncp->nc_flag |= NCF_DVDROP;
			atomic_subtract_rel_long(&numcachehv, 1);
		}
	}
	atomic_subtract_rel_long(&numcache, 1);
}
|
1995-03-09 20:23:45 +00:00
|
|
|
|
2016-09-10 16:29:53 +00:00
|
|
|
/*
 * Zap a negative entry whose directory vnode lock is already held by the
 * caller.  Only the hash bucket lock is acquired here; the caller keeps
 * the vnode lock ("kl" = keep locked).
 */
static void
cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
{
	struct rwlock *blp;

	MPASS(ncp->nc_dvp == vp);
	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_vnode_locked(vp);

	blp = NCP2BUCKETLOCK(ncp);
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
}
|
|
|
|
|
|
|
|
/*
 * Zap an entry reachable from the locked vnode vp, opportunistically
 * reusing a vnode lock cached in *vlpp from a previous iteration.
 *
 * Returns true if the entry was zapped (caller's vp lock stays held).
 * Returns false if lock acquisition failed: in that case vp's lock was
 * transiently dropped and retaken, *vlpp is set to the lock that blocked
 * us (now held), and the caller must re-validate and retry.
 */
static bool
cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
    struct mtx **vlpp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	if (ncp->nc_flag & NCF_NEGATIVE) {
		/* Negative entries only need vp's (== dvp's) lock. */
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		return (true);
	}

	pvlp = VP2VNODELOCK(vp);
	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);

	if (*vlpp == vlp1 || *vlpp == vlp2) {
		/* The cached lock is the one we need; consume it. */
		to_unlock = *vlpp;
		*vlpp = NULL;
	} else {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		/* Sort to the global lock order before acquiring. */
		cache_sort(&vlp1, &vlp2);
		if (vlp1 == pvlp) {
			/* vlp1 already held as the caller's vp lock. */
			mtx_lock(vlp2);
			to_unlock = vlp2;
		} else {
			/*
			 * vlp1 orders before the held pvlp (== vlp2), so
			 * blocking on it would invert the lock order.
			 */
			if (!mtx_trylock(vlp1))
				goto out_relock;
			to_unlock = vlp1;
		}
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	if (to_unlock != NULL)
		mtx_unlock(to_unlock);
	return (true);

out_relock:
	/*
	 * Drop the caller-held lock (vlp2 == pvlp here), take both in
	 * order, and hand vlp1 back via *vlpp for the caller's retry.
	 */
	mtx_unlock(vlp2);
	mtx_lock(vlp1);
	mtx_lock(vlp2);
	MPASS(*vlpp == NULL);
	*vlpp = vlp1;
	return (false);
}
|
|
|
|
|
|
|
|
/*
 * Zap an entry reachable from the locked vnode vp.
 *
 * Returns 0 on success or EAGAIN if the required vnode lock could not be
 * trylocked without risking a lock-order reversal.  In either case vp's
 * vnode (interlock) mutex is unlocked on return.
 */
static int
cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;
	int error = 0;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	pvlp = VP2VNODELOCK(vp);
	if (ncp->nc_flag & NCF_NEGATIVE) {
		/* Negative entries only need the held dvp lock. */
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		goto out;
	}

	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);
	/* Sort to the global lock order before acquiring the second lock. */
	cache_sort(&vlp1, &vlp2);
	if (vlp1 == pvlp) {
		mtx_lock(vlp2);
		to_unlock = vlp2;
	} else {
		/*
		 * vlp1 orders before the already-held pvlp; blocking on it
		 * would invert the lock order, so only trylock.
		 */
		if (!mtx_trylock(vlp1)) {
			error = EAGAIN;
			goto out;
		}
		to_unlock = vlp1;
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	mtx_unlock(to_unlock);
out:
	mtx_unlock(pvlp);
	return (error);
}
|
|
|
|
|
2017-11-05 22:28:39 +00:00
|
|
|
static int
|
|
|
|
cache_zap_wlocked_bucket(struct namecache *ncp, struct rwlock *blp)
|
|
|
|
{
|
|
|
|
struct mtx *dvlp, *vlp;
|
|
|
|
|
|
|
|
cache_assert_bucket_locked(ncp, RA_WLOCKED);
|
|
|
|
|
|
|
|
dvlp = VP2VNODELOCK(ncp->nc_dvp);
|
|
|
|
vlp = NULL;
|
|
|
|
if (!(ncp->nc_flag & NCF_NEGATIVE))
|
|
|
|
vlp = VP2VNODELOCK(ncp->nc_vp);
|
|
|
|
if (cache_trylock_vnodes(dvlp, vlp) == 0) {
|
|
|
|
cache_zap_locked(ncp, false);
|
|
|
|
rw_wunlock(blp);
|
|
|
|
cache_unlock_vnodes(dvlp, vlp);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
rw_wunlock(blp);
|
|
|
|
return (EAGAIN);
|
|
|
|
}
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
/*
 * Zap an entry found under a read-locked hash bucket.
 *
 * Returns 0 on success, EAGAIN if the vnode locks could not be trylocked.
 * The bucket lock is dropped on both paths.
 */
static int
cache_zap_rlocked_bucket(struct namecache *ncp, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_RLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		/*
		 * "Upgrade" by dropping and retaking the bucket lock.
		 * NOTE(review): the entry appears protected across the
		 * unlocked window because its vnode locks are held above,
		 * which zapping requires — confirm against cache_zap_locked
		 * callers.
		 */
		rw_runlock(blp);
		rw_wlock(blp);
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_runlock(blp);
	return (EAGAIN);
}
|
|
|
|
|
|
|
|
/*
 * Zap an entry under a write-locked bucket while caching vnode locks in
 * *vlpp1/*vlpp2 across retries ("kl" = keep locks).
 *
 * Returns 0 on success.  Returns EAGAIN after dropping and retaking the
 * bucket lock with the needed vnode locks now held in *vlpp1/*vlpp2; the
 * caller must re-validate the entry and call again.
 */
static int
cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
    struct mtx **vlpp1, struct mtx **vlpp2)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	/* Sort to the global lock order; a NULL lock sorts first. */
	cache_sort(&dvlp, &vlp);

	if (*vlpp1 == dvlp && *vlpp2 == vlp) {
		/* Cached locks from the previous round match; use them. */
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		*vlpp1 = NULL;
		*vlpp2 = NULL;
		return (0);
	}

	/* Cached locks are stale; release them before trying afresh. */
	if (*vlpp1 != NULL)
		mtx_unlock(*vlpp1);
	if (*vlpp2 != NULL)
		mtx_unlock(*vlpp2);
	*vlpp1 = NULL;
	*vlpp2 = NULL;

	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	/*
	 * Trylock failed: drop the bucket lock, block on the vnode locks
	 * in order, and hand them back for the caller's retry.  After
	 * cache_sort only dvlp may be NULL, so *vlpp2 is always lockable.
	 */
	rw_wunlock(blp);
	*vlpp1 = dvlp;
	*vlpp2 = vlp;
	if (*vlpp1 != NULL)
		mtx_lock(*vlpp1);
	mtx_lock(*vlpp2);
	rw_wlock(blp);
	return (EAGAIN);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (blp != NULL) {
|
|
|
|
rw_runlock(blp);
|
|
|
|
} else {
|
|
|
|
mtx_unlock(vlp);
|
|
|
|
}
|
2016-09-10 16:29:53 +00:00
|
|
|
}
|
|
|
|
|
2017-09-08 06:51:33 +00:00
|
|
|
/*
 * Handle a cache lookup of "." — the result is dvp itself.
 *
 * Returns -1 (cache hit) with *vpp referenced and locked per cn_lkflags,
 * or ENOENT if the vnode was doomed by a forced unmount during a lock
 * upgrade, in which case *vpp is NULL.
 */
static int __noinline
cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	int ltype;

	*vpp = dvp;
	CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
	    dvp, cnp->cn_nameptr);
	counter_u64_add(dothits, 1);
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
	/* "." has no timestamp entry of its own. */
	if (tsp != NULL)
		timespecclear(tsp);
	if (ticksp != NULL)
		*ticksp = ticks;
	vrefact(*vpp);
	/*
	 * When we lookup "." we still can be asked to lock it
	 * differently...
	 */
	ltype = cnp->cn_lkflags & LK_TYPE_MASK;
	if (ltype != VOP_ISLOCKED(*vpp)) {
		if (ltype == LK_EXCLUSIVE) {
			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
			if ((*vpp)->v_iflag & VI_DOOMED) {
				/* forced unmount */
				vrele(*vpp);
				*vpp = NULL;
				return (ENOENT);
			}
		} else
			vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
	}
	return (-1);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.  If the directory vnode is
 * recycled out from under us due to a forced unmount, a status of
 * ENOENT is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up . an extra ref is taken, but the lock is
 * not recursively acquired.
 */
|
1995-03-09 20:23:45 +00:00
|
|
|
|
2017-10-06 23:05:55 +00:00
|
|
|
/*
 * Handle a lookup without MAKEENTRY: any matching entry is stale and
 * must be removed rather than returned.
 *
 * Always returns 0 (cache miss) after zapping a matching entry, if any.
 */
static __noinline int
cache_lookup_nomakeentry(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct timespec *tsp, int *ticksp)
{
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp, *dvlp2;
	uint32_t hash;
	int error;

	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
		/* ".." lives in dvp->v_cache_dd, not in the hash table. */
		counter_u64_add(dotdothits, 1);
		dvlp = VP2VNODELOCK(dvp);
		dvlp2 = NULL;
		mtx_lock(dvlp);
retry_dotdot:
		ncp = dvp->v_cache_dd;
		if (ncp == NULL) {
			SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
			    "..", NULL);
			mtx_unlock(dvlp);
			return (0);
		}
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
			if (ncp->nc_dvp != dvp)
				panic("dvp %p v_cache_dd %p\n", dvp, ncp);
			/*
			 * A false return means locks were transiently
			 * dropped (cached in dvlp2); re-validate v_cache_dd.
			 */
			if (!cache_zap_locked_vnode_kl2(ncp,
			    dvp, &dvlp2))
				goto retry_dotdot;
			MPASS(dvp->v_cache_dd == NULL);
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
			cache_free(ncp);
		} else {
			/*
			 * v_cache_dd points at a regular entry owned by
			 * the hash table; just clear the shortcut.
			 */
			dvp->v_cache_dd = NULL;
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
		}
		return (0);
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
retry:
	/* Cheap unlocked pre-check to avoid taking the bucket lock. */
	if (LIST_EMPTY(NCHHASH(hash)))
		goto out_no_entry;

	rw_wlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		rw_wunlock(blp);
		goto out_no_entry;
	}

	counter_u64_add(numposzaps, 1);

	error = cache_zap_wlocked_bucket(ncp, blp);
	if (error != 0) {
		/* Vnode trylock failed; back off and rescan the bucket. */
		zap_and_exit_bucket_fail++;
		cache_maybe_yield();
		goto retry;
	}
	cache_free(ncp);
	return (0);
out_no_entry:
	SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, NULL);
	counter_u64_add(nummisszap, 1);
	return (0);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
int
|
2016-01-07 02:04:17 +00:00
|
|
|
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
|
|
|
|
struct timespec *tsp, int *ticksp)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2017-09-10 11:17:32 +00:00
|
|
|
struct namecache_ts *ncp_ts;
|
1999-10-03 12:18:29 +00:00
|
|
|
struct namecache *ncp;
|
2016-09-23 04:45:11 +00:00
|
|
|
struct rwlock *blp;
|
|
|
|
struct mtx *dvlp, *dvlp2;
|
2010-06-21 09:55:56 +00:00
|
|
|
uint32_t hash;
|
2016-09-23 04:45:11 +00:00
|
|
|
int error, ltype;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(!doingcache)) {
|
1995-03-06 06:45:52 +00:00
|
|
|
cnp->cn_flags &= ~MAKEENTRY;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
1995-03-06 06:45:52 +00:00
|
|
|
}
|
2017-10-06 23:05:55 +00:00
|
|
|
|
|
|
|
counter_u64_add(numcalls, 1);
|
|
|
|
|
|
|
|
if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'))
|
|
|
|
return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp));
|
|
|
|
|
|
|
|
if ((cnp->cn_flags & MAKEENTRY) == 0)
|
|
|
|
return (cache_lookup_nomakeentry(dvp, vpp, cnp, tsp, ticksp));
|
|
|
|
|
2005-03-29 12:59:06 +00:00
|
|
|
retry:
|
2016-09-23 04:45:11 +00:00
|
|
|
blp = NULL;
|
2009-01-28 19:05:18 +00:00
|
|
|
error = 0;
|
2017-11-01 08:40:04 +00:00
|
|
|
if (cnp->cn_namelen == 2 &&
|
|
|
|
cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
|
2017-10-06 23:05:55 +00:00
|
|
|
counter_u64_add(dotdothits, 1);
|
|
|
|
dvlp = VP2VNODELOCK(dvp);
|
|
|
|
dvlp2 = NULL;
|
|
|
|
mtx_lock(dvlp);
|
|
|
|
ncp = dvp->v_cache_dd;
|
|
|
|
if (ncp == NULL) {
|
|
|
|
SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
|
|
|
|
"..", NULL);
|
|
|
|
mtx_unlock(dvlp);
|
|
|
|
return (0);
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
}
|
2017-10-06 23:05:55 +00:00
|
|
|
if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
|
|
|
|
if (ncp->nc_flag & NCF_NEGATIVE)
|
|
|
|
*vpp = NULL;
|
|
|
|
else
|
|
|
|
*vpp = ncp->nc_vp;
|
|
|
|
} else
|
|
|
|
*vpp = ncp->nc_dvp;
|
|
|
|
/* Return failure if negative entry was found. */
|
|
|
|
if (*vpp == NULL)
|
|
|
|
goto negative_success;
|
|
|
|
CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
|
|
|
|
dvp, cnp->cn_nameptr, *vpp);
|
|
|
|
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
|
|
|
|
*vpp);
|
|
|
|
cache_out_ts(ncp, tsp, ticksp);
|
|
|
|
if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
|
|
|
|
NCF_DTS && tsp != NULL) {
|
|
|
|
ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
|
|
|
|
*tsp = ncp_ts->nc_dotdottime;
|
|
|
|
}
|
|
|
|
goto success;
|
2016-09-10 16:29:53 +00:00
|
|
|
}
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggressively but also more selectively; if we can't
    reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There are a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
|
2016-01-21 01:05:41 +00:00
|
|
|
hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
|
2016-09-23 04:45:11 +00:00
|
|
|
blp = HASH2BUCKETLOCK(hash);
|
|
|
|
rw_rlock(blp);
|
2016-09-10 16:29:53 +00:00
|
|
|
|
2001-03-20 02:10:18 +00:00
|
|
|
LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numchecks, 1);
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
|
2017-09-10 11:17:32 +00:00
|
|
|
!bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
|
1997-02-10 02:22:35 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We failed to find an entry */
|
2009-01-28 19:05:18 +00:00
|
|
|
if (ncp == NULL) {
|
2017-10-06 23:05:55 +00:00
|
|
|
rw_runlock(blp);
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
|
|
|
|
NULL);
|
2017-10-06 23:05:55 +00:00
|
|
|
counter_u64_add(nummiss, 1);
|
2017-09-08 06:57:11 +00:00
|
|
|
return (0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1995-03-09 20:23:45 +00:00
|
|
|
|
|
|
|
/* We found a "positive" match, return the vnode */
|
2016-10-19 18:29:52 +00:00
|
|
|
if (!(ncp->nc_flag & NCF_NEGATIVE)) {
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numposhits, 1);
|
1994-05-24 10:09:53 +00:00
|
|
|
*vpp = ncp->nc_vp;
|
2005-06-13 00:46:03 +00:00
|
|
|
CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
|
|
|
|
dvp, cnp->cn_nameptr, *vpp, ncp);
|
2017-09-10 11:17:32 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
|
2015-09-28 12:14:16 +00:00
|
|
|
*vpp);
|
2012-01-23 17:09:23 +00:00
|
|
|
cache_out_ts(ncp, tsp, ticksp);
|
2005-03-29 12:59:06 +00:00
|
|
|
goto success;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2009-04-14 23:56:48 +00:00
|
|
|
negative_success:
|
1995-03-09 20:23:45 +00:00
|
|
|
/* We found a negative match, and want to create it, so purge */
|
|
|
|
if (cnp->cn_nameiop == CREATE) {
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numnegzaps, 1);
|
2016-09-10 16:29:53 +00:00
|
|
|
goto zap_and_exit;
|
1995-03-09 20:23:45 +00:00
|
|
|
}
|
|
|
|
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numneghits, 1);
|
2016-09-10 16:29:53 +00:00
|
|
|
cache_negative_hit(ncp);
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
if (ncp->nc_flag & NCF_WHITE)
|
|
|
|
cnp->cn_flags |= ISWHITEOUT;
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
|
2017-09-10 11:17:32 +00:00
|
|
|
ncp->nc_name);
|
2012-01-23 17:09:23 +00:00
|
|
|
cache_out_ts(ncp, tsp, ticksp);
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_lookup_unlock(blp, dvlp);
|
1995-03-09 20:23:45 +00:00
|
|
|
return (ENOENT);
|
2005-03-29 12:59:06 +00:00
|
|
|
|
|
|
|
success:
|
|
|
|
/*
|
|
|
|
* On success we return a locked and ref'd vnode as per the lookup
|
|
|
|
* protocol.
|
|
|
|
*/
|
2016-01-21 01:07:05 +00:00
|
|
|
MPASS(dvp != *vpp);
|
2007-05-25 22:23:38 +00:00
|
|
|
ltype = 0; /* silence gcc warning */
|
|
|
|
if (cnp->cn_flags & ISDOTDOT) {
|
2008-02-25 18:45:57 +00:00
|
|
|
ltype = VOP_ISLOCKED(dvp);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(dvp, 0);
|
2007-05-25 22:23:38 +00:00
|
|
|
}
|
2015-07-16 13:57:05 +00:00
|
|
|
vhold(*vpp);
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_lookup_unlock(blp, dvlp);
|
2015-07-16 13:57:05 +00:00
|
|
|
error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread);
|
2009-04-10 10:22:44 +00:00
|
|
|
if (cnp->cn_flags & ISDOTDOT) {
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(dvp, ltype | LK_RETRY);
|
2009-04-10 10:22:44 +00:00
|
|
|
if (dvp->v_iflag & VI_DOOMED) {
|
|
|
|
if (error == 0)
|
|
|
|
vput(*vpp);
|
|
|
|
*vpp = NULL;
|
|
|
|
return (ENOENT);
|
|
|
|
}
|
|
|
|
}
|
2005-04-13 10:59:09 +00:00
|
|
|
if (error) {
|
2005-03-29 12:59:06 +00:00
|
|
|
*vpp = NULL;
|
|
|
|
goto retry;
|
|
|
|
}
|
2008-04-09 20:19:55 +00:00
|
|
|
if ((cnp->cn_flags & ISLASTCN) &&
|
|
|
|
(cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
|
|
|
|
ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
|
|
|
|
}
|
2005-03-29 12:59:06 +00:00
|
|
|
return (-1);
|
2009-01-28 19:05:18 +00:00
|
|
|
|
2016-09-10 16:29:53 +00:00
|
|
|
zap_and_exit:
|
2016-09-23 04:45:11 +00:00
|
|
|
if (blp != NULL)
|
|
|
|
error = cache_zap_rlocked_bucket(ncp, blp);
|
|
|
|
else
|
|
|
|
error = cache_zap_locked_vnode(ncp, dvp);
|
|
|
|
if (error != 0) {
|
|
|
|
zap_and_exit_bucket_fail++;
|
|
|
|
cache_maybe_yield();
|
|
|
|
goto retry;
|
|
|
|
}
|
2016-09-10 16:29:53 +00:00
|
|
|
cache_free(ncp);
|
2009-01-28 19:05:18 +00:00
|
|
|
return (0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
struct celockstate {
|
|
|
|
struct mtx *vlp[3];
|
|
|
|
struct rwlock *blp[2];
|
|
|
|
};
|
|
|
|
CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
|
|
|
|
CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
cache_celockstate_init(struct celockstate *cel)
|
|
|
|
{
|
|
|
|
|
|
|
|
bzero(cel, sizeof(*cel));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
|
|
|
|
struct vnode *dvp)
|
|
|
|
{
|
|
|
|
struct mtx *vlp1, *vlp2;
|
|
|
|
|
|
|
|
MPASS(cel->vlp[0] == NULL);
|
|
|
|
MPASS(cel->vlp[1] == NULL);
|
|
|
|
MPASS(cel->vlp[2] == NULL);
|
|
|
|
|
|
|
|
MPASS(vp != NULL || dvp != NULL);
|
|
|
|
|
|
|
|
vlp1 = VP2VNODELOCK(vp);
|
|
|
|
vlp2 = VP2VNODELOCK(dvp);
|
|
|
|
cache_sort(&vlp1, &vlp2);
|
|
|
|
|
|
|
|
if (vlp1 != NULL) {
|
|
|
|
mtx_lock(vlp1);
|
|
|
|
cel->vlp[0] = vlp1;
|
|
|
|
}
|
|
|
|
mtx_lock(vlp2);
|
|
|
|
cel->vlp[1] = vlp2;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_unlock_vnodes_cel(struct celockstate *cel)
|
|
|
|
{
|
|
|
|
|
|
|
|
MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);
|
|
|
|
|
|
|
|
if (cel->vlp[0] != NULL)
|
|
|
|
mtx_unlock(cel->vlp[0]);
|
|
|
|
if (cel->vlp[1] != NULL)
|
|
|
|
mtx_unlock(cel->vlp[1]);
|
|
|
|
if (cel->vlp[2] != NULL)
|
|
|
|
mtx_unlock(cel->vlp[2]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
|
|
|
|
{
|
|
|
|
struct mtx *vlp;
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
cache_assert_vlp_locked(cel->vlp[0]);
|
|
|
|
cache_assert_vlp_locked(cel->vlp[1]);
|
|
|
|
MPASS(cel->vlp[2] == NULL);
|
|
|
|
|
2016-12-29 08:41:25 +00:00
|
|
|
MPASS(vp != NULL);
|
2016-09-23 04:45:11 +00:00
|
|
|
vlp = VP2VNODELOCK(vp);
|
2016-12-29 08:41:25 +00:00
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
ret = true;
|
|
|
|
if (vlp >= cel->vlp[1]) {
|
|
|
|
mtx_lock(vlp);
|
|
|
|
} else {
|
|
|
|
if (mtx_trylock(vlp))
|
|
|
|
goto out;
|
|
|
|
cache_lock_vnodes_cel_3_failures++;
|
|
|
|
cache_unlock_vnodes_cel(cel);
|
|
|
|
if (vlp < cel->vlp[0]) {
|
|
|
|
mtx_lock(vlp);
|
|
|
|
mtx_lock(cel->vlp[0]);
|
|
|
|
mtx_lock(cel->vlp[1]);
|
|
|
|
} else {
|
|
|
|
if (cel->vlp[0] != NULL)
|
|
|
|
mtx_lock(cel->vlp[0]);
|
|
|
|
mtx_lock(vlp);
|
|
|
|
mtx_lock(cel->vlp[1]);
|
|
|
|
}
|
|
|
|
ret = false;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
cel->vlp[2] = vlp;
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
|
|
|
|
struct rwlock *blp2)
|
|
|
|
{
|
|
|
|
|
|
|
|
MPASS(cel->blp[0] == NULL);
|
|
|
|
MPASS(cel->blp[1] == NULL);
|
|
|
|
|
|
|
|
cache_sort(&blp1, &blp2);
|
|
|
|
|
|
|
|
if (blp1 != NULL) {
|
|
|
|
rw_wlock(blp1);
|
|
|
|
cel->blp[0] = blp1;
|
|
|
|
}
|
|
|
|
rw_wlock(blp2);
|
|
|
|
cel->blp[1] = blp2;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_unlock_buckets_cel(struct celockstate *cel)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (cel->blp[0] != NULL)
|
|
|
|
rw_wunlock(cel->blp[0]);
|
|
|
|
rw_wunlock(cel->blp[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock part of the cache affected by the insertion.
|
|
|
|
*
|
|
|
|
* This means vnodelocks for dvp, vp and the relevant bucketlock.
|
|
|
|
* However, insertion can result in removal of an old entry. In this
|
|
|
|
* case we have an additional vnode and bucketlock pair to lock. If the
|
|
|
|
* entry is negative, ncelock is locked instead of the vnode.
|
|
|
|
*
|
|
|
|
* That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
|
|
|
|
* preserving the locking order (smaller address first).
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
|
|
|
|
uint32_t hash)
|
|
|
|
{
|
|
|
|
struct namecache *ncp;
|
|
|
|
struct rwlock *blps[2];
|
|
|
|
|
|
|
|
blps[0] = HASH2BUCKETLOCK(hash);
|
|
|
|
for (;;) {
|
|
|
|
blps[1] = NULL;
|
|
|
|
cache_lock_vnodes_cel(cel, dvp, vp);
|
|
|
|
if (vp == NULL || vp->v_type != VDIR)
|
|
|
|
break;
|
|
|
|
ncp = vp->v_cache_dd;
|
|
|
|
if (ncp == NULL)
|
|
|
|
break;
|
|
|
|
if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
|
|
|
|
break;
|
|
|
|
MPASS(ncp->nc_dvp == vp);
|
|
|
|
blps[1] = NCP2BUCKETLOCK(ncp);
|
2016-10-19 18:29:52 +00:00
|
|
|
if (ncp->nc_flag & NCF_NEGATIVE)
|
|
|
|
break;
|
2016-09-23 04:45:11 +00:00
|
|
|
if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
|
|
|
|
break;
|
|
|
|
/*
|
|
|
|
* All vnodes got re-locked. Re-validate the state and if
|
|
|
|
* nothing changed we are done. Otherwise restart.
|
|
|
|
*/
|
|
|
|
if (ncp == vp->v_cache_dd &&
|
|
|
|
(ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
|
|
|
|
blps[1] == NCP2BUCKETLOCK(ncp) &&
|
|
|
|
VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
|
|
|
|
break;
|
|
|
|
cache_unlock_vnodes_cel(cel);
|
|
|
|
cel->vlp[0] = NULL;
|
|
|
|
cel->vlp[1] = NULL;
|
|
|
|
cel->vlp[2] = NULL;
|
|
|
|
}
|
|
|
|
cache_lock_buckets_cel(cel, blps[0], blps[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
|
|
|
|
uint32_t hash)
|
|
|
|
{
|
|
|
|
struct namecache *ncp;
|
|
|
|
struct rwlock *blps[2];
|
|
|
|
|
|
|
|
blps[0] = HASH2BUCKETLOCK(hash);
|
|
|
|
for (;;) {
|
|
|
|
blps[1] = NULL;
|
|
|
|
cache_lock_vnodes_cel(cel, dvp, vp);
|
|
|
|
ncp = dvp->v_cache_dd;
|
|
|
|
if (ncp == NULL)
|
|
|
|
break;
|
|
|
|
if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
|
|
|
|
break;
|
|
|
|
MPASS(ncp->nc_dvp == dvp);
|
|
|
|
blps[1] = NCP2BUCKETLOCK(ncp);
|
2016-10-19 18:29:52 +00:00
|
|
|
if (ncp->nc_flag & NCF_NEGATIVE)
|
|
|
|
break;
|
2016-09-23 04:45:11 +00:00
|
|
|
if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
|
|
|
|
break;
|
|
|
|
if (ncp == dvp->v_cache_dd &&
|
|
|
|
(ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
|
|
|
|
blps[1] == NCP2BUCKETLOCK(ncp) &&
|
|
|
|
VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
|
|
|
|
break;
|
|
|
|
cache_unlock_vnodes_cel(cel);
|
|
|
|
cel->vlp[0] = NULL;
|
|
|
|
cel->vlp[1] = NULL;
|
|
|
|
cel->vlp[2] = NULL;
|
|
|
|
}
|
|
|
|
cache_lock_buckets_cel(cel, blps[0], blps[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cache_enter_unlock(struct celockstate *cel)
|
|
|
|
{
|
|
|
|
|
|
|
|
cache_unlock_buckets_cel(cel);
|
|
|
|
cache_unlock_vnodes_cel(cel);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
1995-03-09 20:23:45 +00:00
|
|
|
* Add an entry to the cache.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
|
2016-01-07 02:04:17 +00:00
|
|
|
cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
|
|
|
|
struct timespec *tsp, struct timespec *dtsp)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2016-09-23 04:45:11 +00:00
|
|
|
struct celockstate cel;
|
|
|
|
struct namecache *ncp, *n2, *ndd;
|
2017-09-10 11:17:32 +00:00
|
|
|
struct namecache_ts *ncp_ts, *n2_ts;
|
1999-10-03 12:18:29 +00:00
|
|
|
struct nchashhead *ncpp;
|
2016-10-19 18:29:52 +00:00
|
|
|
struct neglist *neglist;
|
2010-06-21 09:55:56 +00:00
|
|
|
uint32_t hash;
|
2009-03-29 21:25:40 +00:00
|
|
|
int flag;
|
1999-10-03 12:18:29 +00:00
|
|
|
int len;
|
2016-10-19 18:29:52 +00:00
|
|
|
bool neg_locked;
|
2017-11-05 22:29:45 +00:00
|
|
|
int lnumcache;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-06-13 00:46:03 +00:00
|
|
|
CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
|
2005-06-11 08:47:30 +00:00
|
|
|
VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
|
2010-04-15 17:17:02 +00:00
|
|
|
("cache_enter: Adding a doomed vnode"));
|
2010-04-20 10:19:27 +00:00
|
|
|
VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
|
|
|
|
("cache_enter: Doomed vnode used as src"));
|
2005-06-11 08:47:30 +00:00
|
|
|
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(!doingcache))
|
1994-05-24 10:09:53 +00:00
|
|
|
return;
|
1995-03-09 20:23:45 +00:00
|
|
|
|
2009-01-20 04:21:21 +00:00
|
|
|
/*
|
|
|
|
* Avoid blowout in namecache entries.
|
|
|
|
*/
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(numcache >= desiredvnodes * ncsizefactor))
|
2009-01-20 04:21:21 +00:00
|
|
|
return;
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_celockstate_init(&cel);
|
|
|
|
ndd = NULL;
|
2009-03-29 21:25:40 +00:00
|
|
|
flag = 0;
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
if (cnp->cn_nameptr[0] == '.') {
|
2009-03-29 21:25:40 +00:00
|
|
|
if (cnp->cn_namelen == 1)
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
return;
|
|
|
|
if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
|
2016-09-23 04:45:11 +00:00
|
|
|
len = cnp->cn_namelen;
|
|
|
|
hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
|
|
|
|
cache_enter_lock_dd(&cel, dvp, vp, hash);
|
2009-03-29 21:25:40 +00:00
|
|
|
/*
|
|
|
|
* If dotdot entry already exists, just retarget it
|
|
|
|
* to new parent vnode, otherwise continue with new
|
|
|
|
* namecache entry allocation.
|
|
|
|
*/
|
2009-04-17 18:11:11 +00:00
|
|
|
if ((ncp = dvp->v_cache_dd) != NULL &&
|
|
|
|
ncp->nc_flag & NCF_ISDOTDOT) {
|
|
|
|
KASSERT(ncp->nc_dvp == dvp,
|
|
|
|
("wrong isdotdot parent"));
|
2016-10-19 18:29:52 +00:00
|
|
|
neg_locked = false;
|
|
|
|
if (ncp->nc_flag & NCF_NEGATIVE || vp == NULL) {
|
|
|
|
neglist = NCP2NEGLIST(ncp);
|
|
|
|
mtx_lock(&ncneg_hot.nl_lock);
|
|
|
|
mtx_lock(&neglist->nl_lock);
|
|
|
|
neg_locked = true;
|
|
|
|
}
|
|
|
|
if (!(ncp->nc_flag & NCF_NEGATIVE)) {
|
2009-03-29 21:25:40 +00:00
|
|
|
TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
|
|
|
|
ncp, nc_dst);
|
2013-12-27 17:09:59 +00:00
|
|
|
} else {
|
2016-10-19 18:29:52 +00:00
|
|
|
cache_negative_remove(ncp, true);
|
2013-12-27 17:09:59 +00:00
|
|
|
}
|
|
|
|
if (vp != NULL) {
|
2009-03-29 21:25:40 +00:00
|
|
|
TAILQ_INSERT_HEAD(&vp->v_cache_dst,
|
|
|
|
ncp, nc_dst);
|
2016-10-19 18:29:52 +00:00
|
|
|
ncp->nc_flag &= ~(NCF_NEGATIVE|NCF_HOTNEGATIVE);
|
2013-12-27 17:09:59 +00:00
|
|
|
} else {
|
2016-10-19 18:29:52 +00:00
|
|
|
ncp->nc_flag &= ~(NCF_HOTNEGATIVE);
|
|
|
|
ncp->nc_flag |= NCF_NEGATIVE;
|
|
|
|
cache_negative_insert(ncp, true);
|
|
|
|
}
|
|
|
|
if (neg_locked) {
|
|
|
|
mtx_unlock(&neglist->nl_lock);
|
|
|
|
mtx_unlock(&ncneg_hot.nl_lock);
|
2013-12-27 17:09:59 +00:00
|
|
|
}
|
2009-04-17 18:11:11 +00:00
|
|
|
ncp->nc_vp = vp;
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_enter_unlock(&cel);
|
2009-04-17 18:11:11 +00:00
|
|
|
return;
|
2009-03-29 21:25:40 +00:00
|
|
|
}
|
|
|
|
dvp->v_cache_dd = NULL;
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_enter_unlock(&cel);
|
|
|
|
cache_celockstate_init(&cel);
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp);
|
2009-03-29 21:25:40 +00:00
|
|
|
flag = NCF_ISDOTDOT;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1995-03-09 20:23:45 +00:00
|
|
|
}
|
2003-06-11 07:35:56 +00:00
|
|
|
|
2008-08-23 15:13:39 +00:00
|
|
|
/*
|
|
|
|
* Calculate the hash key and setup as much of the new
|
|
|
|
* namecache entry as possible before acquiring the lock.
|
|
|
|
*/
|
2012-01-22 01:11:06 +00:00
|
|
|
ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
|
2016-10-19 18:29:52 +00:00
|
|
|
ncp->nc_flag = flag;
|
2008-08-23 15:13:39 +00:00
|
|
|
ncp->nc_vp = vp;
|
2016-10-19 18:29:52 +00:00
|
|
|
if (vp == NULL)
|
|
|
|
ncp->nc_flag |= NCF_NEGATIVE;
|
2008-08-23 15:13:39 +00:00
|
|
|
ncp->nc_dvp = dvp;
|
2012-01-22 01:11:06 +00:00
|
|
|
if (tsp != NULL) {
|
2017-09-10 11:17:32 +00:00
|
|
|
ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
|
|
|
|
ncp_ts->nc_time = *tsp;
|
|
|
|
ncp_ts->nc_ticks = ticks;
|
|
|
|
ncp_ts->nc_nc.nc_flag |= NCF_TS;
|
2012-03-03 01:06:54 +00:00
|
|
|
if (dtsp != NULL) {
|
2017-09-10 11:17:32 +00:00
|
|
|
ncp_ts->nc_dotdottime = *dtsp;
|
|
|
|
ncp_ts->nc_nc.nc_flag |= NCF_DTS;
|
2012-03-03 01:06:54 +00:00
|
|
|
}
|
2012-01-22 01:11:06 +00:00
|
|
|
}
|
2008-08-23 15:13:39 +00:00
|
|
|
len = ncp->nc_nlen = cnp->cn_namelen;
|
2016-01-21 01:05:41 +00:00
|
|
|
hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
|
2017-09-10 11:17:32 +00:00
|
|
|
strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_enter_lock(&cel, dvp, vp, hash);
|
2008-08-23 15:13:39 +00:00
|
|
|
|
|
|
|
/*
|
2008-12-30 12:51:14 +00:00
|
|
|
* See if this vnode or negative entry is already in the cache
|
|
|
|
* with this name. This can happen with concurrent lookups of
|
|
|
|
* the same path name.
|
2008-08-23 15:13:39 +00:00
|
|
|
*/
|
2008-12-30 12:51:14 +00:00
|
|
|
ncpp = NCHHASH(hash);
|
|
|
|
LIST_FOREACH(n2, ncpp, nc_hash) {
|
|
|
|
if (n2->nc_dvp == dvp &&
|
|
|
|
n2->nc_nlen == cnp->cn_namelen &&
|
2017-09-10 11:17:32 +00:00
|
|
|
!bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
|
2012-01-22 01:11:06 +00:00
|
|
|
if (tsp != NULL) {
|
2012-01-25 20:48:20 +00:00
|
|
|
KASSERT((n2->nc_flag & NCF_TS) != 0,
|
|
|
|
("no NCF_TS"));
|
2017-09-10 11:17:32 +00:00
|
|
|
n2_ts = __containerof(n2, struct namecache_ts, nc_nc);
|
|
|
|
n2_ts->nc_time = ncp_ts->nc_time;
|
|
|
|
n2_ts->nc_ticks = ncp_ts->nc_ticks;
|
2012-03-03 01:06:54 +00:00
|
|
|
if (dtsp != NULL) {
|
2017-09-10 11:17:32 +00:00
|
|
|
n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime;
|
2016-10-19 18:29:52 +00:00
|
|
|
if (ncp->nc_flag & NCF_NEGATIVE)
|
|
|
|
mtx_lock(&ncneg_hot.nl_lock);
|
2017-09-10 11:17:32 +00:00
|
|
|
n2_ts->nc_nc.nc_flag |= NCF_DTS;
|
2016-10-19 18:29:52 +00:00
|
|
|
if (ncp->nc_flag & NCF_NEGATIVE)
|
|
|
|
mtx_unlock(&ncneg_hot.nl_lock);
|
2012-03-03 01:06:54 +00:00
|
|
|
}
|
2012-01-22 01:11:06 +00:00
|
|
|
}
|
2016-09-23 04:45:11 +00:00
|
|
|
goto out_unlock_free;
|
2008-12-02 11:14:16 +00:00
|
|
|
}
|
|
|
|
}
|
2008-08-23 15:13:39 +00:00
|
|
|
|
2009-04-11 20:23:08 +00:00
|
|
|
if (flag == NCF_ISDOTDOT) {
|
|
|
|
/*
|
|
|
|
* See if we are trying to add .. entry, but some other lookup
|
|
|
|
* has populated v_cache_dd pointer already.
|
|
|
|
*/
|
2016-09-23 04:45:11 +00:00
|
|
|
if (dvp->v_cache_dd != NULL)
|
|
|
|
goto out_unlock_free;
|
2009-04-11 20:23:08 +00:00
|
|
|
KASSERT(vp == NULL || vp->v_type == VDIR,
|
|
|
|
("wrong vnode type %p", vp));
|
|
|
|
dvp->v_cache_dd = ncp;
|
2009-03-29 21:25:40 +00:00
|
|
|
}
|
|
|
|
|
2016-01-21 01:09:39 +00:00
|
|
|
if (vp != NULL) {
|
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
if (flag != NCF_ISDOTDOT) {
|
|
|
|
/*
|
|
|
|
* For this case, the cache entry maps both the
|
|
|
|
* directory name in it and the name ".." for the
|
|
|
|
* directory's parent.
|
|
|
|
*/
|
2016-09-04 16:52:14 +00:00
|
|
|
if ((ndd = vp->v_cache_dd) != NULL) {
|
|
|
|
if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_zap_locked(ndd, false);
|
2016-09-04 16:52:14 +00:00
|
|
|
else
|
|
|
|
ndd = NULL;
|
|
|
|
}
|
2016-01-21 01:09:39 +00:00
|
|
|
vp->v_cache_dd = ncp;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vp->v_cache_dd = NULL;
|
2009-03-29 21:25:40 +00:00
|
|
|
}
|
1997-08-31 07:32:39 +00:00
|
|
|
}
|
1997-03-08 15:22:14 +00:00
|
|
|
|
2009-03-29 21:25:40 +00:00
|
|
|
if (flag != NCF_ISDOTDOT) {
|
|
|
|
if (LIST_EMPTY(&dvp->v_cache_src)) {
|
2016-01-21 01:09:39 +00:00
|
|
|
vhold(dvp);
|
2016-09-23 04:45:11 +00:00
|
|
|
atomic_add_rel_long(&numcachehv, 1);
|
2009-03-29 21:25:40 +00:00
|
|
|
}
|
|
|
|
LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
|
2001-04-18 11:19:50 +00:00
|
|
|
}
|
2009-03-29 21:25:40 +00:00
|
|
|
|
2016-09-10 16:29:53 +00:00
|
|
|
/*
|
|
|
|
* Insert the new namecache entry into the appropriate chain
|
|
|
|
* within the cache entries table.
|
|
|
|
*/
|
|
|
|
LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
|
|
|
|
|
2003-02-15 23:25:12 +00:00
|
|
|
/*
|
|
|
|
* If the entry is "negative", we place it into the
|
|
|
|
* "negative" cache queue, otherwise, we place it into the
|
|
|
|
* destination vnode's cache entries queue.
|
|
|
|
*/
|
2016-01-07 02:04:17 +00:00
|
|
|
if (vp != NULL) {
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
|
2017-09-10 11:17:32 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name,
|
2015-09-28 12:14:16 +00:00
|
|
|
vp);
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
} else {
|
2016-01-21 01:09:39 +00:00
|
|
|
if (cnp->cn_flags & ISWHITEOUT)
|
|
|
|
ncp->nc_flag |= NCF_WHITE;
|
2016-10-19 18:29:52 +00:00
|
|
|
cache_negative_insert(ncp, false);
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE2(vfs, namecache, enter_negative, done, dvp,
|
2017-09-10 11:17:32 +00:00
|
|
|
ncp->nc_name);
|
1. Add a {pointer, v_id} pair to the vnode to store the reference to the
".." vnode. This is cheaper storagewise than keeping it in the
namecache, and it makes more sense since it's a 1:1 mapping.
2. Also handle the case of "." more intelligently rather than stuff
the namecache with pointless entries.
3. Add two lists to the vnode and hang namecache entries which go from
or to this vnode. When cleaning a vnode, delete all namecache
entries it invalidates.
4. Never reuse namecache enties, malloc new ones when we need it, free
old ones when they die. No longer a hard limit on how many we can
have.
5. Remove the upper limit on namelength of namecache entries.
6. Make a global list for negative namecache entries, limit their number
to a sysctl'able (debug.ncnegfactor) fraction of the total namecache.
Currently the default fraction is 1/16th. (Suggestions for better
default wanted!)
7. Assign v_id correctly in the face of 32bit rollover.
8. Remove the LRU list for namecache entries, not needed. Remove the
#ifdef NCH_STATISTICS stuff, it's not needed either.
9. Use the vnode freelist as a true LRU list, also for namecache accesses.
10. Reuse vnodes more aggresively but also more selectively, if we can't
reuse, malloc a new one. There is no longer a hard limit on their
number, they grow to the point where we don't reuse potentially
usable vnodes. A vnode will not get recycled if still has pages in
core or if it is the source of namecache entries (Yes, this does
indeed work :-) "." and ".." are not namecache entries any longer...)
11. Do not overload the v_id field in namecache entries with whiteout
information, use a char sized flags field instead, so we can get
rid of the vpid and v_id fields from the namecache struct. Since
we're linked to the vnodes and purged when they're cleaned, we don't
have to check the v_id any more.
12. NFS knew about the limitation on name length in the namecache, it
shouldn't and doesn't now.
Bugs:
The namecache statistics no longer includes the hits for ".."
and "." hits.
Performance impact:
Generally in the +/- 0.5% for "normal" workstations, but
I hope this will allow the system to be selftuning over a
bigger range of "special" applications. The case where
RAM is available but unused for cache because we don't have
any vnodes should be gone.
Future work:
Straighten out the namecache statistics.
"desiredvnodes" is still used to (bogusly ?) size hash
tables in the filesystems.
I have still to find a way to safely free unused vnodes
back so their number can shrink when not needed.
There is a few uses of the v_id field left in the filesystems,
scheduled for demolition at a later time.
Maybe a one slot cache for unused namecache entries should
be implemented to decrease the malloc/free frequency.
1997-05-04 09:17:38 +00:00
|
|
|
}
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_enter_unlock(&cel);
|
2017-11-05 22:29:45 +00:00
|
|
|
lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
|
|
|
|
if (numneg * ncnegfactor > lnumcache)
|
2016-09-23 04:45:11 +00:00
|
|
|
cache_negative_zap_one();
|
2016-09-04 16:52:14 +00:00
|
|
|
cache_free(ndd);
|
2016-09-23 04:45:11 +00:00
|
|
|
return;
|
|
|
|
out_unlock_free:
|
|
|
|
cache_enter_unlock(&cel);
|
|
|
|
cache_free(ncp);
|
|
|
|
return;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2016-09-10 16:29:53 +00:00
|
|
|
static u_int
|
|
|
|
cache_roundup_2(u_int val)
|
|
|
|
{
|
|
|
|
u_int res;
|
|
|
|
|
|
|
|
for (res = 1; res <= val; res <<= 1)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
return (res);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Name cache initialization, from vfs_init() when we are booting
|
|
|
|
*/
|
2000-12-06 07:09:08 +00:00
|
|
|
static void
nchinit(void *dummy __unused)
{
	u_int i;

	/*
	 * Four UMA zones back namecache entries: "small" zones hold names
	 * up to CACHE_PATH_CUTOFF bytes, "large" up to NAME_MAX.  The
	 * "_ts" variants are sized for struct namecache_ts (presumably the
	 * timestamp-carrying variant -- confirm against the struct
	 * definition).  UMA_ZONE_ZINIT gives zero-initialized items.
	 */
	cache_zone_small = uma_zcreate("S VFS Cache",
	    sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache),
	    UMA_ZONE_ZINIT);
	cache_zone_small_ts = uma_zcreate("STS VFS Cache",
	    sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts),
	    UMA_ZONE_ZINIT);
	cache_zone_large = uma_zcreate("L VFS Cache",
	    sizeof(struct namecache) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache),
	    UMA_ZONE_ZINIT);
	cache_zone_large_ts = uma_zcreate("LTS VFS Cache",
	    sizeof(struct namecache_ts) + NAME_MAX + 1,
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts),
	    UMA_ZONE_ZINIT);

	/* Main name hash table, sized from the vnode limit. */
	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
	/*
	 * Bucket locks scale with CPU count (power-of-two mask), but never
	 * exceed the number of hash chains.
	 */
	ncbuckethash = cache_roundup_2(mp_ncpus * 64) - 1;
	if (ncbuckethash > nchash)
		ncbuckethash = nchash;
	bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numbucketlocks; i++)
		rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
	/* Per-vnode mutex pool, same CPU-scaled sizing. */
	ncvnodehash = cache_roundup_2(mp_ncpus * 64) - 1;
	vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numvnodelocks; i++)
		mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
	/* Threshold used by cache_purgevfs() to skip small mounts. */
	ncpurgeminvnodes = numbucketlocks;

	/* Negative-entry lists: hashed cold lists plus one hot list. */
	ncneghash = 3;
	neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < numneglists; i++) {
		mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF);
		TAILQ_INIT(&neglists[i].nl_list);
	}
	mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
	TAILQ_INIT(&ncneg_hot.nl_list);

	mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);

	/* Per-CPU statistics counters. */
	numcalls = counter_u64_alloc(M_WAITOK);
	dothits = counter_u64_alloc(M_WAITOK);
	dotdothits = counter_u64_alloc(M_WAITOK);
	numchecks = counter_u64_alloc(M_WAITOK);
	nummiss = counter_u64_alloc(M_WAITOK);
	nummisszap = counter_u64_alloc(M_WAITOK);
	numposzaps = counter_u64_alloc(M_WAITOK);
	numposhits = counter_u64_alloc(M_WAITOK);
	numnegzaps = counter_u64_alloc(M_WAITOK);
	numneghits = counter_u64_alloc(M_WAITOK);
	numfullpathcalls = counter_u64_alloc(M_WAITOK);
	numfullpathfail1 = counter_u64_alloc(M_WAITOK);
	numfullpathfail2 = counter_u64_alloc(M_WAITOK);
	numfullpathfail4 = counter_u64_alloc(M_WAITOK);
	numfullpathfound = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
|
2000-12-06 07:09:08 +00:00
|
|
|
|
2015-09-06 05:50:51 +00:00
|
|
|
/*
 * Resize the namecache hash table when the vnode limit changes.
 * Rehashes every entry from the old table into the new one while
 * holding all vnode and bucket locks, so no entry can be added or
 * removed concurrently.
 */
void
cache_changesize(int newmaxvnodes)
{
	struct nchashhead *new_nchashtbl, *old_nchashtbl;
	u_long new_nchash, old_nchash;
	struct namecache *ncp;
	uint32_t hash;
	int i;

	/* Target twice the vnode limit, but never fewer chains than locks. */
	newmaxvnodes = cache_roundup_2(newmaxvnodes * 2);
	if (newmaxvnodes < numbucketlocks)
		newmaxvnodes = numbucketlocks;

	/* Allocate the new table before taking any locks. */
	new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash);
	/* If same hash table size, nothing to do */
	if (nchash == new_nchash) {
		free(new_nchashtbl, M_VFSCACHE);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * None of the namecache entries in the table can be removed
	 * because to do so, they have to be removed from the hash table.
	 */
	cache_lock_all_vnodes();
	cache_lock_all_buckets();
	old_nchashtbl = nchashtbl;
	old_nchash = nchash;
	nchashtbl = new_nchashtbl;
	nchash = new_nchash;
	for (i = 0; i <= old_nchash; i++) {
		/* Drain each old chain, rehashing into the new table. */
		while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
			hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen,
			    ncp->nc_dvp);
			LIST_REMOVE(ncp, nc_hash);
			LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
		}
	}
	cache_unlock_all_buckets();
	cache_unlock_all_vnodes();
	free(old_nchashtbl, M_VFSCACHE);
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
1999-04-24 17:58:14 +00:00
|
|
|
* Invalidate all entries to a particular vnode.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
cache_purge(struct vnode *vp)
{
	/* Entries zapped under the lock are batched here and freed last. */
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp, *vlp2;

	CTR1(KTR_VFS, "cache_purge(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge, done, vp);
	/* Fast path: nothing cached for this vnode in any role. */
	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
	    vp->v_cache_dd == NULL)
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	vlp2 = NULL;
	mtx_lock(vlp);
retry:
	/* Entries where vp is the directory (source) side. */
	while (!LIST_EMPTY(&vp->v_cache_src)) {
		ncp = LIST_FIRST(&vp->v_cache_src);
		/*
		 * A false return means the zap could not complete under the
		 * currently held locks (presumably a lock-ordering drop in
		 * cache_zap_locked_vnode_kl2) -- rescan from the top.
		 */
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	/* Entries where vp is the target (destination) side. */
	while (!TAILQ_EMPTY(&vp->v_cache_dst)) {
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	/* The cached ".." entry, if any. */
	ncp = vp->v_cache_dd;
	if (ncp != NULL) {
		KASSERT(ncp->nc_flag & NCF_ISDOTDOT,
		    ("lost dotdot link"));
		if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2))
			goto retry;
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
	mtx_unlock(vlp);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
	/* Free the zapped entries now that no namecache locks are held. */
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}
|
|
|
|
|
2009-02-19 22:28:48 +00:00
|
|
|
/*
|
|
|
|
* Invalidate all negative entries for a particular directory vnode.
|
|
|
|
*/
|
|
|
|
void
cache_purge_negative(struct vnode *vp)
{
	/* Zapped entries are collected here and freed after unlocking. */
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp;

	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
	/* Negative entries hang off v_cache_src; nothing there, nothing to do. */
	if (LIST_EMPTY(&vp->v_cache_src))
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
		/* Skip positive entries; only negatives are purged here. */
		if (!(ncp->nc_flag & NCF_NEGATIVE))
			continue;
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	mtx_unlock(vlp);
	/* Free outside the vnode lock. */
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
1995-03-09 20:23:45 +00:00
|
|
|
* Flush all entries referencing a particular filesystem.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
cache_purgevfs(struct mount *mp, bool force)
{
	/* Zapped entries accumulate here; freed after all locks are dropped. */
	TAILQ_HEAD(, namecache) ncps;
	struct mtx *vlp1, *vlp2;
	struct rwlock *blp;
	struct nchashhead *bucket;
	struct namecache *ncp, *nnp;
	u_long i, j, n_nchash;
	int error;

	/* Scan hash tables for applicable entries */
	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
	/*
	 * Small mounts are cheaper to purge via per-vnode purges elsewhere;
	 * skip the full-table walk unless forced.
	 */
	if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
		return;
	TAILQ_INIT(&ncps);
	n_nchash = nchash + 1;
	vlp1 = vlp2 = NULL;
	/*
	 * One pass per bucket lock; each lock covers the chains whose index
	 * is congruent to i modulo numbucketlocks, hence the strided inner
	 * loop.
	 */
	for (i = 0; i < numbucketlocks; i++) {
		blp = (struct rwlock *)&bucketlocks[i];
		rw_wlock(blp);
		for (j = i; j < n_nchash; j += numbucketlocks) {
retry:
			bucket = &nchashtbl[j];
			LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
				cache_assert_bucket_locked(ncp, RA_WLOCKED);
				if (ncp->nc_dvp->v_mount != mp)
					continue;
				/*
				 * Nonzero means the zap needed locks it could
				 * not take in order -- rescan this chain.
				 */
				error = cache_zap_wlocked_bucket_kl(ncp, blp,
				    &vlp1, &vlp2);
				if (error != 0)
					goto retry;
				TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
			}
		}
		rw_wunlock(blp);
		/* Be polite between buckets when no vnode locks are held. */
		if (vlp1 == NULL && vlp2 == NULL)
			cache_maybe_yield();
	}
	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);

	/* Free the collected entries lock-free. */
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}
|
1997-08-26 07:32:51 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform canonical checks and cache lookup and pass on to filesystem
|
|
|
|
* through the vop_cachedlookup only if needed.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int
|
2016-01-07 02:04:17 +00:00
|
|
|
vfs_cache_lookup(struct vop_lookup_args *ap)
|
1997-08-26 07:32:51 +00:00
|
|
|
{
|
2005-03-29 12:59:06 +00:00
|
|
|
struct vnode *dvp;
|
1997-08-26 07:32:51 +00:00
|
|
|
int error;
|
|
|
|
struct vnode **vpp = ap->a_vpp;
|
|
|
|
struct componentname *cnp = ap->a_cnp;
|
|
|
|
struct ucred *cred = cnp->cn_cred;
|
|
|
|
int flags = cnp->cn_flags;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td = cnp->cn_thread;
|
1997-08-26 07:32:51 +00:00
|
|
|
|
|
|
|
*vpp = NULL;
|
2000-09-10 03:46:12 +00:00
|
|
|
dvp = ap->a_dvp;
|
1997-08-26 07:32:51 +00:00
|
|
|
|
2000-09-10 03:46:12 +00:00
|
|
|
if (dvp->v_type != VDIR)
|
2003-06-11 07:35:56 +00:00
|
|
|
return (ENOTDIR);
|
1997-08-26 07:32:51 +00:00
|
|
|
|
2000-09-10 03:46:12 +00:00
|
|
|
if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
|
1997-08-26 07:32:51 +00:00
|
|
|
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
|
|
|
|
return (EROFS);
|
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_ACCESS(dvp, VEXEC, cred, td);
|
1997-08-26 07:32:51 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2012-02-06 17:00:28 +00:00
|
|
|
error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
|
2005-03-29 12:59:06 +00:00
|
|
|
if (error == 0)
|
2005-03-29 10:08:23 +00:00
|
|
|
return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
|
2008-09-24 18:51:33 +00:00
|
|
|
if (error == -1)
|
|
|
|
return (0);
|
|
|
|
return (error);
|
1997-08-26 07:32:51 +00:00
|
|
|
}
|
1999-10-03 12:18:29 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* XXX All of these sysctls would probably be more productive dead.
|
|
|
|
*/
|
2017-01-27 14:56:36 +00:00
|
|
|
/* Kill switch: setting debug.disablecwd makes __getcwd() fail with ENODEV. */
static int __read_mostly disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");
|
1999-10-03 12:18:29 +00:00
|
|
|
|
2007-03-05 13:10:58 +00:00
|
|
|
/* Implementation of the getcwd syscall. */
|
1999-10-03 12:18:29 +00:00
|
|
|
int
|
2016-01-07 02:04:17 +00:00
|
|
|
sys___getcwd(struct thread *td, struct __getcwd_args *uap)
|
1999-10-03 12:18:29 +00:00
|
|
|
{
|
2003-03-17 12:21:08 +00:00
|
|
|
|
2015-04-21 13:55:24 +00:00
|
|
|
return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen,
|
|
|
|
MAXPATHLEN));
|
2003-03-17 12:21:08 +00:00
|
|
|
}
|
|
|
|
|
2002-09-02 22:40:30 +00:00
|
|
|
/*
 * Common backend for getcwd: resolve the current directory of 'td' to a
 * path, writing the result to 'buf' in either kernel or user space
 * depending on 'bufseg'.  'buflen' is clamped to 'path_max'.
 */
int
kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen,
    size_t path_max)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	struct vnode *cdir, *rdir;
	int error;

	if (__predict_false(disablecwd))
		return (ENODEV);
	/* Need room for at least one character plus the NUL. */
	if (__predict_false(buflen < 2))
		return (EINVAL);
	if (buflen > path_max)
		buflen = path_max;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	/*
	 * Snapshot the current and root directories under the shared
	 * filedesc lock, taking references so they survive after the
	 * lock is dropped.
	 */
	FILEDESC_SLOCK(fdp);
	cdir = fdp->fd_cdir;
	vrefact(cdir);
	rdir = fdp->fd_rdir;
	vrefact(rdir);
	FILEDESC_SUNLOCK(fdp);
	/* On success, bp points at the start of the path within tmpbuf. */
	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
	vrele(rdir);
	vrele(cdir);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(bp);
#endif
	}
	free(tmpbuf, M_TEMP);
	return (error);
}
|
|
|
|
|
2000-04-26 11:57:45 +00:00
|
|
|
/*
|
|
|
|
* Thus begins the fullpath magic.
|
|
|
|
*/
|
|
|
|
|
2017-01-27 14:56:36 +00:00
|
|
|
/* Kill switch: setting debug.disablefullpath makes vn_fullpath() fail with ENODEV. */
static int __read_mostly disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");
|
2000-04-26 11:57:45 +00:00
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* Retrieve the full filesystem path that correspond to a vnode from the name
|
|
|
|
* cache (if available)
|
|
|
|
*/
|
2000-04-26 11:57:45 +00:00
|
|
|
int
|
2001-10-21 15:52:51 +00:00
|
|
|
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
|
|
|
|
{
|
2005-03-30 02:59:32 +00:00
|
|
|
char *buf;
|
2000-04-26 11:57:45 +00:00
|
|
|
struct filedesc *fdp;
|
2008-11-25 15:36:15 +00:00
|
|
|
struct vnode *rdir;
|
2012-10-22 17:50:54 +00:00
|
|
|
int error;
|
2000-04-26 11:57:45 +00:00
|
|
|
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(disablefullpath))
|
2000-04-26 11:57:45 +00:00
|
|
|
return (ENODEV);
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(vn == NULL))
|
2000-04-26 11:57:45 +00:00
|
|
|
return (EINVAL);
|
2005-03-30 02:59:32 +00:00
|
|
|
|
2003-02-19 05:47:46 +00:00
|
|
|
buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
|
2001-10-21 15:52:51 +00:00
|
|
|
fdp = td->td_proc->p_fd;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2008-11-25 15:36:15 +00:00
|
|
|
rdir = fdp->fd_rdir;
|
2017-01-30 03:20:05 +00:00
|
|
|
vrefact(rdir);
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2008-11-25 15:36:15 +00:00
|
|
|
error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
|
|
|
|
vrele(rdir);
|
2005-03-30 02:59:32 +00:00
|
|
|
|
|
|
|
if (!error)
|
2008-07-31 16:57:41 +00:00
|
|
|
*freebuf = buf;
|
|
|
|
else
|
|
|
|
free(buf, M_TEMP);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function is similar to vn_fullpath, but it attempts to lookup the
|
|
|
|
* pathname relative to the global root mount point. This is required for the
|
|
|
|
* auditing sub-system, as audited pathnames must be absolute, relative to the
|
|
|
|
* global root mount point.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
vn_fullpath_global(struct thread *td, struct vnode *vn,
|
|
|
|
char **retbuf, char **freebuf)
|
|
|
|
{
|
|
|
|
char *buf;
|
|
|
|
int error;
|
|
|
|
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(disablefullpath))
|
2008-07-31 16:57:41 +00:00
|
|
|
return (ENODEV);
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(vn == NULL))
|
2008-07-31 16:57:41 +00:00
|
|
|
return (EINVAL);
|
|
|
|
buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
|
|
|
|
error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
|
|
|
|
if (!error)
|
2005-03-30 02:59:32 +00:00
|
|
|
*freebuf = buf;
|
|
|
|
else
|
|
|
|
free(buf, M_TEMP);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2009-05-31 14:57:43 +00:00
|
|
|
int
|
2009-06-21 19:21:01 +00:00
|
|
|
vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
|
2008-12-12 00:57:38 +00:00
|
|
|
{
|
|
|
|
struct vnode *dvp;
|
2009-05-31 14:57:43 +00:00
|
|
|
struct namecache *ncp;
|
2016-09-23 04:45:11 +00:00
|
|
|
struct mtx *vlp;
|
2012-10-22 17:50:54 +00:00
|
|
|
int error;
|
2008-12-12 00:57:38 +00:00
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
vlp = VP2VNODELOCK(*vp);
|
|
|
|
mtx_lock(vlp);
|
2009-05-31 14:57:43 +00:00
|
|
|
TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
|
|
|
|
if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (ncp != NULL) {
|
|
|
|
if (*buflen < ncp->nc_nlen) {
|
2016-09-23 04:45:11 +00:00
|
|
|
mtx_unlock(vlp);
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(*vp);
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numfullpathfail4, 1);
|
2009-05-31 14:57:43 +00:00
|
|
|
error = ENOMEM;
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return, error,
|
|
|
|
vp, NULL);
|
2009-05-31 14:57:43 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
*buflen -= ncp->nc_nlen;
|
2017-09-10 11:17:32 +00:00
|
|
|
memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
|
2017-09-10 11:17:32 +00:00
|
|
|
ncp->nc_name, vp);
|
2011-11-19 07:50:49 +00:00
|
|
|
dvp = *vp;
|
2009-05-31 14:57:43 +00:00
|
|
|
*vp = ncp->nc_dvp;
|
2011-11-19 07:50:49 +00:00
|
|
|
vref(*vp);
|
2016-09-23 04:45:11 +00:00
|
|
|
mtx_unlock(vlp);
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(dvp);
|
2009-05-31 14:57:43 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE1(vfs, namecache, fullpath, miss, vp);
|
2009-05-31 14:57:43 +00:00
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
mtx_unlock(vlp);
|
2008-12-12 00:57:38 +00:00
|
|
|
vn_lock(*vp, LK_SHARED | LK_RETRY);
|
2009-06-21 19:21:01 +00:00
|
|
|
error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
|
2011-11-19 07:50:49 +00:00
|
|
|
vput(*vp);
|
2008-12-12 00:57:38 +00:00
|
|
|
if (error) {
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numfullpathfail2, 1);
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
|
2008-12-12 00:57:38 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2009-05-31 14:57:43 +00:00
|
|
|
|
2008-12-12 00:57:38 +00:00
|
|
|
*vp = dvp;
|
2011-11-19 07:50:49 +00:00
|
|
|
if (dvp->v_iflag & VI_DOOMED) {
|
2008-12-12 00:57:38 +00:00
|
|
|
/* forced unmount */
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(dvp);
|
2009-05-31 14:57:43 +00:00
|
|
|
error = ENOENT;
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
|
2009-05-31 14:57:43 +00:00
|
|
|
return (error);
|
2008-12-12 00:57:38 +00:00
|
|
|
}
|
2011-11-19 07:50:49 +00:00
|
|
|
/*
|
|
|
|
* *vp has its use count incremented still.
|
|
|
|
*/
|
2008-12-12 00:57:38 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2005-03-30 02:59:32 +00:00
|
|
|
/*
|
|
|
|
* The magic behind kern___getcwd() and vn_fullpath().
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
|
|
|
|
char *buf, char **retbuf, u_int buflen)
|
|
|
|
{
|
2012-10-22 17:50:54 +00:00
|
|
|
int error, slash_prefixed;
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - postive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zapp),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
#ifdef KDTRACE_HOOKS
|
|
|
|
struct vnode *startvp = vp;
|
|
|
|
#endif
|
2011-11-19 07:50:49 +00:00
|
|
|
struct vnode *vp1;
|
2005-03-30 02:59:32 +00:00
|
|
|
|
2008-12-12 00:57:38 +00:00
|
|
|
buflen--;
|
2009-05-31 14:57:43 +00:00
|
|
|
buf[buflen] = '\0';
|
2005-03-30 02:59:32 +00:00
|
|
|
error = 0;
|
|
|
|
slash_prefixed = 0;
|
|
|
|
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numfullpathcalls, 1);
|
2011-11-19 07:50:49 +00:00
|
|
|
vref(vp);
|
2005-03-30 02:59:32 +00:00
|
|
|
if (vp->v_type != VDIR) {
|
2016-09-23 04:45:11 +00:00
|
|
|
error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
|
2009-05-31 14:57:43 +00:00
|
|
|
if (error)
|
Nul-terminate strings in the VFS name cache, which negligibly change
the size and cost of name cache entries, but make adding debugging
and tracing easier.
Add SDT DTrace probes for various namecache events:
vfs:namecache:enter:done - new entry in the name cache, passed parent
directory vnode pointer, name added to the cache, and child vnode
pointer.
vfs:namecache:enter_negative:done - new negative entry in the name cache,
passed parent vnode pointer, name added to the cache.
vfs:namecache:fullpath:enter - call to vn_fullpath1() is made, passed
the vnode to resolve to a name.
vfs:namecache:fullpath:hit - vn_fullpath1() successfully resolved a
search for the parent of an object using the namecache, passed the
discovered parent directory vnode pointer, name, and child vnode
pointer.
vfs:namecache:fullpath:miss - vn_fullpath1() failed to resolve a search
for the parent of an object using the namecache, passed the child
vnode pointer.
vfs:namecache:fullpath:return - vn_fullpath1() has completed, passed the
error number, and if that is zero, the vnode to resolve, and the
returned path.
vfs:namecache:lookup:hit - postive name cache entry hit, passed the
parent directory vnode pointer, name, and child vnode pointer.
vfs:namecache:lookup:hit_negative - negative name cache entry hit,
passed the parent directory vnode pointer and name.
vfs:namecache:lookup:miss - name cache miss, passed the parent directory
pointer and the full remaining component name (not terminated after the
cache miss component).
vfs:namecache:purge:done - name cache purge for a vnode, passed the vnode
pointer to purge.
vfs:namecache:purge_negative:done - name cache purge of negative entries
for children of a vnode, passed the vnode pointer to purge.
vfs:namecache:purgevfs - name cache purge for a mountpoint, passed the
mount pointer. Separate probes will also be invoked for each cache
entry zapped.
vfs:namecache:zap:done - name cache entry zapped, passed the parent
directory vnode pointer, name, and child vnode pointer.
vfs:namecache:zap_negative:done - negative name cache entry zapped,
passed the parent directory vnode pointer and name.
For any probes involving an extant name cache entry (enter, hit, zapp),
we use the nul-terminated string for the name component. For misses,
the remainder of the path, including later components, is provided as
an argument instead since there is no handy nul-terminated version of
the string around. This is arguably a bug.
MFC after: 1 month
Sponsored by: Google, Inc.
Reviewed by: jhb, kan, kib (earlier version)
2009-04-07 20:58:56 +00:00
|
|
|
return (error);
|
2009-06-05 16:44:42 +00:00
|
|
|
if (buflen == 0) {
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(vp);
|
2009-05-31 14:57:43 +00:00
|
|
|
return (ENOMEM);
|
2009-06-05 16:44:42 +00:00
|
|
|
}
|
2009-05-31 14:57:43 +00:00
|
|
|
buf[--buflen] = '/';
|
2005-03-30 02:59:32 +00:00
|
|
|
slash_prefixed = 1;
|
|
|
|
}
|
|
|
|
while (vp != rdir && vp != rootvnode) {
|
2016-11-07 10:55:56 +00:00
|
|
|
/*
|
|
|
|
* The vp vnode must be already fully constructed,
|
|
|
|
* since it is either found in namecache or obtained
|
|
|
|
* from VOP_VPTOCNP(). We may test for VV_ROOT safely
|
|
|
|
* without obtaining the vnode lock.
|
|
|
|
*/
|
|
|
|
if ((vp->v_vflag & VV_ROOT) != 0) {
|
|
|
|
vn_lock(vp, LK_RETRY | LK_SHARED);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* With the vnode locked, check for races with
|
|
|
|
* unmount, forced or not. Note that we
|
|
|
|
* already verified that vp is not equal to
|
|
|
|
* the root vnode, which means that
|
|
|
|
* mnt_vnodecovered can be NULL only for the
|
|
|
|
* case of unmount.
|
|
|
|
*/
|
|
|
|
if ((vp->v_iflag & VI_DOOMED) != 0 ||
|
|
|
|
(vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
|
|
|
|
vp1->v_mountedhere != vp->v_mount) {
|
|
|
|
vput(vp);
|
2009-03-24 18:16:42 +00:00
|
|
|
error = ENOENT;
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return,
|
|
|
|
error, vp, NULL);
|
2005-03-30 02:59:32 +00:00
|
|
|
break;
|
2000-04-26 11:57:45 +00:00
|
|
|
}
|
2016-11-07 10:55:56 +00:00
|
|
|
|
2011-11-19 07:50:49 +00:00
|
|
|
vref(vp1);
|
2016-11-07 10:55:56 +00:00
|
|
|
vput(vp);
|
2011-11-19 07:50:49 +00:00
|
|
|
vp = vp1;
|
2000-04-26 11:57:45 +00:00
|
|
|
continue;
|
|
|
|
}
|
2008-12-12 00:57:38 +00:00
|
|
|
if (vp->v_type != VDIR) {
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(vp);
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numfullpathfail1, 1);
|
2005-03-30 02:59:32 +00:00
|
|
|
error = ENOTDIR;
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return,
|
|
|
|
error, vp, NULL);
|
2005-03-30 02:59:32 +00:00
|
|
|
break;
|
2000-04-26 11:57:45 +00:00
|
|
|
}
|
2016-09-23 04:45:11 +00:00
|
|
|
error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
|
2009-05-31 14:57:43 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if (buflen == 0) {
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(vp);
|
2005-03-30 02:59:32 +00:00
|
|
|
error = ENOMEM;
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return, error,
|
|
|
|
startvp, NULL);
|
2005-03-30 02:59:32 +00:00
|
|
|
break;
|
2000-04-26 11:57:45 +00:00
|
|
|
}
|
2009-05-31 14:57:43 +00:00
|
|
|
buf[--buflen] = '/';
|
2000-04-26 11:57:45 +00:00
|
|
|
slash_prefixed = 1;
|
2005-03-30 02:59:32 +00:00
|
|
|
}
|
2009-05-31 14:57:43 +00:00
|
|
|
if (error)
|
2005-03-30 02:59:32 +00:00
|
|
|
return (error);
|
2000-04-26 11:57:45 +00:00
|
|
|
if (!slash_prefixed) {
|
2009-05-31 14:57:43 +00:00
|
|
|
if (buflen == 0) {
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(vp);
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numfullpathfail4, 1);
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
|
|
|
|
startvp, NULL);
|
2000-04-26 11:57:45 +00:00
|
|
|
return (ENOMEM);
|
2009-05-31 14:57:43 +00:00
|
|
|
}
|
|
|
|
buf[--buflen] = '/';
|
2000-04-26 11:57:45 +00:00
|
|
|
}
|
2016-01-21 01:04:03 +00:00
|
|
|
counter_u64_add(numfullpathfound, 1);
|
2011-11-19 07:50:49 +00:00
|
|
|
vrele(vp);
|
2005-03-30 02:59:32 +00:00
|
|
|
|
2015-09-28 12:14:16 +00:00
|
|
|
SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
|
2009-05-31 14:57:43 +00:00
|
|
|
*retbuf = buf + buflen;
|
2000-04-26 11:57:45 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2008-03-31 11:53:03 +00:00
|
|
|
|
2013-03-20 17:57:00 +00:00
|
|
|
struct vnode *
|
|
|
|
vn_dir_dd_ino(struct vnode *vp)
|
|
|
|
{
|
|
|
|
struct namecache *ncp;
|
|
|
|
struct vnode *ddvp;
|
2016-09-23 04:45:11 +00:00
|
|
|
struct mtx *vlp;
|
2013-03-20 17:57:00 +00:00
|
|
|
|
|
|
|
ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
|
2016-09-23 04:45:11 +00:00
|
|
|
vlp = VP2VNODELOCK(vp);
|
|
|
|
mtx_lock(vlp);
|
2013-03-20 17:57:00 +00:00
|
|
|
TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
|
|
|
|
if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
|
|
|
|
continue;
|
|
|
|
ddvp = ncp->nc_dvp;
|
2015-07-16 13:57:05 +00:00
|
|
|
vhold(ddvp);
|
2016-09-23 04:45:11 +00:00
|
|
|
mtx_unlock(vlp);
|
2015-07-16 13:57:05 +00:00
|
|
|
if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread))
|
2013-03-20 17:57:00 +00:00
|
|
|
return (NULL);
|
|
|
|
return (ddvp);
|
|
|
|
}
|
2016-09-23 04:45:11 +00:00
|
|
|
mtx_unlock(vlp);
|
2013-03-20 17:57:00 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2008-03-31 11:53:03 +00:00
|
|
|
int
|
|
|
|
vn_commname(struct vnode *vp, char *buf, u_int buflen)
|
|
|
|
{
|
|
|
|
struct namecache *ncp;
|
2016-09-23 04:45:11 +00:00
|
|
|
struct mtx *vlp;
|
2008-03-31 11:53:03 +00:00
|
|
|
int l;
|
|
|
|
|
2016-09-23 04:45:11 +00:00
|
|
|
vlp = VP2VNODELOCK(vp);
|
|
|
|
mtx_lock(vlp);
|
2009-03-29 21:25:40 +00:00
|
|
|
TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
|
|
|
|
if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
|
|
|
|
break;
|
|
|
|
if (ncp == NULL) {
|
2016-09-23 04:45:11 +00:00
|
|
|
mtx_unlock(vlp);
|
2008-03-31 11:53:03 +00:00
|
|
|
return (ENOENT);
|
|
|
|
}
|
|
|
|
l = min(ncp->nc_nlen, buflen - 1);
|
2017-09-10 11:17:32 +00:00
|
|
|
memcpy(buf, ncp->nc_name, l);
|
2016-09-23 04:45:11 +00:00
|
|
|
mtx_unlock(vlp);
|
2008-03-31 11:53:03 +00:00
|
|
|
buf[l] = '\0';
|
|
|
|
return (0);
|
|
|
|
}
|
2012-01-15 12:08:20 +00:00
|
|
|
|
2012-01-20 20:02:01 +00:00
|
|
|
/* ABI compat shims for old kernel modules. */
|
|
|
|
#undef cache_enter
|
|
|
|
|
|
|
|
void cache_enter(struct vnode *dvp, struct vnode *vp,
|
|
|
|
struct componentname *cnp);
|
|
|
|
|
|
|
|
void
|
|
|
|
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
|
|
|
|
{
|
|
|
|
|
2012-03-03 01:06:54 +00:00
|
|
|
cache_enter_time(dvp, vp, cnp, NULL, NULL);
|
2012-01-20 20:02:01 +00:00
|
|
|
}
|
|
|
|
|
2012-01-15 12:08:20 +00:00
|
|
|
/*
|
|
|
|
* This function updates path string to vnode's full global path
|
|
|
|
* and checks the size of the new path string against the pathlen argument.
|
|
|
|
*
|
2014-08-03 07:59:19 +00:00
|
|
|
* Requires a locked, referenced vnode.
|
2012-01-15 12:08:20 +00:00
|
|
|
* Vnode is re-locked on success or ENODEV, otherwise unlocked.
|
|
|
|
*
|
|
|
|
* If sysctl debug.disablefullpath is set, ENODEV is returned,
|
|
|
|
* vnode is left locked and path remain untouched.
|
|
|
|
*
|
|
|
|
* If vp is a directory, the call to vn_fullpath_global() always succeeds
|
2012-01-15 18:08:15 +00:00
|
|
|
* because it falls back to the ".." lookup if the namecache lookup fails.
|
2012-01-15 12:08:20 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
|
|
|
|
u_int pathlen)
|
|
|
|
{
|
|
|
|
struct nameidata nd;
|
|
|
|
struct vnode *vp1;
|
|
|
|
char *rpath, *fbuf;
|
2012-10-22 17:50:54 +00:00
|
|
|
int error;
|
2012-01-15 12:08:20 +00:00
|
|
|
|
|
|
|
ASSERT_VOP_ELOCKED(vp, __func__);
|
|
|
|
|
|
|
|
/* Return ENODEV if sysctl debug.disablefullpath==1 */
|
2016-12-29 16:35:49 +00:00
|
|
|
if (__predict_false(disablefullpath))
|
2012-01-15 12:08:20 +00:00
|
|
|
return (ENODEV);
|
|
|
|
|
|
|
|
/* Construct global filesystem path from vp. */
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
error = vn_fullpath_global(td, vp, &rpath, &fbuf);
|
|
|
|
|
|
|
|
if (error != 0) {
|
|
|
|
vrele(vp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strlen(rpath) >= pathlen) {
|
|
|
|
vrele(vp);
|
|
|
|
error = ENAMETOOLONG;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Re-lookup the vnode by path to detect a possible rename.
|
|
|
|
* As a side effect, the vnode is relocked.
|
|
|
|
* If vnode was renamed, return ENOENT.
|
|
|
|
*/
|
2012-10-22 17:50:54 +00:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
|
2012-01-15 12:08:20 +00:00
|
|
|
UIO_SYSSPACE, path, td);
|
|
|
|
error = namei(&nd);
|
|
|
|
if (error != 0) {
|
|
|
|
vrele(vp);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
NDFREE(&nd, NDF_ONLY_PNBUF);
|
|
|
|
vp1 = nd.ni_vp;
|
|
|
|
vrele(vp);
|
|
|
|
if (vp1 == vp)
|
|
|
|
strcpy(path, rpath);
|
|
|
|
else {
|
|
|
|
vput(vp1);
|
|
|
|
error = ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
free(fbuf, M_TEMP);
|
|
|
|
return (error);
|
|
|
|
}
|