#-
# Copyright (c) 1992, 1993
# The Regents of the University of California. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# @(#)vnode_if.src 8.12 (Berkeley) 5/14/95
# $FreeBSD$
#
#
# Above each of the vop descriptors, lines starting with %% specify the
# locking protocol used by that vop call.  The first column is the name
# of the variable; the remaining three columns are in, out, and error,
# respectively.  The "in" column defines the lock state on input, the
# "out" column defines the state on successful return, and the "error"
# column defines the locking state on error exit.
#
# The locking value can take the following values:
# L: locked; not converted to type of lock.
# E: locked with exclusive lock for this process.
# U: unlocked.
# -: not applicable. vnode does not yet exist (or no longer exists).
# =: the same on input and output, may be either L or U.
#
# The parameter named "vpp" is assumed to always be used with double
# indirection (**vpp), and that name is hard-coded in vnode_if.awk!
#
# Lines starting with %! specify a pre- or post-condition function
# to call before/after the vop call.
#
# If other such parameters are introduced, they have to be added to
# the AWK script at the head of the definition of "add_debug_code()".
#
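#
# For orientation, an illustrative sketch (not the literal generated
# output): a descriptor such as the "access" entry below, together with
# its "%% access vp L L L" line, is turned by vnode_if.awk into roughly
# the following C in the generated vnode_if.h, with the locking columns
# becoming vnode lock assertions in debugging kernels:
#
#	struct vop_access_args {
#		struct vop_generic_args a_gen;
#		struct vnode *a_vp;
#		accmode_t a_accmode;
#		struct ucred *a_cred;
#		struct thread *a_td;
#	};
#
#	int VOP_ACCESS(struct vnode *vp, accmode_t accmode,
#	    struct ucred *cred, struct thread *td);
#
# The "pre"/"post" functions named on %! lines are called immediately
# before and after the operation is dispatched to the filesystem.
#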
vop_islocked {
IN struct vnode *vp;
};
%% lookup dvp L L L
%% lookup vpp - L -
# XXX - the lookup locking protocol defies simple description and depends
# on the flags and operation fields in the (cnp) structure. Note
# especially that *vpp may equal dvp and both may be locked.
vop_lookup {
IN struct vnode *dvp;
INOUT struct vnode **vpp;
IN struct componentname *cnp;
};
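#
# As an illustrative sketch (not taken from any particular filesystem) of
# the "*vpp may equal dvp" case noted above: a lookup of "." typically
# returns the directory itself, already locked, taking only an extra
# reference rather than a second lock:
#
#	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
#		VREF(dvp);		/* dvp stays locked; no second lock */
#		*vpp = dvp;
#		return (0);
#	}
#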
%% cachedlookup dvp L L L
%% cachedlookup vpp - L -
# This must be an exact copy of lookup. See kern/vfs_cache.c for details.
vop_cachedlookup {
IN struct vnode *dvp;
INOUT struct vnode **vpp;
IN struct componentname *cnp;
};
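#
# A common arrangement (illustrative; "foo" is a placeholder filesystem
# name) is to point vop_lookup at the shared name-cache wrapper and keep
# the filesystem's real lookup routine in the vop_cachedlookup slot:
#
#	struct vop_vector foo_vnodeops = {
#		.vop_default		= &default_vnodeops,
#		.vop_lookup		= vfs_cache_lookup,
#		.vop_cachedlookup	= foo_lookup,
#		/* ... */
#	};
#
# vfs_cache_lookup() consults the name cache and falls back to
# VOP_CACHEDLOOKUP() on a miss, which is why the two descriptors above
# must stay identical.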
%% create dvp E E E
%% create vpp - L -
%! create pre vop_create_pre
%! create post vop_create_post
vop_create {
IN struct vnode *dvp;
OUT struct vnode **vpp;
IN struct componentname *cnp;
IN struct vattr *vap;
};
%% whiteout dvp E E E
%! whiteout pre vop_whiteout_pre
%! whiteout post vop_whiteout_post
vop_whiteout {
IN struct vnode *dvp;
IN struct componentname *cnp;
IN int flags;
};
%% mknod dvp E E E
%% mknod vpp - L -
%! mknod pre vop_mknod_pre
%! mknod post vop_mknod_post
vop_mknod {
IN struct vnode *dvp;
OUT struct vnode **vpp;
IN struct componentname *cnp;
IN struct vattr *vap;
};
%% open vp L L L
%! open post vop_open_post
vop_open {
IN struct vnode *vp;
IN int mode;
IN struct ucred *cred;
IN struct thread *td;
IN struct file *fp;
};
%% close vp L L L
%! close post vop_close_post
vop_close {
IN struct vnode *vp;
IN int fflag;
IN struct ucred *cred;
IN struct thread *td;
};
%% fplookup_vexec vp - - -
%! fplookup_vexec debugpre vop_fplookup_vexec_debugpre
%! fplookup_vexec debugpost vop_fplookup_vexec_debugpost
vop_fplookup_vexec {
IN struct vnode *vp;
IN struct ucred *cred;
};
%% access vp L L L
vop_access {
IN struct vnode *vp;
IN accmode_t accmode;
IN struct ucred *cred;
IN struct thread *td;
};
%% accessx vp L L L
vop_accessx {
IN struct vnode *vp;
IN accmode_t accmode;
IN struct ucred *cred;
IN struct thread *td;
};
%% stat vp L L L
vop_stat {
IN struct vnode *vp;
OUT struct stat *sb;
IN struct ucred *active_cred;
IN struct ucred *file_cred;
IN struct thread *td;
};
%% getattr vp L L L
vop_getattr {
IN struct vnode *vp;
OUT struct vattr *vap;
IN struct ucred *cred;
};
%% setattr vp E E E
%! setattr pre vop_setattr_pre
%! setattr post vop_setattr_post
vop_setattr {
IN struct vnode *vp;
IN struct vattr *vap;
IN struct ucred *cred;
};
%% mmapped vp L L L
vop_mmapped {
IN struct vnode *vp;
};
%% read vp L L L
%! read post vop_read_post
vop_read {
IN struct vnode *vp;
INOUT struct uio *uio;
IN int ioflag;
IN struct ucred *cred;
};
%% write vp L L L
%! write pre VOP_WRITE_PRE
%! write post VOP_WRITE_POST
vop_write {
IN struct vnode *vp;
INOUT struct uio *uio;
IN int ioflag;
IN struct ucred *cred;
};
%% ioctl vp U U U
vop_ioctl {
IN struct vnode *vp;
IN u_long command;
IN void *data;
IN int fflag;
IN struct ucred *cred;
IN struct thread *td;
};
%% poll vp U U U
vop_poll {
IN struct vnode *vp;
IN int events;
IN struct ucred *cred;
IN struct thread *td;
};
%% kqfilter vp U U U
vop_kqfilter {
IN struct vnode *vp;
IN struct knote *kn;
};
%% revoke vp L L L
vop_revoke {
IN struct vnode *vp;
IN int flags;
};
%% fsync vp L L L
vop_fsync {
IN struct vnode *vp;
IN int waitfor;
IN struct thread *td;
};
%% remove dvp E E E
%% remove vp E E E
%! remove pre vop_remove_pre
%! remove post vop_remove_post
vop_remove {
IN struct vnode *dvp;
IN struct vnode *vp;
IN struct componentname *cnp;
};
%% link tdvp E E E
%% link vp E E E
%! link pre vop_link_pre
%! link post vop_link_post
vop_link {
IN struct vnode *tdvp;
IN struct vnode *vp;
IN struct componentname *cnp;
};
%! rename pre vop_rename_pre
%! rename post vop_rename_post
vop_rename {
IN WILLRELE struct vnode *fdvp;
IN WILLRELE struct vnode *fvp;
IN struct componentname *fcnp;
IN WILLRELE struct vnode *tdvp;
IN WILLRELE struct vnode *tvp;
IN struct componentname *tcnp;
};
%% mkdir dvp E E E
%% mkdir vpp - E -
%! mkdir pre vop_mkdir_pre
%! mkdir post vop_mkdir_post
vop_mkdir {
IN struct vnode *dvp;
OUT struct vnode **vpp;
IN struct componentname *cnp;
IN struct vattr *vap;
};
%% rmdir dvp E E E
%% rmdir vp E E E
%! rmdir pre vop_rmdir_pre
%! rmdir post vop_rmdir_post
vop_rmdir {
IN struct vnode *dvp;
IN struct vnode *vp;
IN struct componentname *cnp;
};
%% symlink dvp E E E
%% symlink vpp - E -
%! symlink pre vop_symlink_pre
%! symlink post vop_symlink_post
vop_symlink {
IN struct vnode *dvp;
OUT struct vnode **vpp;
IN struct componentname *cnp;
IN struct vattr *vap;
IN const char *target;
};
%% readdir vp L L L
%! readdir post vop_readdir_post
vop_readdir {
IN struct vnode *vp;
INOUT struct uio *uio;
IN struct ucred *cred;
INOUT int *eofflag;
OUT int *ncookies;
INOUT u_long **cookies;
};
%% readlink vp L L L
vop_readlink {
IN struct vnode *vp;
INOUT struct uio *uio;
IN struct ucred *cred;
};
%% inactive vp E E E
vop_inactive {
IN struct vnode *vp;
IN struct thread *td;
};
%! need_inactive debugpre vop_need_inactive_debugpre
%! need_inactive debugpost vop_need_inactive_debugpost
vop_need_inactive {
IN struct vnode *vp;
};
%% reclaim vp E E E
%! reclaim post vop_reclaim_post
vop_reclaim {
IN struct vnode *vp;
};
%! lock1 debugpre vop_lock_debugpre
%! lock1 debugpost vop_lock_debugpost
vop_lock1 {
IN struct vnode *vp;
IN int flags;
IN const char *file;
IN int line;
};
%! unlock debugpre vop_unlock_debugpre
vop_unlock {
IN struct vnode *vp;
};
%% bmap vp L L L
vop_bmap {
IN struct vnode *vp;
IN daddr_t bn;
OUT struct bufobj **bop;
IN daddr_t *bnp;
OUT int *runp;
OUT int *runb;
};
%% strategy vp L L L
%! strategy debugpre vop_strategy_debugpre
vop_strategy {
IN struct vnode *vp;
IN struct buf *bp;
};
%% getwritemount vp = = =
vop_getwritemount {
IN struct vnode *vp;
OUT struct mount **mpp;
};
%% print vp - - -
vop_print {
IN struct vnode *vp;
};
%% pathconf vp L L L
vop_pathconf {
IN struct vnode *vp;
IN int name;
OUT long *retval;
};
%% advlock vp U U U
vop_advlock {
IN struct vnode *vp;
IN void *id;
IN int op;
IN struct flock *fl;
IN int flags;
};
%% advlockasync vp U U U
vop_advlockasync {
IN struct vnode *vp;
IN void *id;
IN int op;
IN struct flock *fl;
IN int flags;
IN struct task *task;
INOUT void **cookiep;
};
%% advlockpurge vp E E E
vop_advlockpurge {
IN struct vnode *vp;
};
%% reallocblks vp E E E
vop_reallocblks {
IN struct vnode *vp;
IN struct cluster_save *buflist;
};
%% getpages vp L L L
vop_getpages {
IN struct vnode *vp;
IN vm_page_t *m;
IN int count;
IN int *rbehind;
IN int *rahead;
};
%% getpages_async vp L L L
vop_getpages_async {
IN struct vnode *vp;
IN vm_page_t *m;
IN int count;
IN int *rbehind;
IN int *rahead;
IN vop_getpages_iodone_t *iodone;
IN void *arg;
};
%% putpages vp L L L
vop_putpages {
IN struct vnode *vp;
IN vm_page_t *m;
IN int count;
IN int sync;
IN int *rtvals;
};
%% getacl vp L L L
vop_getacl {
IN struct vnode *vp;
IN acl_type_t type;
OUT struct acl *aclp;
IN struct ucred *cred;
IN struct thread *td;
};
%% setacl vp E E E
%! setacl pre vop_setacl_pre
%! setacl post vop_setacl_post
vop_setacl {
IN struct vnode *vp;
IN acl_type_t type;
IN struct acl *aclp;
IN struct ucred *cred;
IN struct thread *td;
};
%% aclcheck vp = = =
vop_aclcheck {
IN struct vnode *vp;
IN acl_type_t type;
IN struct acl *aclp;
IN struct ucred *cred;
IN struct thread *td;
};
%% closeextattr vp L L L
vop_closeextattr {
IN struct vnode *vp;
IN int commit;
IN struct ucred *cred;
IN struct thread *td;
};
%% getextattr vp L L L
vop_getextattr {
IN struct vnode *vp;
IN int attrnamespace;
IN const char *name;
INOUT struct uio *uio;
OUT size_t *size;
IN struct ucred *cred;
IN struct thread *td;
};
%% listextattr vp L L L
vop_listextattr {
IN struct vnode *vp;
IN int attrnamespace;
INOUT struct uio *uio;
OUT size_t *size;
IN struct ucred *cred;
IN struct thread *td;
};
%% openextattr vp L L L
vop_openextattr {
IN struct vnode *vp;
IN struct ucred *cred;
IN struct thread *td;
};
%% deleteextattr vp E E E
%! deleteextattr pre vop_deleteextattr_pre
%! deleteextattr post vop_deleteextattr_post
vop_deleteextattr {
IN struct vnode *vp;
IN int attrnamespace;
IN const char *name;
IN struct ucred *cred;
IN struct thread *td;
};
%% setextattr vp E E E
%! setextattr pre vop_setextattr_pre
%! setextattr post vop_setextattr_post
vop_setextattr {
IN struct vnode *vp;
IN int attrnamespace;
IN const char *name;
INOUT struct uio *uio;
IN struct ucred *cred;
IN struct thread *td;
};
%% setlabel vp E E E
vop_setlabel {
IN struct vnode *vp;
IN struct label *label;
IN struct ucred *cred;
IN struct thread *td;
};
%% vptofh vp = = =
vop_vptofh {
IN struct vnode *vp;
IN struct fid *fhp;
};
%% vptocnp vp L L L
%% vptocnp vpp - U -
vop_vptocnp {
IN struct vnode *vp;
OUT struct vnode **vpp;
IN struct ucred *cred;
INOUT char *buf;
INOUT size_t *buflen;
};
%% allocate vp E E E
vop_allocate {
IN struct vnode *vp;
INOUT off_t *offset;
INOUT off_t *len;
};
%% advise vp U U U
vop_advise {
IN struct vnode *vp;
IN off_t start;
IN off_t end;
IN int advice;
};
%% unp_bind vp E E E
vop_unp_bind {
IN struct vnode *vp;
IN struct unpcb *unpcb;
};
%% unp_connect vp L L L
vop_unp_connect {
IN struct vnode *vp;
OUT struct unpcb **unpcb;
};
%% unp_detach vp = = =
vop_unp_detach {
IN struct vnode *vp;
};
%% is_text vp L L L
vop_is_text {
IN struct vnode *vp;
};
%% set_text vp = = =
vop_set_text {
IN struct vnode *vp;
};
%% unset_text vp L L L
vop_unset_text {
IN struct vnode *vp;
};
%% add_writecount vp L L L
vop_add_writecount {
IN struct vnode *vp;
IN int inc;
};
%% fdatasync vp L L L
vop_fdatasync {
IN struct vnode *vp;
IN struct thread *td;
};
%% copy_file_range invp U U U
%% copy_file_range outvp U U U
vop_copy_file_range {
IN struct vnode *invp;
INOUT off_t *inoffp;
IN struct vnode *outvp;
INOUT off_t *outoffp;
INOUT size_t *lenp;
IN unsigned int flags;
IN struct ucred *incred;
IN struct ucred *outcred;
IN struct thread *fsizetd;
};
# The VOPs below are spares at the end of the table to allow new VOPs to be
# added in stable branches without breaking the KBI. New VOPs in HEAD should
# be added above these spares. When merging a new VOP to a stable branch,
# the new VOP should replace one of the spares.
vop_spare1 {
IN struct vnode *vp;
};
vop_spare2 {
IN struct vnode *vp;
};
vop_spare3 {
IN struct vnode *vp;
};
vop_spare4 {
IN struct vnode *vp;
};
vop_spare5 {
IN struct vnode *vp;
};
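#
# Purely as a hypothetical illustration of the merge procedure described
# above ("frobnicate" is not a real VOP): bringing a new VOP into a stable
# branch would replace one spare in place, keeping its slot in the table,
# so vop_spare1 might become:
#
#	%% frobnicate vp L L L
#	vop_frobnicate {
#		IN struct vnode *vp;
#		IN int flags;
#	};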