Make various vnode related functions static

Poul-Henning Kamp 2005-02-10 12:28:58 +00:00
parent 44dc16a986
commit 1ba212823f
3 changed files with 12 additions and 80 deletions

View File

@@ -104,7 +104,7 @@ static int vfs_bio_clcheck(struct vnode *vp, int size,
daddr_t lblkno, daddr_t blkno);
static int flushbufqueues(int flushdeps);
static void buf_daemon(void);
void bremfreel(struct buf *bp);
static void bremfreel(struct buf *bp);
int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
@@ -674,7 +674,7 @@ bremfree(struct buf *bp)
* Removes a buffer from the free list, must be called with the
* bqlock held.
*/
void
static void
bremfreel(struct buf *bp)
{
int s = splbio();
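The static conversion is safe because bremfreel() is only reached from within the same file; a minimal sketch of the intended pairing, assuming the public wrapper is bremfree() and the queue lock is a mutex named bqlock (names taken from the hunk context above, not verified against the full file):

/*
 * Sketch only: bremfree() is the public entry point, bremfreel() the
 * file-static worker that expects the buffer-queue lock to be held by
 * the caller.
 */
static struct mtx bqlock;		/* assumed: buffer queue lock, mtx_init'ed elsewhere */

void
bremfree(struct buf *bp)
{
	mtx_lock(&bqlock);
	bremfreel(bp);			/* worker runs with bqlock held */
	mtx_unlock(&bqlock);
}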
@@ -2054,7 +2054,7 @@ buf_daemon()
* free up B_INVAL buffers instead of write them, which NFS is
* particularly sensitive to.
*/
int flushwithdeps = 0;
static int flushwithdeps = 0;
SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
0, "Number of buffers flushed with dependecies that require rollbacks");

View File

@@ -90,7 +90,9 @@ static void syncer_shutdown(void *arg, int howto);
static int vtryrecycle(struct vnode *vp);
static void vx_lock(struct vnode *vp);
static void vx_unlock(struct vnode *vp);
static void vbusy(struct vnode *vp);
static void vdropl(struct vnode *vp);
static void vholdl(struct vnode *);
/*
* Enable Giant pushdown based on whether or not the vm is mpsafe in this
@@ -1984,7 +1986,7 @@ vhold(struct vnode *vp)
VI_UNLOCK(vp);
}
void
static void
vholdl(struct vnode *vp)
{
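The visible tail of vhold() above (ending in VI_UNLOCK) reflects the usual locked/unlocked split: the public function takes the vnode interlock and the "l"-suffixed worker assumes it is already held. A sketch of that shape, consistent with the context shown rather than the verbatim function body:

void
vhold(struct vnode *vp)
{
	VI_LOCK(vp);
	vholdl(vp);		/* static worker: interlock held on entry */
	VI_UNLOCK(vp);
}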
@@ -2006,9 +2008,8 @@ vdrop(struct vnode *vp)
VI_UNLOCK(vp);
}
void
vdropl(vp)
struct vnode *vp;
static void
vdropl(struct vnode *vp)
{
if (vp->v_holdcnt <= 0)
@@ -2358,8 +2359,6 @@ vgonel(struct vnode *vp, struct thread *td)
*/
vp->v_vnlock = &vp->v_lock;
vp->v_op = &dead_vnodeops;
if (vp->v_pollinfo != NULL)
vn_pollgone(vp);
vp->v_tag = "none";
VI_UNLOCK(vp);
@@ -2606,8 +2605,8 @@ vfs_sysctl(SYSCTL_HANDLER_ARGS)
return (EOPNOTSUPP);
}
SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
"Generic filesystem");
static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
vfs_sysctl, "Generic filesystem");
#if 1 || defined(COMPAT_PRELITE2)
@@ -2851,7 +2850,7 @@ vfree(struct vnode *vp)
/*
* Opposite of vfree() - mark a vnode as in use.
*/
void
static void
vbusy(struct vnode *vp)
{
@@ -2923,61 +2922,6 @@ vn_pollrecord(vp, td, events)
return 0;
}
/*
* Note the occurrence of an event. If the VN_POLLEVENT macro is used,
* it is possible for us to miss an event due to race conditions, but
* that condition is expected to be rare, so for the moment it is the
* preferred interface.
*/
void
vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
if (vp->v_pollinfo == NULL)
v_addpollinfo(vp);
mtx_lock(&vp->v_pollinfo->vpi_lock);
if (vp->v_pollinfo->vpi_events & events) {
/*
* We clear vpi_events so that we don't
* call selwakeup() twice if two events are
* posted before the polling process(es) is
* awakened. This also ensures that we take at
* most one selwakeup() if the polling process
* is no longer interested. However, it does
* mean that only one event can be noticed at
* a time. (Perhaps we should only clear those
* event bits which we note?) XXX
*/
vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */
vp->v_pollinfo->vpi_revents |= events;
selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
}
mtx_unlock(&vp->v_pollinfo->vpi_lock);
}
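For context, the producer side of this interface was the VN_POLLEVENT macro, removed further below; a filesystem that had just made data readable would have poked pollers roughly as follows (the helper name is hypothetical, only to illustrate the race the comment above accepts):

#include <sys/poll.h>			/* POLLIN */

/* Hypothetical producer: wake anyone polling for readable data. */
static void
example_note_readable(struct vnode *vp)
{
	/*
	 * VN_POLLEVENT only calls vn_pollevent() when vpi_events already
	 * has a matching bit set, so an event posted before any poller
	 * registers interest can be missed -- the accepted race.
	 */
	VN_POLLEVENT(vp, POLLIN);
}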
/*
* Wake up anyone polling on vp because it is being revoked.
* This depends on dead_poll() returning POLLHUP for correct
* behavior.
*/
void
vn_pollgone(vp)
struct vnode *vp;
{
mtx_lock(&vp->v_pollinfo->vpi_lock);
VN_KNOTE_LOCKED(vp, NOTE_REVOKE);
if (vp->v_pollinfo->vpi_events) {
vp->v_pollinfo->vpi_events = 0;
selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
}
mtx_unlock(&vp->v_pollinfo->vpi_lock);
}
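The dead_poll() dependency mentioned above is what a userland consumer would observe after a revoke; an illustrative sketch using ordinary poll(2), not tied to this commit:

#include <poll.h>

/*
 * After revoke(2), descriptors referencing the old vnode fall through
 * to the dead vnode ops; dead_poll() is expected to report POLLHUP,
 * which is what this sketch waits for.
 */
int
wait_for_revoke(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) == -1)
		return (-1);
	return ((pfd.revents & POLLHUP) != 0);
}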
/*
* Routine to create and manage a filesystem syncer vnode.
*/

View File

@@ -190,13 +190,6 @@ struct xvnode {
#define xv_dev xv_un.xv_uns.xvu_dev
#define xv_ino xv_un.xv_uns.xvu_ino
#define VN_POLLEVENT(vp, events) \
do { \
if ((vp)->v_pollinfo != NULL && \
(vp)->v_pollinfo->vpi_events & (events)) \
vn_pollevent((vp), (events)); \
} while (0)
#define VN_KNOTE(vp, b, a) \
do { \
if ((vp)->v_pollinfo != NULL) \
@@ -599,7 +592,6 @@ int vaccess_acl_posix1e(enum vtype type, uid_t file_uid,
void vattr_null(struct vattr *vap);
int vcount(struct vnode *vp);
void vdrop(struct vnode *);
void vdropl(struct vnode *);
int vfinddev(struct cdev *dev, struct vnode **vpp);
void vfs_add_vnodeops(const void *);
void vfs_rm_vnodeops(const void *);
@@ -608,7 +600,6 @@ int vget(struct vnode *vp, int lockflag, struct thread *td);
void vgone(struct vnode *vp);
void vgonel(struct vnode *vp, struct thread *td);
void vhold(struct vnode *);
void vholdl(struct vnode *);
int vinvalbuf(struct vnode *vp, int save,
struct thread *td, int slpflag, int slptimeo);
int vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
@@ -629,8 +620,6 @@ int debug_vn_lock(struct vnode *vp, int flags, struct thread *p,
int vn_open(struct nameidata *ndp, int *flagp, int cmode, int fdidx);
int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode,
struct ucred *cred, int fdidx);
void vn_pollevent(struct vnode *vp, int events);
void vn_pollgone(struct vnode *vp);
int vn_pollrecord(struct vnode *vp, struct thread *p, int events);
int vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base,
int len, off_t offset, enum uio_seg segflg, int ioflg,
@@ -680,7 +669,6 @@ void vput(struct vnode *vp);
void vrele(struct vnode *vp);
void vref(struct vnode *vp);
int vrefcnt(struct vnode *vp);
void vbusy(struct vnode *vp);
void v_addpollinfo(struct vnode *vp);
int vnode_create_vobject(struct vnode *vp, size_t size, struct thread *td);