style(9)
This commit is contained in:
parent 4a1f529a56
commit 029839a449
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=175053
@@ -216,7 +216,7 @@ ffs_syncvnode(struct vnode *vp, int waitfor)
 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs)
 bp->b_vflags &= ~BV_SCANNED;
 TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
-/*
+/*
 * Reasons to skip this buffer: it has already been considered
 * on this pass, this pass is the first time through on a
 * synchronous flush request and the buffer being considered
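The loop above walks the vnode's dirty-buffer list twice: a plain TAILQ_FOREACH to clear BV_SCANNED, then TAILQ_FOREACH_SAFE for the flush pass, whose body may remove the buffer it is looking at from the list. A minimal userland sketch of that sys/queue.h pattern, assuming a BSD queue.h that provides TAILQ_FOREACH_SAFE; the struct and values are illustrative, not taken from ffs_vnops.c:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    int value;
    TAILQ_ENTRY(entry) link;
};

static TAILQ_HEAD(entry_head, entry) head = TAILQ_HEAD_INITIALIZER(head);

int
main(void)
{
    struct entry *e, *tmp;
    int i;

    for (i = 0; i < 5; i++) {
        e = malloc(sizeof(*e));
        e->value = i;
        TAILQ_INSERT_TAIL(&head, e, link);
    }

    /* Read-only pass: plain TAILQ_FOREACH is enough. */
    TAILQ_FOREACH(e, &head, link)
        printf("saw %d\n", e->value);

    /*
     * Mutating pass: TAILQ_FOREACH_SAFE saves the successor in "tmp"
     * before the body runs, so removing and freeing the current
     * element does not break the walk -- the reason the flush pass in
     * ffs_syncvnode() uses the _SAFE variant.
     */
    TAILQ_FOREACH_SAFE(e, &head, link, tmp) {
        if (e->value % 2 == 0) {
            TAILQ_REMOVE(&head, e, link);
            free(e);
        }
    }
    return (0);
}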
@@ -270,7 +270,7 @@ ffs_syncvnode(struct vnode *vp, int waitfor)
 s = splbio();
 }
 } else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
-/*
+/*
 * If the buffer is for data that has been truncated
 * off the file, then throw it away.
 */
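In this branch the buffer's logical block number is compared against lbn, and the comment says anything at or beyond it holds data that truncation has already cut off. A stand-alone sketch of the block arithmetic behind such a test, assuming lbn is derived from the file size roughly as first_lbn_past_eof() does here; the function name and sample sizes are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * First logical block number that lies entirely past a file of "size"
 * bytes, given "bsize"-byte blocks: howmany(size, bsize).  Buffers for
 * blocks at or beyond this number only hold data past end of file.
 */
static int64_t
first_lbn_past_eof(int64_t size, int64_t bsize)
{
    return ((size + bsize - 1) / bsize);
}

int
main(void)
{
    /* 20000-byte file, 16 KB blocks: block 1 still holds live data. */
    printf("discard buffers with b_lblkno >= %jd\n",
        (intmax_t)first_lbn_past_eof(20000, 16384));
    return (0);
}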
@@ -283,7 +283,7 @@ ffs_syncvnode(struct vnode *vp, int waitfor)
 vfs_bio_awrite(bp);

 /*
-* Since we may have slept during the I/O, we need
+* Since we may have slept during the I/O, we need
 * to start from a known point.
 */
 VI_LOCK(vp);
@@ -302,7 +302,7 @@ ffs_syncvnode(struct vnode *vp, int waitfor)
 bufobj_wwait(&vp->v_bufobj, 3, 0);
 VI_UNLOCK(vp);

-/*
+/*
 * Ensure that any filesystem metatdata associated
 * with the vnode has been written.
 */
@@ -351,7 +351,7 @@ ffs_lock(ap)
 int flags;
 struct lock *lkp;
 int result;
-
+
 switch (ap->a_flags & LK_TYPE_MASK) {
 case LK_SHARED:
 case LK_UPGRADE:
@@ -472,12 +472,12 @@ ffs_read(ap)
 /*
 * size of buffer. The buffer representing the
 * end of the file is rounded up to the size of
-* the block type ( fragment or full block,
+* the block type ( fragment or full block,
 * depending ).
 */
 size = blksize(fs, ip, lbn);
 blkoffset = blkoff(fs, uio->uio_offset);
-
+
 /*
 * The amount we want to transfer in this iteration is
 * one FS block less the amount of the data before
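The two comments in this hunk describe the read loop's per-iteration bookkeeping: the block at end of file is sized to a fragment or a full block by blksize(), and the amount moved in one pass is the remainder of the current block after the starting offset, clipped to what the caller asked for and (in the kernel) to what the file still holds. A stand-alone sketch of that arithmetic with plain integers standing in for the fs/ip/uio fields; the variable names and sizes are illustrative:

#include <stdio.h>

int
main(void)
{
    long bsize = 16384;     /* fs_bsize: one full filesystem block */
    long offset = 20000;    /* uio_offset: where this pass starts */
    long resid = 60000;     /* uio_resid: bytes the caller still wants */
    long filesize = 50000;  /* i_size */

    long lbn = offset / bsize;              /* logical block to read */
    long blkoffset = offset % bsize;        /* blkoff(): data before us */
    long bytesinfile = filesize - offset;   /* data left in the file */
    long xfersize = bsize - blkoffset;      /* one block less that data */

    if (xfersize > resid)
        xfersize = resid;
    if (xfersize > bytesinfile)
        xfersize = bytesinfile;
    printf("lbn %ld: copy %ld bytes starting at block offset %ld\n",
        lbn, xfersize, blkoffset);
    return (0);
}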
@@ -501,7 +501,7 @@ ffs_read(ap)
 */
 error = bread(vp, lbn, size, NOCRED, &bp);
 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
-/*
+/*
 * Otherwise if we are allowed to cluster,
 * grab as much as we can.
 *
@@ -524,7 +524,7 @@ ffs_read(ap)
 size, &nextlbn, &nextsize, 1, NOCRED, &bp);
 } else {
 /*
-* Failing all of the above, just read what the
+* Failing all of the above, just read what the
 * user asked for. Interestingly, the same as
 * the first option above.
 */
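These two hunks come from one if/else-if chain that decides how to bring the block in: a plain bread() in the leading case, a clustered read when the mount allows it ("grab as much as we can"), a breadn() that also queues the next block, and otherwise a plain bread() again ("just read what the user asked for"). A schematic of that decision as a chooser function; the predicate names are paraphrases, not the kernel's conditions, and the leading no-read-ahead case is an assumption about code outside these hunks:

#include <stdio.h>

enum read_plan { PLAIN_BREAD, CLUSTER_READ, READ_AHEAD_BREADN };

static enum read_plan
choose_read_plan(int no_readahead_possible, int clustering_allowed,
    int sequential)
{
    if (no_readahead_possible)
        return (PLAIN_BREAD);           /* leading case of the chain */
    if (clustering_allowed)
        return (CLUSTER_READ);          /* "grab as much as we can" */
    if (sequential)
        return (READ_AHEAD_BREADN);     /* also queue the next block */
    return (PLAIN_BREAD);               /* read only what was asked for */
}

int
main(void)
{
    /* Read-ahead possible and clustering allowed -> clustered read. */
    printf("%d\n", choose_read_plan(0, 1, 1));
    return (0);
}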
@@ -584,7 +584,7 @@ ffs_read(ap)
 }
 }

-/*
+/*
 * This can only happen in the case of an error
 * because the loop above resets bp to NULL on each iteration
 * and on normal completion has not set a new value into it.
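The comment describes a standard buffer-loop idiom: release the buffer and reset bp to NULL at the bottom of every successful iteration, so a non-NULL bp after the loop can only mean an early exit on an error while a buffer was still held. A userland sketch of the same shape; fake_bread() and fake_copyout() are made-up stand-ins for bread() and the uiomove() copy:

#include <stdio.h>
#include <stdlib.h>

struct buf { char data[512]; };

static int
fake_bread(int lbn, struct buf **bpp)
{
    (void)lbn;
    *bpp = calloc(1, sizeof(struct buf));
    return (*bpp == NULL ? 12 : 0);     /* 12: ENOMEM-style failure */
}

static int
fake_copyout(struct buf *bp, int lbn)
{
    (void)bp;
    return (lbn == 3 ? 14 : 0);         /* pretend block 3 faults */
}

int
main(void)
{
    struct buf *bp = NULL;
    int error = 0, lbn;

    for (lbn = 0; lbn < 8; lbn++) {
        if ((error = fake_bread(lbn, &bp)) != 0)
            break;
        if ((error = fake_copyout(bp, lbn)) != 0)
            break;          /* leaves the loop still holding bp */
        free(bp);           /* bqrelse() in the kernel */
        bp = NULL;          /* so bp is NULL on normal completion */
    }
    /* Non-NULL here can only mean the loop stopped early on an error. */
    if (bp != NULL)
        free(bp);
    printf("finished with error %d\n", error);
    return (0);
}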
@@ -708,7 +708,7 @@ ffs_write(ap)
 if (uio->uio_offset + xfersize > ip->i_size)
 vnode_pager_setsize(vp, uio->uio_offset + xfersize);

-/*
+/*
 * We must perform a read-before-write if the transfer size
 * does not cover the entire buffer.
 */
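The rule stated here is the usual read-modify-write requirement: a write covering only part of a block must read the block first so the untouched bytes survive. A userland illustration using pread()/pwrite() on an ordinary file descriptor; BLKSIZE, the function name, and the restriction to a single block are assumptions of this sketch, not part of ffs_write():

#include <string.h>
#include <unistd.h>

#define BLKSIZE 4096

/*
 * Write "len" bytes at "off", staging through one BLKSIZE block.
 * Assumes the range [off, off + len) lies within a single block.
 */
static ssize_t
write_partial_block(int fd, const char *src, size_t len, off_t off)
{
    char block[BLKSIZE];
    off_t base = off - (off % BLKSIZE);
    size_t boff = (size_t)(off - base);

    if (boff == 0 && len == BLKSIZE)
        /* Full-block write: the old contents are irrelevant. */
        return (pwrite(fd, src, len, off));

    /* Partial block: read the old contents before modifying them. */
    memset(block, 0, sizeof(block));
    if (pread(fd, block, BLKSIZE, base) < 0)
        return (-1);
    memcpy(block + boff, src, len);
    return (pwrite(fd, block, BLKSIZE, base));
}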
@@ -753,7 +753,7 @@ ffs_write(ap)

 /*
 * If IO_SYNC each buffer is written synchronously. Otherwise
-* if we have a severe page deficiency write the buffer
+* if we have a severe page deficiency write the buffer
 * asynchronously. Otherwise try to cluster, and if that
 * doesn't do it then either do an async write (if O_DIRECT),
 * or a delayed write (if not).
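The comment spells out a four-way completion policy for each buffer. A schematic of that cascade as a chooser; the predicate names are stand-ins, and the bwrite()/bawrite()/cluster_write()/bdwrite() mapping in the comments is the conventional reading of this comment rather than code shown in the hunk:

#include <stdio.h>

enum write_policy { WRITE_SYNC, WRITE_ASYNC, WRITE_CLUSTER, WRITE_DELAYED };

static enum write_policy
choose_write_policy(int io_sync, int page_shortage, int can_cluster,
    int o_direct)
{
    if (io_sync)
        return (WRITE_SYNC);        /* bwrite(): wait for the I/O */
    if (page_shortage)
        return (WRITE_ASYNC);       /* bawrite(): start it, don't wait */
    if (can_cluster)
        return (WRITE_CLUSTER);     /* cluster_write(): batch neighbors */
    return (o_direct ? WRITE_ASYNC : WRITE_DELAYED);    /* else bdwrite() */
}

int
main(void)
{
    /* No IO_SYNC, no page shortage, clustering allowed -> cluster. */
    printf("%d\n", choose_write_policy(0, 0, 1, 0));
    return (0);
}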
@@ -889,12 +889,12 @@ ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
 /*
 * size of buffer. The buffer representing the
 * end of the file is rounded up to the size of
-* the block type ( fragment or full block,
+* the block type ( fragment or full block,
 * depending ).
 */
 size = sblksize(fs, dp->di_extsize, lbn);
 blkoffset = blkoff(fs, uio->uio_offset);
-
+
 /*
 * The amount we want to transfer in this iteration is
 * one FS block less the amount of the data before
@@ -985,7 +985,7 @@ ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
 }
 }

-/*
+/*
 * This can only happen in the case of an error
 * because the loop above resets bp to NULL on each iteration
 * and on normal completion has not set a new value into it.
@@ -1056,7 +1056,7 @@ ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
 if (uio->uio_resid < xfersize)
 xfersize = uio->uio_resid;

-/*
+/*
 * We must perform a read-before-write if the transfer size
 * does not cover the entire buffer.
 */
@@ -1096,7 +1096,7 @@ ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)

 /*
 * If IO_SYNC each buffer is written synchronously. Otherwise
-* if we have a severe page deficiency write the buffer
+* if we have a severe page deficiency write the buffer
 * asynchronously. Otherwise try to cluster, and if that
 * doesn't do it then either do an async write (if O_DIRECT),
 * or a delayed write (if not).