Import 4.4BSD-Lite2 onto the vendor branch. Note that in the kernel, all

files are off the vendor branch, so this should not change anything.

A "U" marker generally means that the file was not changed in between
the 4.4Lite and Lite-2 releases, and does not need a merge.  "C" generally
means that there was a change.
This commit is contained in:
peter 1996-03-11 19:47:21 +00:00
parent aabcb633d0
commit 76d9a2d652
40 changed files with 2274 additions and 1744 deletions

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94
* @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95
*/
#include <sys/param.h>
@ -52,16 +52,18 @@
extern u_long nextgennumber;
static daddr_t ffs_alloccg __P((struct inode *, int, daddr_t, int));
static daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, daddr_t));
static daddr_t ffs_clusteralloc __P((struct inode *, int, daddr_t, int));
static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, ufs_daddr_t));
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t,
int));
static ino_t ffs_dirpref __P((struct fs *));
static daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void ffs_fserr __P((struct fs *, u_int, char *));
static u_long ffs_hashalloc
__P((struct inode *, int, long, int, u_long (*)()));
static ino_t ffs_nodealloccg __P((struct inode *, int, daddr_t, int));
static daddr_t ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));
__P((struct inode *, int, long, int, u_int32_t (*)()));
static ino_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t,
int));
/*
* Allocate a block in the file system.
@ -84,13 +86,13 @@ static daddr_t ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));
*/
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
register struct inode *ip;
daddr_t lbn, bpref;
ufs_daddr_t lbn, bpref;
int size;
struct ucred *cred;
daddr_t *bnp;
ufs_daddr_t *bnp;
{
register struct fs *fs;
daddr_t bno;
ufs_daddr_t bno;
int cg, error;
*bnp = 0;
@ -118,8 +120,8 @@ ffs_alloc(ip, lbn, bpref, size, cred, bnp)
cg = ino_to_cg(fs, ip->i_number);
else
cg = dtog(fs, bpref);
bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
(u_long (*)())ffs_alloccg);
bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
(u_int32_t (*)())ffs_alloccg);
if (bno > 0) {
ip->i_blocks += btodb(size);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
@ -148,8 +150,8 @@ ffs_alloc(ip, lbn, bpref, size, cred, bnp)
*/
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
register struct inode *ip;
daddr_t lbprev;
daddr_t bpref;
ufs_daddr_t lbprev;
ufs_daddr_t bpref;
int osize, nsize;
struct ucred *cred;
struct buf **bpp;
@ -157,7 +159,7 @@ ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
register struct fs *fs;
struct buf *bp;
int cg, request, error;
daddr_t bprev, bno;
ufs_daddr_t bprev, bno;
*bpp = 0;
fs = ip->i_fs;
@ -255,8 +257,8 @@ ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
panic("ffs_realloccg: bad optim");
/* NOTREACHED */
}
bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
(u_long (*)())ffs_alloccg);
bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
(u_int32_t (*)())ffs_alloccg);
if (bno > 0) {
bp->b_blkno = fsbtodb(fs, bno);
(void) vnode_pager_uncache(ITOV(ip));
@ -302,9 +304,10 @@ ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
* Note that the error return is not reflected back to the user. Rather
* the previous block allocation will be used.
*/
#include <sys/sysctl.h>
int doasyncfree = 1;
struct ctldebug debug14 = { "doasyncfree", &doasyncfree };
int doreallocblks = 1;
int prtrealloc = 0;
int
ffs_reallocblks(ap)
struct vop_reallocblks_args /* {
@ -316,12 +319,14 @@ ffs_reallocblks(ap)
struct inode *ip;
struct vnode *vp;
struct buf *sbp, *ebp;
daddr_t *bap, *sbap, *ebap;
ufs_daddr_t *bap, *sbap, *ebap;
struct cluster_save *buflist;
daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
ufs_daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
int i, len, start_lvl, end_lvl, pref, ssize;
if (doreallocblks == 0)
return (ENOSPC);
vp = ap->a_vp;
ip = VTOI(vp);
fs = ip->i_fs;
@ -332,9 +337,18 @@ ffs_reallocblks(ap)
start_lbn = buflist->bs_children[0]->b_lblkno;
end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
for (i = 0; i < len; i++)
if (!ffs_checkblk(ip,
dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
panic("ffs_reallocblks: unallocated block 1");
for (i = 1; i < len; i++)
if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
panic("ffs_reallocblks: non-cluster");
panic("ffs_reallocblks: non-logical cluster");
blkno = buflist->bs_children[0]->b_blkno;
ssize = fsbtodb(fs, fs->fs_frag);
for (i = 1; i < len - 1; i++)
if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
/*
* If the latest allocation is in a new cylinder group, assume that
@ -359,7 +373,7 @@ ffs_reallocblks(ap)
brelse(sbp);
return (ENOSPC);
}
sbap = (daddr_t *)sbp->b_data;
sbap = (ufs_daddr_t *)sbp->b_data;
soff = idp->in_off;
}
/*
@ -379,13 +393,13 @@ ffs_reallocblks(ap)
ssize = len - (idp->in_off + 1);
if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
goto fail;
ebap = (daddr_t *)ebp->b_data;
ebap = (ufs_daddr_t *)ebp->b_data;
}
/*
* Search the block map looking for an allocation of the desired size.
*/
if ((newblk = (daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
len, (u_long (*)())ffs_clusteralloc)) == 0)
if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
len, (u_int32_t (*)())ffs_clusteralloc)) == 0)
goto fail;
/*
* We have found a new contiguous block.
@ -394,13 +408,25 @@ ffs_reallocblks(ap)
* block pointers in the inode and indirect blocks associated
* with the file.
*/
#ifdef DEBUG
if (prtrealloc)
printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
start_lbn, end_lbn);
#endif
blkno = newblk;
for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
if (i == ssize)
bap = ebap;
#ifdef DIAGNOSTIC
if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
if (!ffs_checkblk(ip,
dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
panic("ffs_reallocblks: unallocated block 2");
if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
if (prtrealloc)
printf(" %d,", *bap);
#endif
*bap++ = blkno;
}
@ -436,11 +462,28 @@ ffs_reallocblks(ap)
/*
* Last, free the old blocks and assign the new blocks to the buffers.
*/
#ifdef DEBUG
if (prtrealloc)
printf("\n\tnew:");
#endif
for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
ffs_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
fs->fs_bsize);
buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
if (!ffs_checkblk(ip,
dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
panic("ffs_reallocblks: unallocated block 3");
if (prtrealloc)
printf(" %d,", blkno);
#endif
}
#ifdef DEBUG
if (prtrealloc) {
prtrealloc--;
printf("\n");
}
#endif
return (0);
fail:
@ -579,17 +622,17 @@ ffs_dirpref(fs)
* fs_rotdelay milliseconds. This is to allow time for the processor to
* schedule another I/O transfer.
*/
daddr_t
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
struct inode *ip;
daddr_t lbn;
ufs_daddr_t lbn;
int indx;
daddr_t *bap;
ufs_daddr_t *bap;
{
register struct fs *fs;
register int cg;
int avgbfree, startcg;
daddr_t nextblk;
ufs_daddr_t nextblk;
fs = ip->i_fs;
if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
@ -657,7 +700,7 @@ ffs_hashalloc(ip, cg, pref, size, allocator)
int cg;
long pref;
int size; /* size for data blocks, mode for inodes */
u_long (*allocator)();
u_int32_t (*allocator)();
{
register struct fs *fs;
long result;
@ -704,7 +747,7 @@ ffs_hashalloc(ip, cg, pref, size, allocator)
* Check to see if the necessary fragments are available, and
* if they are, allocate them.
*/
static daddr_t
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
struct inode *ip;
int cg;
@ -774,11 +817,11 @@ ffs_fragextend(ip, cg, bprev, osize, nsize)
* Check to see if a block of the appropriate size is available,
* and if it is, allocate it.
*/
static daddr_t
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
struct inode *ip;
int cg;
daddr_t bpref;
ufs_daddr_t bpref;
int size;
{
register struct fs *fs;
@ -868,13 +911,13 @@ ffs_alloccg(ip, cg, bpref, size)
* Note that this routine only allocates fs_bsize blocks; these
* blocks may be fragmented by the routine that allocates them.
*/
static daddr_t
static ufs_daddr_t
ffs_alloccgblk(fs, cgp, bpref)
register struct fs *fs;
register struct cg *cgp;
daddr_t bpref;
ufs_daddr_t bpref;
{
daddr_t bno, blkno;
ufs_daddr_t bno, blkno;
int cylno, pos, delta;
short *cylbp;
register int i;
@ -892,13 +935,7 @@ ffs_alloccgblk(fs, cgp, bpref)
bno = bpref;
goto gotit;
}
/*
* check for a block available on the same cylinder
*/
cylno = cbtocylno(fs, bpref);
if (cg_blktot(cgp)[cylno] == 0)
goto norot;
if (fs->fs_cpc == 0) {
if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
/*
* Block layout information is not available.
* Leaving bpref unchanged means we take the
@ -909,6 +946,12 @@ ffs_alloccgblk(fs, cgp, bpref)
*/
goto norot;
}
/*
* check for a block available on the same cylinder
*/
cylno = cbtocylno(fs, bpref);
if (cg_blktot(cgp)[cylno] == 0)
goto norot;
/*
* check the summary information to see if a block is
* available in the requested cylinder starting at the
@ -979,21 +1022,22 @@ ffs_alloccgblk(fs, cgp, bpref)
* are multiple choices in the same cylinder group. Instead we just
* take the first one that we find following bpref.
*/
static daddr_t
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
struct inode *ip;
int cg;
daddr_t bpref;
ufs_daddr_t bpref;
int len;
{
register struct fs *fs;
register struct cg *cgp;
struct buf *bp;
int i, run, bno, bit, map;
int i, got, run, bno, bit, map;
u_char *mapp;
int32_t *lp;
fs = ip->i_fs;
if (fs->fs_cs(fs, cg).cs_nbfree < len)
if (fs->fs_maxcluster[cg] < len)
return (NULL);
if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
NOCRED, &bp))
@ -1005,11 +1049,25 @@ ffs_clusteralloc(ip, cg, bpref, len)
* Check to see if a cluster of the needed size (or bigger) is
* available in this cylinder group.
*/
lp = &cg_clustersum(cgp)[len];
for (i = len; i <= fs->fs_contigsumsize; i++)
if (cg_clustersum(cgp)[i] > 0)
if (*lp++ > 0)
break;
if (i > fs->fs_contigsumsize)
if (i > fs->fs_contigsumsize) {
/*
* This is the first time looking for a cluster in this
* cylinder group. Update the cluster summary information
* to reflect the true maximum sized cluster so that
* future cluster allocation requests can avoid reading
* the cylinder group map only to find no clusters.
*/
lp = &cg_clustersum(cgp)[len - 1];
for (i = len - 1; i > 0; i--)
if (*lp-- > 0)
break;
fs->fs_maxcluster[cg] = i;
goto fail;
}
/*
* Search the cluster map to find a big enough cluster.
* We take the first one that we find, even if it is larger
@ -1028,7 +1086,7 @@ ffs_clusteralloc(ip, cg, bpref, len)
mapp = &cg_clustersfree(cgp)[bpref / NBBY];
map = *mapp++;
bit = 1 << (bpref % NBBY);
for (run = 0, i = bpref; i < cgp->cg_nclusterblks; i++) {
for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
if ((map & bit) == 0) {
run = 0;
} else {
@ -1036,22 +1094,27 @@ ffs_clusteralloc(ip, cg, bpref, len)
if (run == len)
break;
}
if ((i & (NBBY - 1)) != (NBBY - 1)) {
if ((got & (NBBY - 1)) != (NBBY - 1)) {
bit <<= 1;
} else {
map = *mapp++;
bit = 1;
}
}
if (i == cgp->cg_nclusterblks)
if (got == cgp->cg_nclusterblks)
goto fail;
/*
* Allocate the cluster that we have found.
*/
bno = cg * fs->fs_fpg + blkstofrags(fs, i - run + 1);
for (i = 1; i <= len; i++)
if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i))
panic("ffs_clusteralloc: map mismatch");
bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
if (dtog(fs, bno) != cg)
panic("ffs_clusteralloc: allocated out of group");
len = blkstofrags(fs, len);
for (i = 0; i < len; i += fs->fs_frag)
if (ffs_alloccgblk(fs, cgp, bno + i) != bno + i)
if ((got = ffs_alloccgblk(fs, cgp, bno + i)) != bno + i)
panic("ffs_clusteralloc: lost block");
brelse(bp);
return (bno);
@ -1074,7 +1137,7 @@ static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
struct inode *ip;
int cg;
daddr_t ipref;
ufs_daddr_t ipref;
int mode;
{
register struct fs *fs;
@ -1152,13 +1215,13 @@ ffs_nodealloccg(ip, cg, ipref, mode)
*/
ffs_blkfree(ip, bno, size)
register struct inode *ip;
daddr_t bno;
ufs_daddr_t bno;
long size;
{
register struct fs *fs;
register struct cg *cgp;
struct buf *bp;
daddr_t blkno;
ufs_daddr_t blkno;
int i, error, cg, blk, frags, bbase;
fs = ip->i_fs;
@ -1249,6 +1312,56 @@ ffs_blkfree(ip, bno, size)
bdwrite(bp);
}
#ifdef DIAGNOSTIC
/*
* Verify allocation of a block or fragment. Returns true if block or
* fragment is allocated, false if it is free.
*/
ffs_checkblk(ip, bno, size)
struct inode *ip;
ufs_daddr_t bno;
long size;
{
struct fs *fs;
struct cg *cgp;
struct buf *bp;
int i, error, frags, free;
fs = ip->i_fs;
if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
printf("bsize = %d, size = %d, fs = %s\n",
fs->fs_bsize, size, fs->fs_fsmnt);
panic("checkblk: bad size");
}
if ((u_int)bno >= fs->fs_size)
panic("checkblk: bad block %d", bno);
error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
(int)fs->fs_cgsize, NOCRED, &bp);
if (error) {
brelse(bp);
return;
}
cgp = (struct cg *)bp->b_data;
if (!cg_chkmagic(cgp)) {
brelse(bp);
return;
}
bno = dtogd(fs, bno);
if (size == fs->fs_bsize) {
free = ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno));
} else {
frags = numfrags(fs, size);
for (free = 0, i = 0; i < frags; i++)
if (isset(cg_blksfree(cgp), bno + i))
free++;
if (free != 0 && free != frags)
panic("checkblk: partially free fragment");
}
brelse(bp);
return (!free);
}
#endif /* DIAGNOSTIC */
/*
* Free an inode.
*
@ -1316,14 +1429,14 @@ ffs_vfree(ap)
* It is a panic if a request is made to find a block if none are
* available.
*/
static daddr_t
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
register struct fs *fs;
register struct cg *cgp;
daddr_t bpref;
ufs_daddr_t bpref;
int allocsiz;
{
daddr_t bno;
ufs_daddr_t bno;
int start, len, loc, i;
int blk, field, subfield, pos;
@ -1383,10 +1496,11 @@ ffs_mapsearch(fs, cgp, bpref, allocsiz)
ffs_clusteracct(fs, cgp, blkno, cnt)
struct fs *fs;
struct cg *cgp;
daddr_t blkno;
ufs_daddr_t blkno;
int cnt;
{
long *sump;
int32_t *sump;
int32_t *lp;
u_char *freemapp, *mapp;
int i, start, end, forw, back, map, bit;
@ -1455,6 +1569,14 @@ ffs_clusteracct(fs, cgp, blkno, cnt)
sump[back] -= cnt;
if (forw > 0)
sump[forw] -= cnt;
/*
* Update cluster summary information.
*/
lp = &sump[fs->fs_contigsumsize];
for (i = fs->fs_contigsumsize; i > 0; i--)
if (*lp-- > 0)
break;
fs->fs_maxcluster[cgp->cg_cgx] = i;
}
/*

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_balloc.c 8.4 (Berkeley) 9/23/93
* @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95
*/
#include <sys/param.h>
@ -54,27 +54,27 @@
* by allocating the physical blocks on a device given
* the inode and the logical block number in a file.
*/
ffs_balloc(ip, bn, size, cred, bpp, flags)
ffs_balloc(ip, lbn, size, cred, bpp, flags)
register struct inode *ip;
register daddr_t bn;
register ufs_daddr_t lbn;
int size;
struct ucred *cred;
struct buf **bpp;
int flags;
{
register struct fs *fs;
register daddr_t nb;
register ufs_daddr_t nb;
struct buf *bp, *nbp;
struct vnode *vp = ITOV(ip);
struct indir indirs[NIADDR + 2];
daddr_t newb, lbn, *bap, pref;
int osize, nsize, num, i, error;
ufs_daddr_t newb, *bap, pref;
int deallocated, osize, nsize, num, i, error;
ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
*bpp = NULL;
if (bn < 0)
if (lbn < 0)
return (EFBIG);
fs = ip->i_fs;
lbn = bn;
/*
* If the next write will extend the file into a new block,
@ -82,7 +82,7 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
* this fragment has to be extended to be a full block.
*/
nb = lblkno(fs, ip->i_size);
if (nb < NDADDR && nb < bn) {
if (nb < NDADDR && nb < lbn) {
osize = blksize(fs, ip, nb);
if (osize < fs->fs_bsize && osize > 0) {
error = ffs_realloccg(ip, nb,
@ -103,10 +103,10 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
/*
* The first NDADDR blocks are direct blocks
*/
if (bn < NDADDR) {
nb = ip->i_db[bn];
if (nb != 0 && ip->i_size >= (bn + 1) * fs->fs_bsize) {
error = bread(vp, bn, fs->fs_bsize, NOCRED, &bp);
if (lbn < NDADDR) {
nb = ip->i_db[lbn];
if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
if (error) {
brelse(bp);
return (error);
@ -121,34 +121,34 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
osize = fragroundup(fs, blkoff(fs, ip->i_size));
nsize = fragroundup(fs, size);
if (nsize <= osize) {
error = bread(vp, bn, osize, NOCRED, &bp);
error = bread(vp, lbn, osize, NOCRED, &bp);
if (error) {
brelse(bp);
return (error);
}
} else {
error = ffs_realloccg(ip, bn,
ffs_blkpref(ip, bn, (int)bn, &ip->i_db[0]),
osize, nsize, cred, &bp);
error = ffs_realloccg(ip, lbn,
ffs_blkpref(ip, lbn, (int)lbn,
&ip->i_db[0]), osize, nsize, cred, &bp);
if (error)
return (error);
}
} else {
if (ip->i_size < (bn + 1) * fs->fs_bsize)
if (ip->i_size < (lbn + 1) * fs->fs_bsize)
nsize = fragroundup(fs, size);
else
nsize = fs->fs_bsize;
error = ffs_alloc(ip, bn,
ffs_blkpref(ip, bn, (int)bn, &ip->i_db[0]),
error = ffs_alloc(ip, lbn,
ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
nsize, cred, &newb);
if (error)
return (error);
bp = getblk(vp, bn, nsize, 0, 0);
bp = getblk(vp, lbn, nsize, 0, 0);
bp->b_blkno = fsbtodb(fs, newb);
if (flags & B_CLRBUF)
clrbuf(bp);
}
ip->i_db[bn] = dbtofsb(fs, bp->b_blkno);
ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
*bpp = bp;
return (0);
@ -157,7 +157,7 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
* Determine the number of levels of indirection.
*/
pref = 0;
if (error = ufs_getlbns(vp, bn, indirs, &num))
if (error = ufs_getlbns(vp, lbn, indirs, &num))
return(error);
#ifdef DIAGNOSTIC
if (num < 1)
@ -168,24 +168,26 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
*/
--num;
nb = ip->i_ib[indirs[0].in_off];
allocib = NULL;
allocblk = allociblk;
if (nb == 0) {
pref = ffs_blkpref(ip, lbn, 0, (daddr_t *)0);
pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
cred, &newb))
return (error);
nb = newb;
*allocblk++ = nb;
bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
bp->b_blkno = fsbtodb(fs, newb);
bp->b_blkno = fsbtodb(fs, nb);
clrbuf(bp);
/*
* Write synchronously so that indirect blocks
* never point at garbage.
*/
if (error = bwrite(bp)) {
ffs_blkfree(ip, nb, fs->fs_bsize);
return (error);
}
ip->i_ib[indirs[0].in_off] = newb;
if (error = bwrite(bp))
goto fail;
allocib = &ip->i_ib[indirs[0].in_off];
*allocib = nb;
ip->i_flag |= IN_CHANGE | IN_UPDATE;
}
/*
@ -196,9 +198,9 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
if (error) {
brelse(bp);
return (error);
goto fail;
}
bap = (daddr_t *)bp->b_data;
bap = (ufs_daddr_t *)bp->b_data;
nb = bap[indirs[i].in_off];
if (i == num)
break;
@ -208,13 +210,14 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
continue;
}
if (pref == 0)
pref = ffs_blkpref(ip, lbn, 0, (daddr_t *)0);
pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
if (error =
ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
brelse(bp);
return (error);
goto fail;
}
nb = newb;
*allocblk++ = nb;
nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
nbp->b_blkno = fsbtodb(fs, nb);
clrbuf(nbp);
@ -223,9 +226,8 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
* never point at garbage.
*/
if (error = bwrite(nbp)) {
ffs_blkfree(ip, nb, fs->fs_bsize);
brelse(bp);
return (error);
goto fail;
}
bap[indirs[i - 1].in_off] = nb;
/*
@ -246,9 +248,10 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
if (error = ffs_alloc(ip,
lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
brelse(bp);
return (error);
goto fail;
}
nb = newb;
*allocblk++ = nb;
nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
nbp->b_blkno = fsbtodb(fs, nb);
if (flags & B_CLRBUF)
@ -271,7 +274,7 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
if (error) {
brelse(nbp);
return (error);
goto fail;
}
} else {
nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
@ -279,4 +282,26 @@ ffs_balloc(ip, bn, size, cred, bpp, flags)
}
*bpp = nbp;
return (0);
fail:
/*
* If we have failed part way through block allocation, we
* have to deallocate any indirect blocks that we have allocated.
*/
for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
ffs_blkfree(ip, *blkp, fs->fs_bsize);
deallocated += fs->fs_bsize;
}
if (allocib != NULL)
*allocib = 0;
if (deallocated) {
#ifdef QUOTA
/*
* Restore user's disk quota because allocation failed.
*/
(void) chkdq(ip, (long)-btodb(deallocated), cred, FORCE);
#endif
ip->i_blocks -= btodb(deallocated);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
}
return (error);
}

View File

@ -30,9 +30,26 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_extern.h 8.3 (Berkeley) 4/16/94
* @(#)ffs_extern.h 8.6 (Berkeley) 3/30/95
*/
/*
* Sysctl values for the fast filesystem.
*/
#define FFS_CLUSTERREAD 1 /* cluster reading enabled */
#define FFS_CLUSTERWRITE 2 /* cluster writing enabled */
#define FFS_REALLOCBLKS 3 /* block reallocation enabled */
#define FFS_ASYNCFREE 4 /* asynchronous block freeing enabled */
#define FFS_MAXID 5 /* number of valid ffs ids */
#define FFS_NAMES { \
{ 0, 0 }, \
{ "doclusterread", CTLTYPE_INT }, \
{ "doclusterwrite", CTLTYPE_INT }, \
{ "doreallocblks", CTLTYPE_INT }, \
{ "doasyncfree", CTLTYPE_INT }, \
}
struct buf;
struct fid;
struct fs;
@ -46,23 +63,24 @@ struct ucred;
struct uio;
struct vnode;
struct mbuf;
struct vfsconf;
__BEGIN_DECLS
int ffs_alloc __P((struct inode *,
daddr_t, daddr_t, int, struct ucred *, daddr_t *));
ufs_daddr_t, ufs_daddr_t, int, struct ucred *, ufs_daddr_t *));
int ffs_balloc __P((struct inode *,
daddr_t, int, struct ucred *, struct buf **, int));
ufs_daddr_t, int, struct ucred *, struct buf **, int));
int ffs_blkatoff __P((struct vop_blkatoff_args *));
int ffs_blkfree __P((struct inode *, daddr_t, long));
daddr_t ffs_blkpref __P((struct inode *, daddr_t, int, daddr_t *));
int ffs_blkfree __P((struct inode *, ufs_daddr_t, long));
ufs_daddr_t ffs_blkpref __P((struct inode *, ufs_daddr_t, int, ufs_daddr_t *));
int ffs_bmap __P((struct vop_bmap_args *));
void ffs_clrblock __P((struct fs *, u_char *, daddr_t));
void ffs_clrblock __P((struct fs *, u_char *, ufs_daddr_t));
int ffs_fhtovp __P((struct mount *, struct fid *, struct mbuf *,
struct vnode **, int *, struct ucred **));
void ffs_fragacct __P((struct fs *, int, long [], int));
void ffs_fragacct __P((struct fs *, int, int32_t [], int));
int ffs_fsync __P((struct vop_fsync_args *));
int ffs_init __P((void));
int ffs_isblock __P((struct fs *, u_char *, daddr_t));
int ffs_init __P((struct vfsconf *));
int ffs_isblock __P((struct fs *, u_char *, ufs_daddr_t));
int ffs_mount __P((struct mount *,
char *, caddr_t, struct nameidata *, struct proc *));
int ffs_mountfs __P((struct vnode *, struct mount *, struct proc *));
@ -70,11 +88,13 @@ int ffs_mountroot __P((void));
int ffs_read __P((struct vop_read_args *));
int ffs_reallocblks __P((struct vop_reallocblks_args *));
int ffs_realloccg __P((struct inode *,
daddr_t, daddr_t, int, int, struct ucred *, struct buf **));
ufs_daddr_t, ufs_daddr_t, int, int, struct ucred *, struct buf **));
int ffs_reclaim __P((struct vop_reclaim_args *));
void ffs_setblock __P((struct fs *, u_char *, daddr_t));
void ffs_setblock __P((struct fs *, u_char *, ufs_daddr_t));
int ffs_statfs __P((struct mount *, struct statfs *, struct proc *));
int ffs_sync __P((struct mount *, int, struct ucred *, struct proc *));
int ffs_sysctl __P((int *, u_int, void *, size_t *, void *, size_t,
struct proc *));
int ffs_truncate __P((struct vop_truncate_args *));
int ffs_unmount __P((struct mount *, int, struct proc *));
int ffs_update __P((struct vop_update_args *));

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_inode.c 8.5 (Berkeley) 12/30/93
* @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95
*/
#include <sys/param.h>
@ -55,14 +55,8 @@
#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
static int ffs_indirtrunc __P((struct inode *, daddr_t, daddr_t, daddr_t, int,
long *));
int
ffs_init()
{
return (ufs_init());
}
static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t,
ufs_daddr_t, int, long *));
/*
* Update the access, modified, and inode change times as specified by the
@ -97,13 +91,13 @@ ffs_update(ap)
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0)
return (0);
if (ip->i_flag & IN_ACCESS)
ip->i_atime.ts_sec = ap->a_access->tv_sec;
ip->i_atime = ap->a_access->tv_sec;
if (ip->i_flag & IN_UPDATE) {
ip->i_mtime.ts_sec = ap->a_modify->tv_sec;
ip->i_mtime = ap->a_modify->tv_sec;
ip->i_modrev++;
}
if (ip->i_flag & IN_CHANGE)
ip->i_ctime.ts_sec = time.tv_sec;
ip->i_ctime = time.tv_sec;
ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
fs = ip->i_fs;
/*
@ -122,7 +116,7 @@ ffs_update(ap)
}
*((struct dinode *)bp->b_data +
ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
if (ap->a_waitfor)
if (ap->a_waitfor && (ap->a_vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
return (bwrite(bp));
else {
bdwrite(bp);
@ -147,10 +141,10 @@ ffs_truncate(ap)
} */ *ap;
{
register struct vnode *ovp = ap->a_vp;
register daddr_t lastblock;
ufs_daddr_t lastblock;
register struct inode *oip;
daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
off_t length = ap->a_length;
register struct fs *fs;
struct buf *bp;
@ -161,6 +155,8 @@ ffs_truncate(ap)
int aflags, error, allerror;
off_t osize;
if (length < 0)
return (EINVAL);
oip = VTOI(ovp);
tv = time;
if (ovp->v_type == VLNK &&
@ -182,15 +178,16 @@ ffs_truncate(ap)
if (error = getinoquota(oip))
return (error);
#endif
vnode_pager_setsize(ovp, (u_long)length);
fs = oip->i_fs;
osize = oip->i_size;
/*
* Lengthen the size of the file. We must ensure that the
* last byte of the file is allocated. Since the smallest
* value of oszie is 0, length will be at least 1.
* value of osize is 0, length will be at least 1.
*/
if (osize < length) {
if (length > fs->fs_maxfilesize)
return (EFBIG);
offset = blkoff(fs, length - 1);
lbn = lblkno(fs, length - 1);
aflags = B_CLRBUF;
@ -200,8 +197,9 @@ ffs_truncate(ap)
aflags))
return (error);
oip->i_size = length;
vnode_pager_setsize(ovp, (u_long)length);
(void) vnode_pager_uncache(ovp);
if (aflags & IO_SYNC)
if (aflags & B_SYNC)
bwrite(bp);
else
bawrite(bp);
@ -231,11 +229,12 @@ ffs_truncate(ap)
(void) vnode_pager_uncache(ovp);
bzero((char *)bp->b_data + offset, (u_int)(size - offset));
allocbuf(bp, size);
if (aflags & IO_SYNC)
if (aflags & B_SYNC)
bwrite(bp);
else
bawrite(bp);
}
vnode_pager_setsize(ovp, (u_long)length);
/*
* Calculate index into inode's block list of
* last direct and indirect blocks (if any)
@ -383,17 +382,17 @@ ffs_truncate(ap)
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
register struct inode *ip;
daddr_t lbn, lastbn;
daddr_t dbn;
ufs_daddr_t lbn, lastbn;
ufs_daddr_t dbn;
int level;
long *countp;
{
register int i;
struct buf *bp;
register struct fs *fs = ip->i_fs;
register daddr_t *bap;
register ufs_daddr_t *bap;
struct vnode *vp;
daddr_t *copy, nb, nlbn, last;
ufs_daddr_t *copy, nb, nlbn, last;
long blkcount, factor;
int nblocks, blocksreleased = 0;
int error = 0, allerror = 0;
@ -439,11 +438,11 @@ ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
return (error);
}
bap = (daddr_t *)bp->b_data;
MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
bap = (ufs_daddr_t *)bp->b_data;
MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
bzero((caddr_t)&bap[last + 1],
(u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
(u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
if (last == -1)
bp->b_flags |= B_INVAL;
error = bwrite(bp);
@ -460,8 +459,8 @@ ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
if (nb == 0)
continue;
if (level > SINGLE) {
if (error = ffs_indirtrunc(ip, nlbn,
fsbtodb(fs, nb), (daddr_t)-1, level - 1, &blkcount))
if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
(ufs_daddr_t)-1, level - 1, &blkcount))
allerror = error;
blocksreleased += blkcount;
}

View File

@ -30,19 +30,22 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_subr.c 8.2 (Berkeley) 9/21/93
* @(#)ffs_subr.c 8.5 (Berkeley) 3/21/95
*/
#include <sys/param.h>
#ifndef KERNEL
#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>
#else
#ifdef KERNEL
#include <sys/systm.h>
#include <sys/vnode.h>
#include <ufs/ffs/ffs_extern.h>
#include <sys/buf.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
/*
* Return buffer with the contents of block "offset" from the beginning of
@ -61,7 +64,7 @@ ffs_blkatoff(ap)
struct inode *ip;
register struct fs *fs;
struct buf *bp;
daddr_t lbn;
ufs_daddr_t lbn;
int bsize, error;
ip = VTOI(ap->a_vp);
@ -89,7 +92,7 @@ void
ffs_fragacct(fs, fragmap, fraglist, cnt)
struct fs *fs;
int fragmap;
long fraglist[];
int32_t fraglist[];
int cnt;
{
int inblk;
@ -123,7 +126,7 @@ ffs_checkoverlap(bp, ip)
struct inode *ip;
{
register struct buf *ebp, *ep;
register daddr_t start, last;
register ufs_daddr_t start, last;
struct vnode *vp;
ebp = &buf[nbuf];
@ -133,7 +136,8 @@ ffs_checkoverlap(bp, ip)
if (ep == bp || (ep->b_flags & B_INVAL) ||
ep->b_vp == NULLVP)
continue;
if (VOP_BMAP(ep->b_vp, (daddr_t)0, &vp, (daddr_t)0, NULL))
if (VOP_BMAP(ep->b_vp, (ufs_daddr_t)0, &vp, (ufs_daddr_t)0,
NULL))
continue;
if (vp != ip->i_devvp)
continue;
@ -159,7 +163,7 @@ int
ffs_isblock(fs, cp, h)
struct fs *fs;
unsigned char *cp;
daddr_t h;
ufs_daddr_t h;
{
unsigned char mask;
@ -187,7 +191,7 @@ void
ffs_clrblock(fs, cp, h)
struct fs *fs;
u_char *cp;
daddr_t h;
ufs_daddr_t h;
{
switch ((int)fs->fs_frag) {
@ -215,7 +219,7 @@ void
ffs_setblock(fs, cp, h)
struct fs *fs;
unsigned char *cp;
daddr_t h;
ufs_daddr_t h;
{
switch ((int)fs->fs_frag) {

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_vfsops.c 8.8 (Berkeley) 4/18/94
* @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
*/
#include <sys/param.h>
@ -73,22 +73,19 @@ struct vfsops ufs_vfsops = {
ffs_fhtovp,
ffs_vptofh,
ffs_init,
ffs_sysctl,
};
extern u_long nextgennumber;
/*
* Called by main() when ufs is going to be mounted as root.
*
* Name is updated by mount(8) after booting.
*/
#define ROOTNAME "root_device"
ffs_mountroot()
{
extern struct vnode *rootvp;
register struct fs *fs;
register struct mount *mp;
struct fs *fs;
struct mount *mp;
struct proc *p = curproc; /* XXX */
struct ufsmount *ump;
u_int size;
@ -97,36 +94,27 @@ ffs_mountroot()
/*
* Get vnodes for swapdev and rootdev.
*/
if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp))
panic("ffs_mountroot: can't setup bdevvp's");
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
mp->mnt_op = &ufs_vfsops;
mp->mnt_flag = MNT_RDONLY;
if ((error = bdevvp(swapdev, &swapdev_vp)) ||
(error = bdevvp(rootdev, &rootvp))) {
printf("ffs_mountroot: can't setup bdevvp's");
return (error);
}
if (error = vfs_rootmountalloc("ufs", "root_device", &mp))
return (error);
if (error = ffs_mountfs(rootvp, mp, p)) {
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free(mp, M_MOUNT);
return (error);
}
if (error = vfs_lock(mp)) {
(void)ffs_unmount(mp, 0, p);
free(mp, M_MOUNT);
return (error);
}
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mp->mnt_flag |= MNT_ROOTFS;
mp->mnt_vnodecovered = NULLVP;
simple_lock(&mountlist_slock);
CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
simple_unlock(&mountlist_slock);
ump = VFSTOUFS(mp);
fs = ump->um_fs;
bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
fs->fs_fsmnt[0] = '/';
bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
MNAMELEN);
(void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
&size);
bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
(void) copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
(void)ffs_statfs(mp, &mp->mnt_stat, p);
vfs_unlock(mp);
vfs_unbusy(mp, p);
inittodr(fs->fs_time);
return (0);
}
@ -150,6 +138,7 @@ ffs_mount(mp, path, data, ndp, p)
register struct fs *fs;
u_int size;
int error, flags;
mode_t accessmode;
if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
return (error);
@ -160,22 +149,42 @@ ffs_mount(mp, path, data, ndp, p)
if (mp->mnt_flag & MNT_UPDATE) {
ump = VFSTOUFS(mp);
fs = ump->um_fs;
error = 0;
if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
flags = WRITECLOSE;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
if (vfs_busy(mp))
return (EBUSY);
error = ffs_flushfiles(mp, flags, p);
vfs_unbusy(mp);
}
if (!error && (mp->mnt_flag & MNT_RELOAD))
error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
if (error)
if (error = ffs_flushfiles(mp, flags, p))
return (error);
if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR))
fs->fs_clean = 1;
fs->fs_ronly = 1;
if (error = ffs_sbupdate(ump, MNT_WAIT)) {
fs->fs_clean = 0;
fs->fs_ronly = 0;
return (error);
}
}
if ((mp->mnt_flag & MNT_RELOAD) &&
(error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)))
return (error);
if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
/*
* If upgrade to read-write by non-root, then verify
* that user has necessary permissions on the device.
*/
if (p->p_ucred->cr_uid != 0) {
devvp = ump->um_devvp;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
if (error = VOP_ACCESS(devvp, VREAD | VWRITE,
p->p_ucred, p)) {
VOP_UNLOCK(devvp, 0, p);
return (error);
}
VOP_UNLOCK(devvp, 0, p);
}
fs->fs_ronly = 0;
fs->fs_clean = 0;
(void) ffs_sbupdate(ump, MNT_WAIT);
}
if (args.fspec == 0) {
/*
* Process export requests.
@ -200,6 +209,21 @@ ffs_mount(mp, path, data, ndp, p)
vrele(devvp);
return (ENXIO);
}
/*
* If mount by non-root, then verify that user has necessary
* permissions on the device.
*/
if (p->p_ucred->cr_uid != 0) {
accessmode = VREAD;
if ((mp->mnt_flag & MNT_RDONLY) == 0)
accessmode |= VWRITE;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
vput(devvp);
return (error);
}
VOP_UNLOCK(devvp, 0, p);
}
if ((mp->mnt_flag & MNT_UPDATE) == 0)
error = ffs_mountfs(devvp, mp, p);
else {
@ -247,8 +271,10 @@ ffs_reload(mountp, cred, p)
struct inode *ip;
struct csum *space;
struct buf *bp;
struct fs *fs;
struct fs *fs, *newfs;
struct partinfo dpart;
int i, blks, size, error;
int32_t *lp;
if ((mountp->mnt_flag & MNT_RDONLY) == 0)
return (EINVAL);
@ -261,21 +287,31 @@ ffs_reload(mountp, cred, p)
/*
* Step 2: re-read superblock from disk.
*/
if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
size = DEV_BSIZE;
else
size = dpart.disklab->d_secsize;
if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp))
return (error);
fs = (struct fs *)bp->b_data;
if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
fs->fs_bsize < sizeof(struct fs)) {
newfs = (struct fs *)bp->b_data;
if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
newfs->fs_bsize < sizeof(struct fs)) {
brelse(bp);
return (EIO); /* XXX needs translation */
}
fs = VFSTOUFS(mountp)->um_fs;
bcopy(&fs->fs_csp[0], &((struct fs *)bp->b_data)->fs_csp[0],
sizeof(fs->fs_csp));
bcopy(bp->b_data, fs, (u_int)fs->fs_sbsize);
/*
* Copy pointer fields back into superblock before copying in XXX
* new superblock. These should really be in the ufsmount. XXX
* Note that important parameters (eg fs_ncg) are unchanged.
*/
bcopy(&fs->fs_csp[0], &newfs->fs_csp[0], sizeof(fs->fs_csp));
newfs->fs_maxcluster = fs->fs_maxcluster;
bcopy(newfs, fs, (u_int)fs->fs_sbsize);
if (fs->fs_sbsize < SBSIZE)
bp->b_flags |= B_INVAL;
brelse(bp);
mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
ffs_oldfscompat(fs);
/*
* Step 3: re-read summary information from disk.
@ -292,21 +328,36 @@ ffs_reload(mountp, cred, p)
bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size);
brelse(bp);
}
/*
* We no longer know anything about clusters per cylinder group.
*/
if (fs->fs_contigsumsize > 0) {
lp = fs->fs_maxcluster;
for (i = 0; i < fs->fs_ncg; i++)
*lp++ = fs->fs_contigsumsize;
}
loop:
simple_lock(&mntvnode_slock);
for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
if (vp->v_mount != mountp) {
simple_unlock(&mntvnode_slock);
goto loop;
}
nvp = vp->v_mntvnodes.le_next;
/*
* Step 4: invalidate all inactive vnodes.
*/
if (vp->v_usecount == 0) {
vgone(vp);
continue;
}
if (vrecycle(vp, &mntvnode_slock, p))
goto loop;
/*
* Step 5: invalidate all cached file data.
*/
if (vget(vp, 1))
simple_lock(&vp->v_interlock);
simple_unlock(&mntvnode_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
goto loop;
}
if (vinvalbuf(vp, 0, cred, p, 0, 0))
panic("ffs_reload: dirty2");
/*
@ -323,9 +374,9 @@ ffs_reload(mountp, cred, p)
ino_to_fsbo(fs, ip->i_number));
brelse(bp);
vput(vp);
if (vp->v_mount != mountp)
goto loop;
simple_lock(&mntvnode_slock);
}
simple_unlock(&mntvnode_slock);
return (0);
}
@ -341,14 +392,17 @@ ffs_mountfs(devvp, mp, p)
register struct ufsmount *ump;
struct buf *bp;
register struct fs *fs;
dev_t dev = devvp->v_rdev;
dev_t dev;
struct partinfo dpart;
caddr_t base, space;
int havepart = 0, blks;
int error, i, size;
int ronly;
int error, i, blks, size, ronly;
int32_t *lp;
struct ucred *cred;
extern struct vnode *rootvp;
u_int64_t maxfilesize; /* XXX */
dev = devvp->v_rdev;
cred = p ? p->p_ucred : NOCRED;
/*
* Disallow multiple mounts of the same device.
* Disallow mounting of a device that is currently in use
@ -359,22 +413,20 @@ ffs_mountfs(devvp, mp, p)
return (error);
if (vcount(devvp) > 1 && devvp != rootvp)
return (EBUSY);
if (error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0))
if (error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0))
return (error);
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
return (error);
if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
size = DEV_BSIZE;
else {
havepart = 1;
else
size = dpart.disklab->d_secsize;
}
bp = NULL;
ump = NULL;
if (error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp))
if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, cred, &bp))
goto out;
fs = (struct fs *)bp->b_data;
if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
@ -382,6 +434,11 @@ ffs_mountfs(devvp, mp, p)
error = EINVAL; /* XXX needs translation */
goto out;
}
/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
error = EROFS; /* needs translation */
goto out;
}
ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
bzero((caddr_t)ump, sizeof *ump);
ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
@ -393,18 +450,17 @@ ffs_mountfs(devvp, mp, p)
bp = NULL;
fs = ump->um_fs;
fs->fs_ronly = ronly;
if (ronly == 0)
fs->fs_fmod = 1;
blks = howmany(fs->fs_cssize, fs->fs_fsize);
base = space = malloc((u_long)fs->fs_cssize, M_UFSMNT,
M_WAITOK);
size = fs->fs_cssize;
blks = howmany(size, fs->fs_fsize);
if (fs->fs_contigsumsize > 0)
size += fs->fs_ncg * sizeof(int32_t);
base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
for (i = 0; i < blks; i += fs->fs_frag) {
size = fs->fs_bsize;
if (i + fs->fs_frag > blks)
size = (blks - i) * fs->fs_fsize;
error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
NOCRED, &bp);
if (error) {
if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
cred, &bp)) {
free(base, M_UFSMNT);
goto out;
}
@ -414,11 +470,15 @@ ffs_mountfs(devvp, mp, p)
brelse(bp);
bp = NULL;
}
if (fs->fs_contigsumsize > 0) {
fs->fs_maxcluster = lp = (int32_t *)space;
for (i = 0; i < fs->fs_ncg; i++)
*lp++ = fs->fs_contigsumsize;
}
mp->mnt_data = (qaddr_t)ump;
mp->mnt_stat.f_fsid.val[0] = (long)dev;
mp->mnt_stat.f_fsid.val[1] = MOUNT_UFS;
mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
mp->mnt_flag |= MNT_LOCAL;
ump->um_mountp = mp;
ump->um_dev = dev;
ump->um_devvp = devvp;
@ -429,11 +489,19 @@ ffs_mountfs(devvp, mp, p)
ump->um_quotas[i] = NULLVP;
devvp->v_specflags |= SI_MOUNTEDON;
ffs_oldfscompat(fs);
ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */
if (fs->fs_maxfilesize > maxfilesize) /* XXX */
fs->fs_maxfilesize = maxfilesize; /* XXX */
if (ronly == 0) {
fs->fs_clean = 0;
(void) ffs_sbupdate(ump, MNT_WAIT);
}
return (0);
out:
if (bp)
brelse(bp);
(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
if (ump) {
free(ump->um_fs, M_UFSMNT);
free(ump, M_UFSMNT);
@ -457,7 +525,7 @@ ffs_oldfscompat(fs)
if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
fs->fs_nrpos = 8; /* XXX */
if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
quad_t sizepb = fs->fs_bsize; /* XXX */
u_int64_t sizepb = fs->fs_bsize; /* XXX */
/* XXX */
fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
for (i = 0; i < NIADDR; i++) { /* XXX */
@ -481,28 +549,30 @@ ffs_unmount(mp, mntflags, p)
{
register struct ufsmount *ump;
register struct fs *fs;
int error, flags, ronly;
int error, flags;
flags = 0;
if (mntflags & MNT_FORCE) {
if (mp->mnt_flag & MNT_ROOTFS)
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
if (error = ffs_flushfiles(mp, flags, p))
return (error);
ump = VFSTOUFS(mp);
fs = ump->um_fs;
ronly = !fs->fs_ronly;
if (fs->fs_ronly == 0) {
fs->fs_clean = 1;
if (error = ffs_sbupdate(ump, MNT_WAIT)) {
fs->fs_clean = 0;
return (error);
}
}
ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
NOCRED, p);
vrele(ump->um_devvp);
free(fs->fs_csp[0], M_UFSMNT);
free(fs, M_UFSMNT);
free(ump, M_UFSMNT);
mp->mnt_data = (qaddr_t)0;
mp->mnt_flag &= ~MNT_LOCAL;
return (error);
}
@ -514,12 +584,9 @@ ffs_flushfiles(mp, flags, p)
int flags;
struct proc *p;
{
extern int doforce;
register struct ufsmount *ump;
int i, error;
if (!doforce)
flags &= ~FORCECLOSE;
ump = VFSTOUFS(mp);
#ifdef QUOTA
if (mp->mnt_flag & MNT_QUOTA) {
@ -556,7 +623,6 @@ ffs_statfs(mp, sbp, p)
fs = ump->um_fs;
if (fs->fs_magic != FS_MAGIC)
panic("ffs_statfs");
sbp->f_type = MOUNT_UFS;
sbp->f_bsize = fs->fs_fsize;
sbp->f_iosize = fs->fs_bsize;
sbp->f_blocks = fs->fs_dsize;
@ -567,6 +633,7 @@ ffs_statfs(mp, sbp, p)
sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
sbp->f_ffree = fs->fs_cstotal.cs_nifree;
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy((caddr_t)mp->mnt_stat.f_mntonname,
(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
@ -589,53 +656,55 @@ ffs_sync(mp, waitfor, cred, p)
struct ucred *cred;
struct proc *p;
{
register struct vnode *vp;
register struct inode *ip;
register struct ufsmount *ump = VFSTOUFS(mp);
register struct fs *fs;
struct vnode *nvp, *vp;
struct inode *ip;
struct ufsmount *ump = VFSTOUFS(mp);
struct fs *fs;
int error, allerror = 0;
fs = ump->um_fs;
/*
* Write back modified superblock.
* Consistency check that the superblock
* is still in the buffer cache.
*/
if (fs->fs_fmod != 0) {
if (fs->fs_ronly != 0) { /* XXX */
if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
printf("fs = %s\n", fs->fs_fsmnt);
panic("update: rofs mod");
}
fs->fs_fmod = 0;
fs->fs_time = time.tv_sec;
allerror = ffs_sbupdate(ump, waitfor);
}
/*
* Write back each (modified) inode.
*/
simple_lock(&mntvnode_slock);
loop:
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
vp = vp->v_mntvnodes.le_next) {
vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
if (VOP_ISLOCKED(vp))
continue;
simple_lock(&vp->v_interlock);
nvp = vp->v_mntvnodes.le_next;
ip = VTOI(vp);
if ((ip->i_flag &
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
vp->v_dirtyblkhd.lh_first == NULL)
vp->v_dirtyblkhd.lh_first == NULL) {
simple_unlock(&vp->v_interlock);
continue;
if (vget(vp, 1))
}
simple_unlock(&mntvnode_slock);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
simple_lock(&mntvnode_slock);
if (error == ENOENT)
goto loop;
continue;
}
if (error = VOP_FSYNC(vp, cred, waitfor, p))
allerror = error;
vput(vp);
VOP_UNLOCK(vp, 0, p);
vrele(vp);
simple_lock(&mntvnode_slock);
}
simple_unlock(&mntvnode_slock);
/*
* Force stale file system control information to be flushed.
*/
@ -644,6 +713,15 @@ ffs_sync(mp, waitfor, cred, p)
#ifdef QUOTA
qsync(mp);
#endif
/*
* Write back modified superblock.
*/
if (fs->fs_fmod != 0) {
fs->fs_fmod = 0;
fs->fs_time = time.tv_sec;
if (error = ffs_sbupdate(ump, waitfor))
allerror = error;
}
return (allerror);
}
@ -659,8 +737,9 @@ ffs_vget(mp, ino, vpp)
ino_t ino;
struct vnode **vpp;
{
register struct fs *fs;
register struct inode *ip;
struct proc *p = curproc; /* XXX */
struct fs *fs;
struct inode *ip;
struct ufsmount *ump;
struct buf *bp;
struct vnode *vp;
@ -680,6 +759,7 @@ ffs_vget(mp, ino, vpp)
type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
MALLOC(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
bzero((caddr_t)ip, sizeof(struct inode));
lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
vp->v_data = ip;
ip->i_vnode = vp;
ip->i_fs = fs = ump->um_fs;
@ -801,6 +881,53 @@ ffs_vptofh(vp, fhp)
return (0);
}
/*
* Initialize the filesystem; just use ufs_init.
*/
int
ffs_init(vfsp)
struct vfsconf *vfsp;
{
return (ufs_init(vfsp));
}
/*
* fast filesystem related variables.
*/
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
int *name;
u_int namelen;
void *oldp;
size_t *oldlenp;
void *newp;
size_t newlen;
struct proc *p;
{
extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
/* all sysctl names at this level are terminal */
if (namelen != 1)
return (ENOTDIR); /* overloaded */
switch (name[0]) {
case FFS_CLUSTERREAD:
return (sysctl_int(oldp, oldlenp, newp, newlen,
&doclusterread));
case FFS_CLUSTERWRITE:
return (sysctl_int(oldp, oldlenp, newp, newlen,
&doclusterwrite));
case FFS_REALLOCBLKS:
return (sysctl_int(oldp, oldlenp, newp, newlen,
&doreallocblks));
case FFS_ASYNCFREE:
return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
default:
return (EOPNOTSUPP);
}
/* NOTREACHED */
}
/*
* Write a superblock and associated information back to disk.
*/
@ -809,21 +936,15 @@ ffs_sbupdate(mp, waitfor)
struct ufsmount *mp;
int waitfor;
{
register struct fs *fs = mp->um_fs;
register struct fs *dfs, *fs = mp->um_fs;
register struct buf *bp;
int blks;
caddr_t space;
int i, size, error = 0;
int i, size, error, allerror = 0;
bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
/* Restore compatibility to old file systems. XXX */
if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
((struct fs *)bp->b_data)->fs_nrpos = -1; /* XXX */
if (waitfor == MNT_WAIT)
error = bwrite(bp);
else
bawrite(bp);
/*
* First write back the summary information.
*/
blks = howmany(fs->fs_cssize, fs->fs_fsize);
space = (caddr_t)fs->fs_csp[0];
for (i = 0; i < blks; i += fs->fs_frag) {
@ -834,10 +955,37 @@ ffs_sbupdate(mp, waitfor)
size, 0, 0);
bcopy(space, bp->b_data, (u_int)size);
space += size;
if (waitfor == MNT_WAIT)
error = bwrite(bp);
else
if (waitfor != MNT_WAIT)
bawrite(bp);
else if (error = bwrite(bp))
allerror = error;
}
return (error);
/*
* Now write back the superblock itself. If any errors occurred
* up to this point, then fail so that the superblock avoids
* being written out as clean.
*/
if (allerror)
return (allerror);
bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
/* Restore compatibility to old file systems. XXX */
dfs = (struct fs *)bp->b_data; /* XXX */
if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
dfs->fs_nrpos = -1; /* XXX */
if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
int32_t *lp, tmp; /* XXX */
/* XXX */
lp = (int32_t *)&dfs->fs_qbmask; /* XXX */
tmp = lp[4]; /* XXX */
for (i = 4; i > 0; i--) /* XXX */
lp[i] = lp[i-1]; /* XXX */
lp[0] = tmp; /* XXX */
} /* XXX */
dfs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
if (waitfor != MNT_WAIT)
bawrite(bp);
else if (error = bwrite(bp))
allerror = error;
return (allerror);
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ffs_vnops.c 8.7 (Berkeley) 2/3/94
* @(#)ffs_vnops.c 8.15 (Berkeley) 5/14/95
*/
#include <sys/param.h>
@ -55,6 +55,7 @@
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ffs/fs.h>
@ -66,6 +67,7 @@ struct vnodeopv_entry_desc ffs_vnodeop_entries[] = {
{ &vop_default_desc, vn_default_error },
{ &vop_lookup_desc, ufs_lookup }, /* lookup */
{ &vop_create_desc, ufs_create }, /* create */
{ &vop_whiteout_desc, ufs_whiteout }, /* whiteout */
{ &vop_mknod_desc, ufs_mknod }, /* mknod */
{ &vop_open_desc, ufs_open }, /* open */
{ &vop_close_desc, ufs_close }, /* close */
@ -74,8 +76,10 @@ struct vnodeopv_entry_desc ffs_vnodeop_entries[] = {
{ &vop_setattr_desc, ufs_setattr }, /* setattr */
{ &vop_read_desc, ffs_read }, /* read */
{ &vop_write_desc, ffs_write }, /* write */
{ &vop_lease_desc, ufs_lease_check }, /* lease */
{ &vop_ioctl_desc, ufs_ioctl }, /* ioctl */
{ &vop_select_desc, ufs_select }, /* select */
{ &vop_revoke_desc, ufs_revoke }, /* revoke */
{ &vop_mmap_desc, ufs_mmap }, /* mmap */
{ &vop_fsync_desc, ffs_fsync }, /* fsync */
{ &vop_seek_desc, ufs_seek }, /* seek */
@ -89,7 +93,7 @@ struct vnodeopv_entry_desc ffs_vnodeop_entries[] = {
{ &vop_readlink_desc, ufs_readlink }, /* readlink */
{ &vop_abortop_desc, ufs_abortop }, /* abortop */
{ &vop_inactive_desc, ufs_inactive }, /* inactive */
{ &vop_reclaim_desc, ufs_reclaim }, /* reclaim */
{ &vop_reclaim_desc, ffs_reclaim }, /* reclaim */
{ &vop_lock_desc, ufs_lock }, /* lock */
{ &vop_unlock_desc, ufs_unlock }, /* unlock */
{ &vop_bmap_desc, ufs_bmap }, /* bmap */
@ -123,8 +127,10 @@ struct vnodeopv_entry_desc ffs_specop_entries[] = {
{ &vop_setattr_desc, ufs_setattr }, /* setattr */
{ &vop_read_desc, ufsspec_read }, /* read */
{ &vop_write_desc, ufsspec_write }, /* write */
{ &vop_lease_desc, spec_lease_check }, /* lease */
{ &vop_ioctl_desc, spec_ioctl }, /* ioctl */
{ &vop_select_desc, spec_select }, /* select */
{ &vop_revoke_desc, spec_revoke }, /* revoke */
{ &vop_mmap_desc, spec_mmap }, /* mmap */
{ &vop_fsync_desc, ffs_fsync }, /* fsync */
{ &vop_seek_desc, spec_seek }, /* seek */
@ -138,7 +144,7 @@ struct vnodeopv_entry_desc ffs_specop_entries[] = {
{ &vop_readlink_desc, spec_readlink }, /* readlink */
{ &vop_abortop_desc, spec_abortop }, /* abortop */
{ &vop_inactive_desc, ufs_inactive }, /* inactive */
{ &vop_reclaim_desc, ufs_reclaim }, /* reclaim */
{ &vop_reclaim_desc, ffs_reclaim }, /* reclaim */
{ &vop_lock_desc, ufs_lock }, /* lock */
{ &vop_unlock_desc, ufs_unlock }, /* unlock */
{ &vop_bmap_desc, spec_bmap }, /* bmap */
@ -173,8 +179,10 @@ struct vnodeopv_entry_desc ffs_fifoop_entries[] = {
{ &vop_setattr_desc, ufs_setattr }, /* setattr */
{ &vop_read_desc, ufsfifo_read }, /* read */
{ &vop_write_desc, ufsfifo_write }, /* write */
{ &vop_lease_desc, fifo_lease_check }, /* lease */
{ &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
{ &vop_select_desc, fifo_select }, /* select */
{ &vop_revoke_desc, fifo_revoke }, /* revoke */
{ &vop_mmap_desc, fifo_mmap }, /* mmap */
{ &vop_fsync_desc, ffs_fsync }, /* fsync */
{ &vop_seek_desc, fifo_seek }, /* seek */
@ -188,7 +196,7 @@ struct vnodeopv_entry_desc ffs_fifoop_entries[] = {
{ &vop_readlink_desc, fifo_readlink }, /* readlink */
{ &vop_abortop_desc, fifo_abortop }, /* abortop */
{ &vop_inactive_desc, ufs_inactive }, /* inactive */
{ &vop_reclaim_desc, ufs_reclaim }, /* reclaim */
{ &vop_reclaim_desc, ffs_reclaim }, /* reclaim */
{ &vop_lock_desc, ufs_lock }, /* lock */
{ &vop_unlock_desc, ufs_unlock }, /* unlock */
{ &vop_bmap_desc, fifo_bmap }, /* bmap */
@ -210,20 +218,11 @@ struct vnodeopv_desc ffs_fifoop_opv_desc =
{ &ffs_fifoop_p, ffs_fifoop_entries };
#endif /* FIFO */
#ifdef DEBUG
/*
* Enabling cluster read/write operations.
*/
#include <sys/sysctl.h>
int doclusterread = 1;
struct ctldebug debug11 = { "doclusterread", &doclusterread };
int doclusterwrite = 1;
struct ctldebug debug12 = { "doclusterwrite", &doclusterwrite };
#else
/* XXX for ufs_readwrite */
#define doclusterread 1
#define doclusterwrite 1
#endif
#include <ufs/ufs/ufs_readwrite.c>
@ -286,3 +285,24 @@ ffs_fsync(ap)
tv = time;
return (VOP_UPDATE(ap->a_vp, &tv, &tv, ap->a_waitfor == MNT_WAIT));
}
/*
* Reclaim an inode so that it can be used for other purposes.
*/
int
ffs_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
int error;
if (error = ufs_reclaim(vp, ap->a_p))
return (error);
FREE(vp->v_data, VFSTOUFS(vp->v_mount)->um_devvp->v_tag == VT_MFS ?
M_MFSNODE : M_FFSNODE);
vp->v_data = NULL;
return (0);
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)fs.h 8.7 (Berkeley) 4/19/94
* @(#)fs.h 8.13 (Berkeley) 3/21/95
*/
/*
@ -61,8 +61,8 @@
#define SBSIZE 8192
#define BBOFF ((off_t)(0))
#define SBOFF ((off_t)(BBOFF + BBSIZE))
#define BBLOCK ((daddr_t)(0))
#define SBLOCK ((daddr_t)(BBLOCK + BBSIZE / DEV_BSIZE))
#define BBLOCK ((ufs_daddr_t)(0))
#define SBLOCK ((ufs_daddr_t)(BBLOCK + BBSIZE / DEV_BSIZE))
/*
* Addresses stored in inodes are capable of addressing fragments
@ -98,12 +98,18 @@
* The path name on which the file system is mounted is maintained
* in fs_fsmnt. MAXMNTLEN defines the amount of space allocated in
* the super block for this name.
* The limit on the amount of summary information per file system
* is defined by MAXCSBUFS. It is currently parameterized for a
* maximum of two million cylinders.
*/
#define MAXMNTLEN 512
#define MAXCSBUFS 32
/*
* The limit on the amount of summary information per file system
* is defined by MAXCSBUFS. It is currently parameterized for a
* size of 128 bytes (2 million cylinder groups on machines with
* 32-bit pointers, and 1 million on 64-bit machines). One pointer
* is taken away to point to an array of cluster sizes that is
* computed as cylinder groups are inspected.
*/
#define MAXCSBUFS ((128 / sizeof(void *)) - 1)
/*
* A summary of contiguous blocks of various sizes is maintained
@ -138,105 +144,107 @@
* the ``fs_cs'' macro to work (see below).
*/
struct csum {
long cs_ndir; /* number of directories */
long cs_nbfree; /* number of free blocks */
long cs_nifree; /* number of free inodes */
long cs_nffree; /* number of free frags */
int32_t cs_ndir; /* number of directories */
int32_t cs_nbfree; /* number of free blocks */
int32_t cs_nifree; /* number of free inodes */
int32_t cs_nffree; /* number of free frags */
};
/*
* Super block for a file system.
* Super block for an FFS file system.
*/
struct fs {
struct fs *fs_link; /* linked list of file systems */
struct fs *fs_rlink; /* used for incore super blocks */
daddr_t fs_sblkno; /* addr of super-block in filesys */
daddr_t fs_cblkno; /* offset of cyl-block in filesys */
daddr_t fs_iblkno; /* offset of inode-blocks in filesys */
daddr_t fs_dblkno; /* offset of first data after cg */
long fs_cgoffset; /* cylinder group offset in cylinder */
long fs_cgmask; /* used to calc mod fs_ntrak */
int32_t fs_firstfield; /* historic file system linked list, */
int32_t fs_unused_1; /* used for incore super blocks */
ufs_daddr_t fs_sblkno; /* addr of super-block in filesys */
ufs_daddr_t fs_cblkno; /* offset of cyl-block in filesys */
ufs_daddr_t fs_iblkno; /* offset of inode-blocks in filesys */
ufs_daddr_t fs_dblkno; /* offset of first data after cg */
int32_t fs_cgoffset; /* cylinder group offset in cylinder */
int32_t fs_cgmask; /* used to calc mod fs_ntrak */
time_t fs_time; /* last time written */
long fs_size; /* number of blocks in fs */
long fs_dsize; /* number of data blocks in fs */
long fs_ncg; /* number of cylinder groups */
long fs_bsize; /* size of basic blocks in fs */
long fs_fsize; /* size of frag blocks in fs */
long fs_frag; /* number of frags in a block in fs */
int32_t fs_size; /* number of blocks in fs */
int32_t fs_dsize; /* number of data blocks in fs */
int32_t fs_ncg; /* number of cylinder groups */
int32_t fs_bsize; /* size of basic blocks in fs */
int32_t fs_fsize; /* size of frag blocks in fs */
int32_t fs_frag; /* number of frags in a block in fs */
/* these are configuration parameters */
long fs_minfree; /* minimum percentage of free blocks */
long fs_rotdelay; /* num of ms for optimal next block */
long fs_rps; /* disk revolutions per second */
int32_t fs_minfree; /* minimum percentage of free blocks */
int32_t fs_rotdelay; /* num of ms for optimal next block */
int32_t fs_rps; /* disk revolutions per second */
/* these fields can be computed from the others */
long fs_bmask; /* ``blkoff'' calc of blk offsets */
long fs_fmask; /* ``fragoff'' calc of frag offsets */
long fs_bshift; /* ``lblkno'' calc of logical blkno */
long fs_fshift; /* ``numfrags'' calc number of frags */
int32_t fs_bmask; /* ``blkoff'' calc of blk offsets */
int32_t fs_fmask; /* ``fragoff'' calc of frag offsets */
int32_t fs_bshift; /* ``lblkno'' calc of logical blkno */
int32_t fs_fshift; /* ``numfrags'' calc number of frags */
/* these are configuration parameters */
long fs_maxcontig; /* max number of contiguous blks */
long fs_maxbpg; /* max number of blks per cyl group */
int32_t fs_maxcontig; /* max number of contiguous blks */
int32_t fs_maxbpg; /* max number of blks per cyl group */
/* these fields can be computed from the others */
long fs_fragshift; /* block to frag shift */
long fs_fsbtodb; /* fsbtodb and dbtofsb shift constant */
long fs_sbsize; /* actual size of super block */
long fs_csmask; /* csum block offset */
long fs_csshift; /* csum block number */
long fs_nindir; /* value of NINDIR */
long fs_inopb; /* value of INOPB */
long fs_nspf; /* value of NSPF */
int32_t fs_fragshift; /* block to frag shift */
int32_t fs_fsbtodb; /* fsbtodb and dbtofsb shift constant */
int32_t fs_sbsize; /* actual size of super block */
int32_t fs_csmask; /* csum block offset */
int32_t fs_csshift; /* csum block number */
int32_t fs_nindir; /* value of NINDIR */
int32_t fs_inopb; /* value of INOPB */
int32_t fs_nspf; /* value of NSPF */
/* yet another configuration parameter */
long fs_optim; /* optimization preference, see below */
int32_t fs_optim; /* optimization preference, see below */
/* these fields are derived from the hardware */
long fs_npsect; /* # sectors/track including spares */
long fs_interleave; /* hardware sector interleave */
long fs_trackskew; /* sector 0 skew, per track */
long fs_headswitch; /* head switch time, usec */
long fs_trkseek; /* track-to-track seek, usec */
int32_t fs_npsect; /* # sectors/track including spares */
int32_t fs_interleave; /* hardware sector interleave */
int32_t fs_trackskew; /* sector 0 skew, per track */
int32_t fs_headswitch; /* head switch time, usec */
int32_t fs_trkseek; /* track-to-track seek, usec */
/* sizes determined by number of cylinder groups and their sizes */
daddr_t fs_csaddr; /* blk addr of cyl grp summary area */
long fs_cssize; /* size of cyl grp summary area */
long fs_cgsize; /* cylinder group size */
ufs_daddr_t fs_csaddr; /* blk addr of cyl grp summary area */
int32_t fs_cssize; /* size of cyl grp summary area */
int32_t fs_cgsize; /* cylinder group size */
/* these fields are derived from the hardware */
long fs_ntrak; /* tracks per cylinder */
long fs_nsect; /* sectors per track */
long fs_spc; /* sectors per cylinder */
int32_t fs_ntrak; /* tracks per cylinder */
int32_t fs_nsect; /* sectors per track */
int32_t fs_spc; /* sectors per cylinder */
/* this comes from the disk driver partitioning */
long fs_ncyl; /* cylinders in file system */
int32_t fs_ncyl; /* cylinders in file system */
/* these fields can be computed from the others */
long fs_cpg; /* cylinders per group */
long fs_ipg; /* inodes per group */
long fs_fpg; /* blocks per group * fs_frag */
int32_t fs_cpg; /* cylinders per group */
int32_t fs_ipg; /* inodes per group */
int32_t fs_fpg; /* blocks per group * fs_frag */
/* this data must be re-computed after crashes */
struct csum fs_cstotal; /* cylinder summary information */
/* these fields are cleared at mount time */
char fs_fmod; /* super block modified flag */
char fs_clean; /* file system is clean flag */
char fs_ronly; /* mounted read-only flag */
char fs_flags; /* currently unused flag */
char fs_fsmnt[MAXMNTLEN]; /* name mounted on */
int8_t fs_fmod; /* super block modified flag */
int8_t fs_clean; /* file system is clean flag */
int8_t fs_ronly; /* mounted read-only flag */
int8_t fs_flags; /* currently unused flag */
u_char fs_fsmnt[MAXMNTLEN]; /* name mounted on */
/* these fields retain the current block allocation info */
long fs_cgrotor; /* last cg searched */
int32_t fs_cgrotor; /* last cg searched */
struct csum *fs_csp[MAXCSBUFS];/* list of fs_cs info buffers */
long fs_cpc; /* cyl per cycle in postbl */
short fs_opostbl[16][8]; /* old rotation block list head */
long fs_sparecon[50]; /* reserved for future constants */
long fs_contigsumsize; /* size of cluster summary array */
long fs_maxsymlinklen; /* max length of an internal symlink */
long fs_inodefmt; /* format of on-disk inodes */
u_quad_t fs_maxfilesize; /* maximum representable file size */
quad_t fs_qbmask; /* ~fs_bmask - for use with quad size */
quad_t fs_qfmask; /* ~fs_fmask - for use with quad size */
long fs_state; /* validate fs_clean field */
long fs_postblformat; /* format of positional layout tables */
long fs_nrpos; /* number of rotational positions */
long fs_postbloff; /* (short) rotation block list head */
long fs_rotbloff; /* (u_char) blocks for each rotation */
long fs_magic; /* magic number */
u_char fs_space[1]; /* list of blocks for each rotation */
int32_t *fs_maxcluster; /* max cluster in each cyl group */
int32_t fs_cpc; /* cyl per cycle in postbl */
int16_t fs_opostbl[16][8]; /* old rotation block list head */
int32_t fs_sparecon[50]; /* reserved for future constants */
int32_t fs_contigsumsize; /* size of cluster summary array */
int32_t fs_maxsymlinklen; /* max length of an internal symlink */
int32_t fs_inodefmt; /* format of on-disk inodes */
u_int64_t fs_maxfilesize; /* maximum representable file size */
int64_t fs_qbmask; /* ~fs_bmask for use with 64-bit size */
int64_t fs_qfmask; /* ~fs_fmask for use with 64-bit size */
int32_t fs_state; /* validate fs_clean field */
int32_t fs_postblformat; /* format of positional layout tables */
int32_t fs_nrpos; /* number of rotational positions */
int32_t fs_postbloff; /* (u_int16) rotation block list head */
int32_t fs_rotbloff; /* (u_int8) blocks for each rotation */
int32_t fs_magic; /* magic number */
u_int8_t fs_space[1]; /* list of blocks for each rotation */
/* actually longer */
};
/*
* Filesystem idetification
* Filesystem identification
*/
#define FS_MAGIC 0x011954 /* the fast filesystem magic number */
#define FS_OKAY 0x7c269d38 /* superblock checksum */
@ -259,11 +267,12 @@ struct fs {
#define fs_postbl(fs, cylno) \
(((fs)->fs_postblformat == FS_42POSTBLFMT) \
? ((fs)->fs_opostbl[cylno]) \
: ((short *)((char *)(fs) + (fs)->fs_postbloff) + (cylno) * (fs)->fs_nrpos))
: ((int16_t *)((u_int8_t *)(fs) + \
(fs)->fs_postbloff) + (cylno) * (fs)->fs_nrpos))
#define fs_rotbl(fs) \
(((fs)->fs_postblformat == FS_42POSTBLFMT) \
? ((fs)->fs_space) \
: ((u_char *)((char *)(fs) + (fs)->fs_rotbloff)))
: ((u_int8_t *)((u_int8_t *)(fs) + (fs)->fs_rotbloff)))
/*
* The size of a cylinder group is calculated by CGSIZE. The maximum size
@ -272,13 +281,13 @@ struct fs {
* cylinder group and the (struct cg) size.
*/
#define CGSIZE(fs) \
/* base cg */ (sizeof(struct cg) + sizeof(long) + \
/* blktot size */ (fs)->fs_cpg * sizeof(long) + \
/* blks size */ (fs)->fs_cpg * (fs)->fs_nrpos * sizeof(short) + \
/* base cg */ (sizeof(struct cg) + sizeof(int32_t) + \
/* blktot size */ (fs)->fs_cpg * sizeof(int32_t) + \
/* blks size */ (fs)->fs_cpg * (fs)->fs_nrpos * sizeof(int16_t) + \
/* inode map */ howmany((fs)->fs_ipg, NBBY) + \
/* block map */ howmany((fs)->fs_cpg * (fs)->fs_spc / NSPF(fs), NBBY) +\
/* if present */ ((fs)->fs_contigsumsize <= 0 ? 0 : \
/* cluster sum */ (fs)->fs_contigsumsize * sizeof(long) + \
/* cluster sum */ (fs)->fs_contigsumsize * sizeof(int32_t) + \
/* cluster map */ howmany((fs)->fs_cpg * (fs)->fs_spc / NSPB(fs), NBBY)))
/*
@ -294,78 +303,80 @@ struct fs {
*/
#define CG_MAGIC 0x090255
struct cg {
struct cg *cg_link; /* linked list of cyl groups */
long cg_magic; /* magic number */
int32_t cg_firstfield; /* historic cyl groups linked list */
int32_t cg_magic; /* magic number */
time_t cg_time; /* time last written */
long cg_cgx; /* we are the cgx'th cylinder group */
short cg_ncyl; /* number of cyl's this cg */
short cg_niblk; /* number of inode blocks this cg */
long cg_ndblk; /* number of data blocks this cg */
int32_t cg_cgx; /* we are the cgx'th cylinder group */
int16_t cg_ncyl; /* number of cyl's this cg */
int16_t cg_niblk; /* number of inode blocks this cg */
int32_t cg_ndblk; /* number of data blocks this cg */
struct csum cg_cs; /* cylinder summary information */
long cg_rotor; /* position of last used block */
long cg_frotor; /* position of last used frag */
long cg_irotor; /* position of last used inode */
long cg_frsum[MAXFRAG]; /* counts of available frags */
long cg_btotoff; /* (long) block totals per cylinder */
long cg_boff; /* (short) free block positions */
long cg_iusedoff; /* (char) used inode map */
long cg_freeoff; /* (u_char) free block map */
long cg_nextfreeoff; /* (u_char) next available space */
long cg_clustersumoff; /* (long) counts of avail clusters */
long cg_clusteroff; /* (char) free cluster map */
long cg_nclusterblks; /* number of clusters this cg */
long cg_sparecon[13]; /* reserved for future use */
u_char cg_space[1]; /* space for cylinder group maps */
int32_t cg_rotor; /* position of last used block */
int32_t cg_frotor; /* position of last used frag */
int32_t cg_irotor; /* position of last used inode */
int32_t cg_frsum[MAXFRAG]; /* counts of available frags */
int32_t cg_btotoff; /* (int32) block totals per cylinder */
int32_t cg_boff; /* (u_int16) free block positions */
int32_t cg_iusedoff; /* (u_int8) used inode map */
int32_t cg_freeoff; /* (u_int8) free block map */
int32_t cg_nextfreeoff; /* (u_int8) next available space */
int32_t cg_clustersumoff; /* (u_int32) counts of avail clusters */
int32_t cg_clusteroff; /* (u_int8) free cluster map */
int32_t cg_nclusterblks; /* number of clusters this cg */
int32_t cg_sparecon[13]; /* reserved for future use */
u_int8_t cg_space[1]; /* space for cylinder group maps */
/* actually longer */
};
/*
* Macros for access to cylinder group array structures
*/
#define cg_blktot(cgp) \
(((cgp)->cg_magic != CG_MAGIC) \
? (((struct ocg *)(cgp))->cg_btot) \
: ((long *)((char *)(cgp) + (cgp)->cg_btotoff)))
: ((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_btotoff)))
#define cg_blks(fs, cgp, cylno) \
(((cgp)->cg_magic != CG_MAGIC) \
? (((struct ocg *)(cgp))->cg_b[cylno]) \
: ((short *)((char *)(cgp) + (cgp)->cg_boff) + (cylno) * (fs)->fs_nrpos))
: ((int16_t *)((u_int8_t *)(cgp) + \
(cgp)->cg_boff) + (cylno) * (fs)->fs_nrpos))
#define cg_inosused(cgp) \
(((cgp)->cg_magic != CG_MAGIC) \
? (((struct ocg *)(cgp))->cg_iused) \
: ((char *)((char *)(cgp) + (cgp)->cg_iusedoff)))
: ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_iusedoff)))
#define cg_blksfree(cgp) \
(((cgp)->cg_magic != CG_MAGIC) \
? (((struct ocg *)(cgp))->cg_free) \
: ((u_char *)((char *)(cgp) + (cgp)->cg_freeoff)))
: ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff)))
#define cg_chkmagic(cgp) \
((cgp)->cg_magic == CG_MAGIC || ((struct ocg *)(cgp))->cg_magic == CG_MAGIC)
#define cg_clustersfree(cgp) \
((u_char *)((char *)(cgp) + (cgp)->cg_clusteroff))
((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_clusteroff))
#define cg_clustersum(cgp) \
((long *)((char *)(cgp) + (cgp)->cg_clustersumoff))
((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_clustersumoff))
/*
* The following structure is defined
* for compatibility with old file systems.
*/
struct ocg {
struct ocg *cg_link; /* linked list of cyl groups */
struct ocg *cg_rlink; /* used for incore cyl groups */
int32_t cg_firstfield; /* historic linked list of cyl groups */
int32_t cg_unused_1; /* used for incore cyl groups */
time_t cg_time; /* time last written */
long cg_cgx; /* we are the cgx'th cylinder group */
short cg_ncyl; /* number of cyl's this cg */
short cg_niblk; /* number of inode blocks this cg */
long cg_ndblk; /* number of data blocks this cg */
int32_t cg_cgx; /* we are the cgx'th cylinder group */
int16_t cg_ncyl; /* number of cyl's this cg */
int16_t cg_niblk; /* number of inode blocks this cg */
int32_t cg_ndblk; /* number of data blocks this cg */
struct csum cg_cs; /* cylinder summary information */
long cg_rotor; /* position of last used block */
long cg_frotor; /* position of last used frag */
long cg_irotor; /* position of last used inode */
long cg_frsum[8]; /* counts of available frags */
long cg_btot[32]; /* block totals per cylinder */
short cg_b[32][8]; /* positions of free blocks */
char cg_iused[256]; /* used inode map */
long cg_magic; /* magic number */
u_char cg_free[1]; /* free block map */
int32_t cg_rotor; /* position of last used block */
int32_t cg_frotor; /* position of last used frag */
int32_t cg_irotor; /* position of last used inode */
int32_t cg_frsum[8]; /* counts of available frags */
int32_t cg_btot[32]; /* block totals per cylinder */
int16_t cg_b[32][8]; /* positions of free blocks */
u_int8_t cg_iused[256]; /* used inode map */
int32_t cg_magic; /* magic number */
u_int8_t cg_free[1]; /* free block map */
/* actually longer */
};
@ -380,7 +391,7 @@ struct ocg {
* Cylinder group macros to locate things in cylinder groups.
* They calc file system addresses of cylinder group data structures.
*/
#define cgbase(fs, c) ((daddr_t)((fs)->fs_fpg * (c)))
#define cgbase(fs, c) ((ufs_daddr_t)((fs)->fs_fpg * (c)))
#define cgdmin(fs, c) (cgstart(fs, c) + (fs)->fs_dblkno) /* 1st data */
#define cgimin(fs, c) (cgstart(fs, c) + (fs)->fs_iblkno) /* inode blk */
#define cgsblock(fs, c) (cgstart(fs, c) + (fs)->fs_sblkno) /* super blk */
@ -396,7 +407,7 @@ struct ocg {
*/
#define ino_to_cg(fs, x) ((x) / (fs)->fs_ipg)
#define ino_to_fsba(fs, x) \
((daddr_t)(cgimin(fs, ino_to_cg(fs, x)) + \
((ufs_daddr_t)(cgimin(fs, ino_to_cg(fs, x)) + \
(blkstofrags((fs), (((x) % (fs)->fs_ipg) / INOPB(fs))))))
#define ino_to_fsbo(fs, x) ((x) % INOPB(fs))
@ -450,7 +461,7 @@ struct ocg {
/*
* Determine the number of available frags given a
* percentage to hold in reserve
* percentage to hold in reserve.
*/
#define freespace(fs, percentreserved) \
(blkstofrags((fs), (fs)->fs_cstotal.cs_nbfree) + \
@ -469,19 +480,20 @@ struct ocg {
: (fragroundup(fs, blkoff(fs, (dip)->di_size))))
/*
* Number of disk sectors per block; assumes DEV_BSIZE byte sector size.
* Number of disk sectors per block/fragment; assumes DEV_BSIZE byte
* sector size.
*/
#define NSPB(fs) ((fs)->fs_nspf << (fs)->fs_fragshift)
#define NSPF(fs) ((fs)->fs_nspf)
/*
* INOPB is the number of inodes in a secondary storage block.
* Number of inodes in a secondary storage block/fragment.
*/
#define INOPB(fs) ((fs)->fs_inopb)
#define INOPF(fs) ((fs)->fs_inopb >> (fs)->fs_fragshift)
/*
* NINDIR is the number of indirects in a file system block.
* Number of indirects in a file system block.
*/
#define NINDIR(fs) ((fs)->fs_nindir)

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs.h 8.3 (Berkeley) 9/23/93
* @(#)lfs.h 8.9 (Berkeley) 5/8/95
*/
#define LFS_LABELPAD 8192 /* LFS label size */
@ -55,14 +55,15 @@
/* On-disk and in-memory checkpoint segment usage structure. */
typedef struct segusage SEGUSE;
struct segusage {
u_long su_nbytes; /* number of live bytes */
u_long su_lastmod; /* SEGUSE last modified timestamp */
u_short su_nsums; /* number of summaries in segment */
u_short su_ninos; /* number of inode blocks in seg */
#define SEGUSE_ACTIVE 0x1 /* segment is currently being written */
#define SEGUSE_DIRTY 0x2 /* segment has data in it */
#define SEGUSE_SUPERBLOCK 0x4 /* segment contains a superblock */
u_long su_flags;
u_int32_t su_nbytes; /* number of live bytes */
u_int32_t su_lastmod; /* SEGUSE last modified timestamp */
u_int16_t su_nsums; /* number of summaries in segment */
u_int16_t su_ninos; /* number of inode blocks in seg */
#define SEGUSE_ACTIVE 0x01 /* segment is currently being written */
#define SEGUSE_DIRTY 0x02 /* segment has data in it */
#define SEGUSE_SUPERBLOCK 0x04 /* segment contains a superblock */
u_int32_t su_flags;
};
#define SEGUPB(fs) (1 << (fs)->lfs_sushift)
@ -72,70 +73,76 @@ struct segusage {
/* On-disk file information. One per file with data blocks in the segment. */
typedef struct finfo FINFO;
struct finfo {
u_long fi_nblocks; /* number of blocks */
u_long fi_version; /* version number */
u_long fi_ino; /* inode number */
long fi_blocks[1]; /* array of logical block numbers */
u_int32_t fi_nblocks; /* number of blocks */
u_int32_t fi_version; /* version number */
u_int32_t fi_ino; /* inode number */
u_int32_t fi_lastlength; /* length of last block in array */
ufs_daddr_t fi_blocks[1]; /* array of logical block numbers */
};
/* On-disk and in-memory super block. */
struct lfs {
#define LFS_MAGIC 0x070162
u_long lfs_magic; /* magic number */
u_int32_t lfs_magic; /* magic number */
#define LFS_VERSION 1
u_long lfs_version; /* version number */
u_int32_t lfs_version; /* version number */
u_long lfs_size; /* number of blocks in fs */
u_long lfs_ssize; /* number of blocks per segment */
u_long lfs_dsize; /* number of disk blocks in fs */
u_long lfs_bsize; /* file system block size */
u_long lfs_fsize; /* size of frag blocks in fs */
u_long lfs_frag; /* number of frags in a block in fs */
u_int32_t lfs_size; /* number of blocks in fs */
u_int32_t lfs_ssize; /* number of blocks per segment */
u_int32_t lfs_dsize; /* number of disk blocks in fs */
u_int32_t lfs_bsize; /* file system block size */
u_int32_t lfs_fsize; /* size of frag blocks in fs */
u_int32_t lfs_frag; /* number of frags in a block in fs */
/* Checkpoint region. */
ino_t lfs_free; /* start of the free list */
u_long lfs_bfree; /* number of free disk blocks */
u_long lfs_nfiles; /* number of allocated inodes */
long lfs_avail; /* blocks available for writing */
u_long lfs_uinodes; /* inodes in cache not yet on disk */
daddr_t lfs_idaddr; /* inode file disk address */
u_int32_t lfs_bfree; /* number of free disk blocks */
u_int32_t lfs_nfiles; /* number of allocated inodes */
int32_t lfs_avail; /* blocks available for writing */
u_int32_t lfs_uinodes; /* inodes in cache not yet on disk */
ufs_daddr_t lfs_idaddr; /* inode file disk address */
ino_t lfs_ifile; /* inode file inode number */
daddr_t lfs_lastseg; /* address of last segment written */
daddr_t lfs_nextseg; /* address of next segment to write */
daddr_t lfs_curseg; /* current segment being written */
daddr_t lfs_offset; /* offset in curseg for next partial */
daddr_t lfs_lastpseg; /* address of last partial written */
u_long lfs_tstamp; /* time stamp */
ufs_daddr_t lfs_lastseg; /* address of last segment written */
ufs_daddr_t lfs_nextseg; /* address of next segment to write */
ufs_daddr_t lfs_curseg; /* current segment being written */
ufs_daddr_t lfs_offset; /* offset in curseg for next partial */
ufs_daddr_t lfs_lastpseg; /* address of last partial written */
u_int32_t lfs_tstamp; /* time stamp */
/* These are configuration parameters. */
u_long lfs_minfree; /* minimum percentage of free blocks */
u_int32_t lfs_minfree; /* minimum percentage of free blocks */
/* These fields can be computed from the others. */
u_quad_t lfs_maxfilesize; /* maximum representable file size */
u_long lfs_dbpseg; /* disk blocks per segment */
u_long lfs_inopb; /* inodes per block */
u_long lfs_ifpb; /* IFILE entries per block */
u_long lfs_sepb; /* SEGUSE entries per block */
u_long lfs_nindir; /* indirect pointers per block */
u_long lfs_nseg; /* number of segments */
u_long lfs_nspf; /* number of sectors per fragment */
u_long lfs_cleansz; /* cleaner info size in blocks */
u_long lfs_segtabsz; /* segment table size in blocks */
u_int64_t lfs_maxfilesize; /* maximum representable file size */
u_int32_t lfs_dbpseg; /* disk blocks per segment */
u_int32_t lfs_inopb; /* inodes per block */
u_int32_t lfs_ifpb; /* IFILE entries per block */
u_int32_t lfs_sepb; /* SEGUSE entries per block */
u_int32_t lfs_nindir; /* indirect pointers per block */
u_int32_t lfs_nseg; /* number of segments */
u_int32_t lfs_nspf; /* number of sectors per fragment */
u_int32_t lfs_cleansz; /* cleaner info size in blocks */
u_int32_t lfs_segtabsz; /* segment table size in blocks */
u_long lfs_segmask; /* calculate offset within a segment */
u_long lfs_segshift; /* fast mult/div for segments */
u_long lfs_bmask; /* calc block offset from file offset */
u_long lfs_bshift; /* calc block number from file offset */
u_long lfs_ffmask; /* calc frag offset from file offset */
u_long lfs_ffshift; /* fast mult/div for frag from file */
u_long lfs_fbmask; /* calc frag offset from block offset */
u_long lfs_fbshift; /* fast mult/div for frag from block */
u_long lfs_fsbtodb; /* fsbtodb and dbtofsb shift constant */
u_long lfs_sushift; /* fast mult/div for segusage table */
u_int32_t lfs_segmask; /* calculate offset within a segment */
u_int32_t lfs_segshift; /* fast mult/div for segments */
u_int64_t lfs_bmask; /* calc block offset from file offset */
u_int32_t lfs_bshift; /* calc block number from file offset */
u_int64_t lfs_ffmask; /* calc frag offset from file offset */
u_int32_t lfs_ffshift; /* fast mult/div for frag from file */
u_int64_t lfs_fbmask; /* calc frag offset from block offset */
u_int32_t lfs_fbshift; /* fast mult/div for frag from block */
u_int32_t lfs_fsbtodb; /* fsbtodb and dbtofsb shift constant */
u_int32_t lfs_sushift; /* fast mult/div for segusage table */
int32_t lfs_maxsymlinklen; /* max length of an internal symlink */
#define LFS_MIN_SBINTERVAL 5 /* minimum superblock segment spacing */
#define LFS_MAXNUMSB 10 /* superblock disk offsets */
daddr_t lfs_sboffs[LFS_MAXNUMSB];
ufs_daddr_t lfs_sboffs[LFS_MAXNUMSB];
/* Checksum -- last valid disk field. */
u_int32_t lfs_cksum; /* checksum for superblock checking */
/* These fields are set at mount time and are meaningless on disk. */
struct segment *lfs_sp; /* current segment being written */
@ -147,23 +154,21 @@ struct lfs {
u_long lfs_dirops; /* count of active directory ops */
u_long lfs_doifile; /* Write ifile blocks on next write */
u_long lfs_nactive; /* Number of segments since last ckp */
u_char lfs_fmod; /* super block modified flag */
u_char lfs_clean; /* file system is clean flag */
u_char lfs_ronly; /* mounted read-only flag */
u_char lfs_flags; /* currently unused flag */
int8_t lfs_fmod; /* super block modified flag */
int8_t lfs_clean; /* file system is clean flag */
int8_t lfs_ronly; /* mounted read-only flag */
int8_t lfs_flags; /* currently unused flag */
u_char lfs_fsmnt[MNAMELEN]; /* name mounted on */
u_char pad[3]; /* long-align */
/* Checksum; valid on disk. */
u_long lfs_cksum; /* checksum for superblock checking */
int32_t lfs_pad[40]; /* round to 512 bytes */
};
/*
* Inode 0 is the out-of-band inode number, inode 1 is the inode number for
* the IFILE, the root inode is 2 and the lost+found inode is 3.
* Inode 0: out-of-band inode number
* Inode 1: IFILE inode number
* Inode 2: root inode
* Inode 3: lost+found inode number
*/
/* Fixed inode numbers. */
#define LFS_UNUSED_INUM 0 /* out of band inode number */
#define LFS_IFILE_INUM 1 /* IFILE inode number */
#define LOSTFOUNDINO 3 /* lost+found inode number */
@ -182,9 +187,9 @@ struct lfs {
typedef struct ifile IFILE;
struct ifile {
u_long if_version; /* inode version number */
u_int32_t if_version; /* inode version number */
#define LFS_UNUSED_DADDR 0 /* out-of-band daddr */
daddr_t if_daddr; /* inode disk address */
ufs_daddr_t if_daddr; /* inode disk address */
ino_t if_nextfree; /* next-unallocated inode */
};
@ -193,8 +198,8 @@ struct ifile {
* to pass information between the cleaner and the kernel.
*/
typedef struct _cleanerinfo {
u_long clean; /* K: number of clean segments */
u_long dirty; /* K: number of dirty segments */
u_int32_t clean; /* K: number of clean segments */
u_int32_t dirty; /* K: number of dirty segments */
} CLEANERINFO;
#define CLEANSIZE_SU(fs) \
@ -209,16 +214,19 @@ typedef struct _cleanerinfo {
/* On-disk segment summary information */
typedef struct segsum SEGSUM;
struct segsum {
u_long ss_sumsum; /* check sum of summary block */
u_long ss_datasum; /* check sum of data */
daddr_t ss_next; /* next segment */
u_long ss_create; /* creation time stamp */
u_short ss_nfinfo; /* number of file info structures */
u_short ss_ninos; /* number of inodes in summary */
u_int32_t ss_sumsum; /* check sum of summary block */
u_int32_t ss_datasum; /* check sum of data */
u_int32_t ss_magic; /* segment summary magic number */
#define SS_MAGIC 0x061561
ufs_daddr_t ss_next; /* next segment */
u_int32_t ss_create; /* creation time stamp */
u_int16_t ss_nfinfo; /* number of file info structures */
u_int16_t ss_ninos; /* number of inodes in summary */
#define SS_DIROP 0x01 /* segment begins a dirop */
#define SS_CONT 0x02 /* more partials to finish this write*/
u_short ss_flags; /* used for directory operations */
u_short ss_pad; /* extra space */
u_int16_t ss_flags; /* used for directory operations */
u_int16_t ss_pad; /* extra space */
/* FINFO's and inode daddr's... */
};
@ -228,26 +236,48 @@ struct segsum {
/* INOPB is the number of inodes in a secondary storage block. */
#define INOPB(fs) ((fs)->lfs_inopb)
#define blksize(fs) ((fs)->lfs_bsize)
#define blkoff(fs, loc) ((loc) & (fs)->lfs_bmask)
#define blksize(fs, ip, lbn) \
(((lbn) >= NDADDR || (ip)->i_size >= ((lbn) + 1) << (fs)->lfs_bshift) \
? (fs)->lfs_bsize \
: (fragroundup(fs, blkoff(fs, (ip)->i_size))))
#define blkoff(fs, loc) ((int)((loc) & (fs)->lfs_bmask))
#define fragoff(fs, loc) /* calculates (loc % fs->lfs_fsize) */ \
((int)((loc) & (fs)->lfs_ffmask))
#define fsbtodb(fs, b) ((b) << (fs)->lfs_fsbtodb)
#define dbtofsb(fs, b) ((b) >> (fs)->lfs_fsbtodb)
#define fragstodb(fs, b) ((b) << (fs)->lfs_fsbtodb - (fs)->lfs_fbshift)
#define dbtofrags(fs, b) ((b) >> (fs)->lfs_fsbtodb - (fs)->lfs_fbshift)
#define lblkno(fs, loc) ((loc) >> (fs)->lfs_bshift)
#define lblktosize(fs, blk) ((blk) << (fs)->lfs_bshift)
#define numfrags(fs, loc) /* calculates (loc / fs->fs_fsize) */ \
((loc) >> (fs)->lfs_bshift)
#define numfrags(fs, loc) /* calculates (loc / fs->lfs_fsize) */ \
((loc) >> (fs)->lfs_ffshift)
#define blkroundup(fs, size) /* calculates roundup(size, fs->lfs_bsize) */ \
((int)(((size) + (fs)->lfs_bmask) & (~(fs)->lfs_bmask)))
#define fragroundup(fs, size) /* calculates roundup(size, fs->lfs_fsize) */ \
((int)(((size) + (fs)->lfs_ffmask) & (~(fs)->lfs_ffmask)))
#define fragstoblks(fs, frags) /* calculates (frags / fs->lfs_frag) */ \
((frags) >> (fs)->lfs_fbshift)
#define blkstofrags(fs, blks) /* calculates (blks * fs->lfs_frag) */ \
((blks) << (fs)->lfs_fbshift)
#define fragnum(fs, fsb) /* calculates (fsb % fs->lfs_frag) */ \
((fsb) & ((fs)->lfs_frag - 1))
#define blknum(fs, fsb) /* calculates rounddown(fsb, fs->lfs_frag) */ \
((fsb) &~ ((fs)->lfs_frag - 1))
#define dblksize(fs, dip, lbn) \
(((lbn) >= NDADDR || (dip)->di_size >= ((lbn) + 1) << (fs)->lfs_bshift)\
? (fs)->lfs_bsize \
: (fragroundup(fs, blkoff(fs, (dip)->di_size))))
#define datosn(fs, daddr) /* disk address to segment number */ \
(((daddr) - (fs)->lfs_sboffs[0]) / fsbtodb((fs), (fs)->lfs_ssize))
#define sntoda(fs, sn) /* segment number to disk address */ \
((daddr_t)((sn) * ((fs)->lfs_ssize << (fs)->lfs_fsbtodb) + \
((ufs_daddr_t)((sn) * ((fs)->lfs_ssize << (fs)->lfs_fsbtodb) + \
(fs)->lfs_sboffs[0]))
/* Read in the block with the cleaner info from the ifile. */
#define LFS_CLEANERINFO(CP, F, BP) { \
VTOI((F)->lfs_ivnode)->i_flag |= IN_ACCESS; \
if (bread((F)->lfs_ivnode, \
(daddr_t)0, (F)->lfs_bsize, NOCRED, &(BP))) \
(ufs_daddr_t)0, (F)->lfs_bsize, NOCRED, &(BP))) \
panic("lfs: ifile read"); \
(CP) = (CLEANERINFO *)(BP)->b_data; \
}
@ -281,8 +311,8 @@ struct segsum {
* the segment usage table, plus an ifile page.
*/
#define LFS_FITS(fs, db) \
((long)((db + ((fs)->lfs_uinodes + INOPB((fs))) / INOPB((fs)) + \
fsbtodb(fs, 1) + LFS_SUMMARY_SIZE / DEV_BSIZE + \
((int32_t)((db + ((fs)->lfs_uinodes + INOPB((fs))) / \
INOPB((fs)) + fsbtodb(fs, 1) + LFS_SUMMARY_SIZE / DEV_BSIZE + \
(fs)->lfs_segtabsz)) < (fs)->lfs_avail)
/* Determine if a buffer belongs to the ifile */
@ -294,11 +324,12 @@ struct segsum {
*/
typedef struct block_info {
ino_t bi_inode; /* inode # */
daddr_t bi_lbn; /* logical block w/in file */
daddr_t bi_daddr; /* disk address of block */
ufs_daddr_t bi_lbn; /* logical block w/in file */
ufs_daddr_t bi_daddr; /* disk address of block */
time_t bi_segcreate; /* origin segment create time */
int bi_version; /* file version number */
void *bi_bp; /* data buffer */
int bi_size; /* size of the block (if fragment) */
} BLOCK_INFO;
/* In-memory description of a segment about to be written. */
@ -311,15 +342,16 @@ struct segment {
struct finfo *fip; /* current fileinfo pointer */
struct vnode *vp; /* vnode being gathered */
void *segsum; /* segment summary info */
u_long ninodes; /* number of inodes in this segment */
u_long seg_bytes_left; /* bytes left in segment */
u_long sum_bytes_left; /* bytes left in summary block */
u_long seg_number; /* number of this segment */
daddr_t *start_lbp; /* beginning lbn for this set */
u_int32_t ninodes; /* number of inodes in this segment */
u_int32_t seg_bytes_left; /* bytes left in segment */
u_int32_t sum_bytes_left; /* bytes left in summary block */
u_int32_t seg_number; /* number of this segment */
ufs_daddr_t *start_lbp; /* beginning lbn for this set */
#define SEGM_CKP 0x01 /* doing a checkpoint */
#define SEGM_CLEAN 0x02 /* cleaner call; don't sort */
#define SEGM_SYNC 0x04 /* wait for segment */
u_long seg_flags; /* run-time flags for this segment */
u_int16_t seg_flags; /* run-time flags for this segment */
};
#define ISSPACE(F, BB, C) \
@ -336,18 +368,18 @@ struct segment {
#ifdef DOSTATS
/* Statistics Counters */
struct lfs_stats {
int segsused;
int psegwrites;
int psyncwrites;
int pcleanwrites;
int blocktot;
int cleanblocks;
int ncheckpoints;
int nwrites;
int nsync_writes;
int wait_exceeded;
int write_exceeded;
int flush_invoked;
u_int segsused;
u_int psegwrites;
u_int psyncwrites;
u_int pcleanwrites;
u_int blocktot;
u_int cleanblocks;
u_int ncheckpoints;
u_int nwrites;
u_int nsync_writes;
u_int wait_exceeded;
u_int write_exceeded;
u_int flush_invoked;
};
extern struct lfs_stats lfs_stats;
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1991, 1993
* Copyright (c) 1991, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_alloc.c 8.4 (Berkeley) 1/4/94
* @(#)lfs_alloc.c 8.7 (Berkeley) 5/14/95
*/
#include <sys/param.h>
@ -46,6 +46,7 @@
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>
@ -68,7 +69,7 @@ lfs_valloc(ap)
struct ifile *ifp;
struct inode *ip;
struct vnode *vp;
daddr_t blkno;
ufs_daddr_t blkno;
ino_t new_ino;
u_long i, max;
int error;
@ -95,7 +96,7 @@ lfs_valloc(ap)
vp = fs->lfs_ivnode;
ip = VTOI(vp);
blkno = lblkno(fs, ip->i_size);
lfs_balloc(vp, fs->lfs_bsize, blkno, &bp);
lfs_balloc(vp, 0, fs->lfs_bsize, blkno, &bp);
ip->i_size += fs->lfs_bsize;
vnode_pager_setsize(vp, (u_long)ip->i_size);
vnode_pager_uncache(vp);
@ -172,14 +173,13 @@ lfs_vcreate(mp, ino, vpp)
/* Initialize the inode. */
MALLOC(ip, struct inode *, sizeof(struct inode), M_LFSNODE, M_WAITOK);
lockinit(&ip->i_lock, PINOD, "lfsinode", 0, 0);
(*vpp)->v_data = ip;
ip->i_vnode = *vpp;
ip->i_devvp = ump->um_devvp;
ip->i_flag = IN_MODIFIED;
ip->i_dev = ump->um_dev;
ip->i_number = ip->i_din.di_inumber = ino;
ip->i_din.di_spare[0] = 0xdeadbeef;
ip->i_din.di_spare[1] = 0xdeadbeef;
ip->i_lfs = ump->um_lfs;
#ifdef QUOTA
for (i = 0; i < MAXQUOTAS; i++)
@ -209,7 +209,7 @@ lfs_vfree(ap)
struct ifile *ifp;
struct inode *ip;
struct lfs *fs;
daddr_t old_iaddr;
ufs_daddr_t old_iaddr;
ino_t ino;
/* Get the inode number and file system. */

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_balloc.c 8.1 (Berkeley) 6/11/93
* @(#)lfs_balloc.c 8.4 (Berkeley) 5/8/95
*/
#include <sys/param.h>
#include <sys/buf.h>
@ -50,18 +50,20 @@
#include <ufs/lfs/lfs_extern.h>
int
lfs_balloc(vp, iosize, lbn, bpp)
lfs_balloc(vp, offset, iosize, lbn, bpp)
struct vnode *vp;
int offset;
u_long iosize;
daddr_t lbn;
ufs_daddr_t lbn;
struct buf **bpp;
{
struct buf *ibp, *bp;
struct inode *ip;
struct lfs *fs;
struct indir indirs[NIADDR+2];
daddr_t daddr;
int bb, error, i, num;
ufs_daddr_t daddr, lastblock;
int bb; /* number of disk blocks in a block */
int error, frags, i, nsize, osize, num;
ip = VTOI(vp);
fs = ip->i_lfs;
@ -74,43 +76,90 @@ lfs_balloc(vp, iosize, lbn, bpp)
* or written earlier). If it did, make sure we don't count it as a
* new block or zero out its contents. If it did not, make sure
* we allocate any necessary indirect blocks.
* If we are writing a block beyond the end of the file, we need to
* check if the old last block was a fragment. If it was, we need
* to rewrite it.
*/
*bpp = NULL;
if (error = ufs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL ))
return (error);
*bpp = bp = getblk(vp, lbn, fs->lfs_bsize, 0, 0);
/* Check for block beyond end of file and fragment extension needed. */
lastblock = lblkno(fs, ip->i_size);
if (lastblock < NDADDR && lastblock < lbn) {
osize = blksize(fs, ip, lastblock);
if (osize < fs->lfs_bsize && osize > 0) {
if (error = lfs_fragextend(vp, osize, fs->lfs_bsize,
lastblock, &bp))
return(error);
ip->i_size = (lastblock + 1) * fs->lfs_bsize;
vnode_pager_setsize(vp, (u_long)ip->i_size);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
VOP_BWRITE(bp);
}
}
bb = VFSTOUFS(vp->v_mount)->um_seqinc;
if (daddr == UNASSIGNED)
/* May need to allocate indirect blocks */
for (i = 1; i < num; ++i)
if (!indirs[i].in_exists) {
ibp =
getblk(vp, indirs[i].in_lbn, fs->lfs_bsize,
ibp = getblk(vp, indirs[i].in_lbn, fs->lfs_bsize,
0, 0);
if (!(ibp->b_flags & (B_DONE | B_DELWRI))) {
if ((ibp->b_flags & (B_DONE | B_DELWRI)))
panic ("Indirect block should not exist");
if (!ISSPACE(fs, bb, curproc->p_ucred)){
ibp->b_flags |= B_INVAL;
brelse(ibp);
error = ENOSPC;
return(ENOSPC);
} else {
ip->i_blocks += bb;
ip->i_lfs->lfs_bfree -= bb;
clrbuf(ibp);
error = VOP_BWRITE(ibp);
}
} else
panic ("Indirect block should not exist");
}
if (error) {
if (bp)
brelse(bp);
if(error = VOP_BWRITE(ibp))
return(error);
}
}
/*
* If the block we are writing is a direct block, it's the last
* block in the file, and offset + iosize is less than a full
* block, we can write one or more fragments. There are two cases:
* the block is brand new and we should allocate it the correct
* size or it already exists and contains some fragments and
* may need to extend it.
*/
if (lbn < NDADDR && lblkno(fs, ip->i_size) == lbn) {
nsize = fragroundup(fs, offset + iosize);
frags = numfrags(fs, nsize);
bb = fragstodb(fs, frags);
if (lblktosize(fs, lbn) == ip->i_size)
/* Brand new block or fragment */
*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
else {
/* Extend existing block */
if (error = lfs_fragextend(vp, (int)blksize(fs, ip, lbn),
nsize, lbn, &bp))
return(error);
*bpp = bp;
}
} else {
/*
* Get the existing block from the cache either because the
* block is 1) not a direct block or because it's not the last
* block in the file.
*/
frags = dbtofrags(fs, bb);
*bpp = bp = getblk(vp, lbn, blksize(fs, ip, lbn), 0, 0);
}
/* Now, we may need to allocate the data block */
/*
 * The block we are writing may be a brand new block
 * in which case we need to do accounting (i.e. check
 * for free space and update the inode's block count).
 */
if (!(bp->b_flags & (B_CACHE | B_DONE | B_DELWRI))) {
if (daddr == UNASSIGNED)
if (!ISSPACE(fs, bb, curproc->p_ucred)) {
@ -124,13 +173,55 @@ lfs_balloc(vp, iosize, lbn, bpp)
clrbuf(bp);
}
else if (iosize == fs->lfs_bsize)
bp->b_blkno = daddr; /* Skip the I/O */
/* Optimization: I/O is unnecessary. */
bp->b_blkno = daddr;
else {
/*
* We need to read the block to preserve the
* existing bytes.
*/
bp->b_blkno = daddr;
bp->b_flags |= B_READ;
VOP_STRATEGY(bp);
return(biowait(bp));
}
}
return (error);
return (0);
}
/*
 * Extend the last block of a file from osize to nsize bytes by
 * allocating the extra fragments.  On success, *bpp holds the buffer
 * for logical block lbn, grown to nsize with the new bytes zeroed,
 * and the inode/filesystem free-block accounting is updated.
 * Returns 0 on success or an errno value (ENOSPC, bread/chkdq errors).
 *
 * NOTE(review): lbn is declared daddr_t while the surrounding file was
 * converted to ufs_daddr_t in this import -- confirm against the
 * lfs_fragextend prototype in lfs_extern.h.
 */
int
lfs_fragextend(vp, osize, nsize, lbn, bpp)
	struct vnode *vp;
	int osize;		/* current (fragment) size of the block */
	int nsize;		/* desired size; osize < nsize <= lfs_bsize */
	daddr_t lbn;		/* logical block number being extended */
	struct buf **bpp;	/* out: buffer containing the extended block */
{
	struct inode *ip;
	struct lfs *fs;
	long bb;		/* additional disk blocks required */
	int error;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	bb = (long)fragstodb(fs, numfrags(fs, nsize - osize));
	/* Refuse to grow if the cleaner has not left us enough room. */
	if (!ISSPACE(fs, bb, curproc->p_ucred)) {
		return(ENOSPC);
	}
	/* Read the existing fragment so its contents are preserved. */
	if (error = bread(vp, lbn, osize, NOCRED, bpp)) {
		brelse(*bpp);
		return(error);
	}
#ifdef QUOTA
	/* Charge the growth against the owner's quota before committing. */
	if (error = chkdq(ip, bb, curproc->p_ucred, 0)) {
		brelse(*bpp);
		return (error);
	}
#endif
	/* Commit the accounting: inode block count and fs free space. */
	ip->i_blocks += bb;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	fs->lfs_bfree -= bb;	/* same value as the former recomputation */
	/* Grow the buffer and zero the newly exposed bytes. */
	allocbuf(*bpp, nsize);
	bzero((char *)((*bpp)->b_data) + osize, (u_int)(nsize - osize));
	return(0);
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_bio.c 8.4 (Berkeley) 12/30/93
* @(#)lfs_bio.c 8.10 (Berkeley) 6/10/95
*/
#include <sys/param.h>
@ -76,7 +76,7 @@ lfs_bwrite(ap)
register struct buf *bp = ap->a_bp;
struct lfs *fs;
struct inode *ip;
int error, s;
int db, error, s;
/*
* Set the delayed write flag and use reassignbuf to move the buffer
@ -94,10 +94,12 @@ lfs_bwrite(ap)
*/
if (!(bp->b_flags & B_LOCKED)) {
fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
while (!LFS_FITS(fs, fsbtodb(fs, 1)) && !IS_IFILE(bp) &&
db = fragstodb(fs, numfrags(fs, bp->b_bcount));
while (!LFS_FITS(fs, db) && !IS_IFILE(bp) &&
bp->b_lblkno > 0) {
/* Out of space, need cleaner to run */
wakeup(&lfs_allclean_wakeup);
wakeup(&fs->lfs_nextseg);
if (error = tsleep(&fs->lfs_avail, PCATCH | PUSER,
"cleaner", NULL)) {
brelse(bp);
@ -108,7 +110,7 @@ lfs_bwrite(ap)
if (!(ip->i_flag & IN_MODIFIED))
++fs->lfs_uinodes;
ip->i_flag |= IN_CHANGE | IN_MODIFIED | IN_UPDATE;
fs->lfs_avail -= fsbtodb(fs, 1);
fs->lfs_avail -= db;
++locked_queue_count;
bp->b_flags |= B_DELWRI | B_LOCKED;
bp->b_flags &= ~(B_READ | B_ERROR);
@ -131,7 +133,8 @@ lfs_bwrite(ap)
void
lfs_flush()
{
register struct mount *mp;
register struct mount *mp, *nmp;
struct proc *p = curproc; /* XXX */
#ifdef DOSTATS
++lfs_stats.write_exceeded;
@ -139,10 +142,14 @@ lfs_flush()
if (lfs_writing)
return;
lfs_writing = 1;
for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
/* The lock check below is to avoid races with unmount. */
if (mp->mnt_stat.f_type == MOUNT_LFS &&
(mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_UNMOUNT)) == 0 &&
simple_lock(&mountlist_slock);
for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
nmp = mp->mnt_list.cqe_next;
continue;
}
if (mp->mnt_stat.f_type == lfs_mount_type &&
(mp->mnt_flag & MNT_RDONLY) == 0 &&
!((((struct ufsmount *)mp->mnt_data))->ufsmount_u.lfs)->lfs_dirops ) {
/*
* We set the queue to 0 here because we are about to
@ -156,14 +163,18 @@ lfs_flush()
#endif
lfs_segwrite(mp, 0);
}
simple_lock(&mountlist_slock);
nmp = mp->mnt_list.cqe_next;
vfs_unbusy(mp, p);
}
simple_unlock(&mountlist_slock);
lfs_writing = 0;
}
int
lfs_check(vp, blkno)
struct vnode *vp;
daddr_t blkno;
ufs_daddr_t blkno;
{
extern int lfs_allclean_wakeup;
int error;

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_cksum.c 8.1 (Berkeley) 6/11/93
* @(#)lfs_cksum.c 8.2 (Berkeley) 10/9/94
*/
#include <sys/types.h>
@ -52,7 +52,7 @@ cksum(str, len)
len &= ~(sizeof(u_short) - 1);
for (sum = 0; len; len -= sizeof(u_short)) {
sum ^= *(u_short *)str;
++(u_short *)str;
str = (void *)((u_short *)str + 1);
}
return (sum);
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_extern.h 8.2 (Berkeley) 4/16/94
* @(#)lfs_extern.h 8.6 (Berkeley) 5/8/95
*/
struct fid;
@ -45,10 +45,10 @@ struct mbuf;
__BEGIN_DECLS
u_long cksum __P((void *, size_t)); /* XXX */
int lfs_balloc __P((struct vnode *, u_long, daddr_t, struct buf **));
int lfs_balloc __P((struct vnode *, int, u_long, ufs_daddr_t, struct buf **));
int lfs_blkatoff __P((struct vop_blkatoff_args *));
int lfs_bwrite __P((struct vop_bwrite_args *));
int lfs_check __P((struct vnode *, daddr_t));
int lfs_check __P((struct vnode *, ufs_daddr_t));
int lfs_close __P((struct vop_close_args *));
int lfs_create __P((struct vop_create_args *));
int lfs_fhtovp __P((struct mount *, struct fid *, struct mbuf *,
@ -58,7 +58,7 @@ int lfs_getattr __P((struct vop_getattr_args *));
struct dinode *
lfs_ifind __P((struct lfs *, ino_t, struct dinode *));
int lfs_inactive __P((struct vop_inactive_args *));
int lfs_init __P((void));
int lfs_init __P((struct vfsconf *));
int lfs_initseg __P((struct lfs *));
int lfs_link __P((struct vop_link_args *));
int lfs_makeinode __P((int, struct nameidata *, struct inode **));
@ -68,8 +68,9 @@ int lfs_mount __P((struct mount *,
char *, caddr_t, struct nameidata *, struct proc *));
int lfs_mountroot __P((void));
struct buf *
lfs_newbuf __P((struct vnode *, daddr_t, size_t));
lfs_newbuf __P((struct vnode *, ufs_daddr_t, size_t));
int lfs_read __P((struct vop_read_args *));
int lfs_reclaim __P((struct vop_reclaim_args *));
int lfs_remove __P((struct vop_remove_args *));
int lfs_rmdir __P((struct vop_rmdir_args *));
int lfs_rename __P((struct vop_rename_args *));
@ -79,6 +80,8 @@ int lfs_segwrite __P((struct mount *, int));
int lfs_statfs __P((struct mount *, struct statfs *, struct proc *));
int lfs_symlink __P((struct vop_symlink_args *));
int lfs_sync __P((struct mount *, int, struct ucred *, struct proc *));
#define lfs_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
size_t, struct proc *)))eopnotsupp)
int lfs_truncate __P((struct vop_truncate_args *));
int lfs_unmount __P((struct mount *, int, struct proc *));
int lfs_update __P((struct vop_update_args *));
@ -96,6 +99,7 @@ void lfs_dump_dinode __P((struct dinode *));
void lfs_dump_super __P((struct lfs *));
#endif
__END_DECLS
extern int lfs_mount_type;
extern int (**lfs_vnodeop_p)();
extern int (**lfs_specop_p)();
#ifdef FIFO

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_inode.c 8.5 (Berkeley) 12/30/93
* @(#)lfs_inode.c 8.9 (Berkeley) 5/8/95
*/
#include <sys/param.h>
@ -53,12 +53,6 @@
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>
int
lfs_init()
{
return (ufs_init());
}
/* Search a block for a specific dinode. */
struct dinode *
lfs_ifind(fs, ino, dip)
@ -96,13 +90,13 @@ lfs_update(ap)
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0)
return (0);
if (ip->i_flag & IN_ACCESS)
ip->i_atime.ts_sec = ap->a_access->tv_sec;
ip->i_atime = ap->a_access->tv_sec;
if (ip->i_flag & IN_UPDATE) {
ip->i_mtime.ts_sec = ap->a_modify->tv_sec;
ip->i_mtime = ap->a_modify->tv_sec;
(ip)->i_modrev++;
}
if (ip->i_flag & IN_CHANGE)
ip->i_ctime.ts_sec = time.tv_sec;
ip->i_ctime = time.tv_sec;
ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE);
if (!(ip->i_flag & IN_MODIFIED))
@ -117,22 +111,22 @@ lfs_update(ap)
#define UPDATE_SEGUSE \
if (lastseg != -1) { \
LFS_SEGENTRY(sup, fs, lastseg, sup_bp); \
if ((num << fs->lfs_bshift) > sup->su_nbytes) \
if (num > sup->su_nbytes) \
panic("lfs_truncate: negative bytes in segment %d\n", \
lastseg); \
sup->su_nbytes -= num << fs->lfs_bshift; \
sup->su_nbytes -= num; \
e1 = VOP_BWRITE(sup_bp); \
blocksreleased += num; \
fragsreleased += numfrags(fs, num); \
}
#define SEGDEC { \
#define SEGDEC(S) { \
if (daddr != 0) { \
if (lastseg != (seg = datosn(fs, daddr))) { \
UPDATE_SEGUSE; \
num = 1; \
num = (S); \
lastseg = seg; \
} else \
++num; \
num += (S); \
} \
}
@ -153,7 +147,7 @@ lfs_truncate(ap)
{
register struct indir *inp;
register int i;
register daddr_t *daddrp;
register ufs_daddr_t *daddrp;
register struct vnode *vp = ap->a_vp;
off_t length = ap->a_length;
struct buf *bp, *sup_bp;
@ -163,9 +157,10 @@ lfs_truncate(ap)
struct lfs *fs;
struct indir a[NIADDR + 2], a_end[NIADDR + 2];
SEGUSE *sup;
daddr_t daddr, lastblock, lbn, olastblock;
long off, a_released, blocksreleased, i_released;
int e1, e2, depth, lastseg, num, offset, seg, size;
ufs_daddr_t daddr, lastblock, lbn, olastblock;
ufs_daddr_t oldsize_lastblock, oldsize_newlast, newsize;
long off, a_released, fragsreleased, i_released;
int e1, e2, depth, lastseg, num, offset, seg, freesize;
ip = VTOI(vp);
tv = time;
@ -201,24 +196,29 @@ lfs_truncate(ap)
* Update the size of the file. If the file is not being truncated to
 * a block boundary, the contents of the partial block following the end
 * of the file must be zero'ed in case it ever becomes accessible again
* because of subsequent file growth.
* because of subsequent file growth. For this part of the code,
* oldsize_newlast refers to the old size of the new last block in the file.
*/
offset = blkoff(fs, length);
lbn = lblkno(fs, length);
oldsize_newlast = blksize(fs, ip, lbn);
/* Now set oldsize to the current size of the current last block */
oldsize_lastblock = blksize(fs, ip, olastblock);
if (offset == 0)
ip->i_size = length;
else {
lbn = lblkno(fs, length);
#ifdef QUOTA
if (e1 = getinoquota(ip))
return (e1);
#endif
if (e1 = bread(vp, lbn, fs->lfs_bsize, NOCRED, &bp))
if (e1 = bread(vp, lbn, oldsize_newlast, NOCRED, &bp))
return (e1);
ip->i_size = length;
size = blksize(fs);
(void)vnode_pager_uncache(vp);
bzero((char *)bp->b_data + offset, (u_int)(size - offset));
allocbuf(bp, size);
newsize = blksize(fs, ip, lbn);
bzero((char *)bp->b_data + offset, (u_int)(newsize - offset));
allocbuf(bp, newsize);
if (e1 = VOP_BWRITE(bp))
return (e1);
}
@ -226,20 +226,24 @@ lfs_truncate(ap)
* Modify sup->su_nbyte counters for each deleted block; keep track
* of number of blocks removed for ip->i_blocks.
*/
blocksreleased = 0;
fragsreleased = 0;
num = 0;
lastseg = -1;
for (lbn = olastblock; lbn >= lastblock;) {
/* XXX use run length from bmap array to make this faster */
ufs_bmaparray(vp, lbn, &daddr, a, &depth, NULL);
if (lbn == olastblock)
if (lbn == olastblock) {
for (i = NIADDR + 2; i--;)
a_end[i] = a[i];
freesize = oldsize_lastblock;
} else
freesize = fs->lfs_bsize;
switch (depth) {
case 0: /* Direct block. */
daddr = ip->i_db[lbn];
SEGDEC;
SEGDEC(freesize);
ip->i_db[lbn] = 0;
--lbn;
break;
@ -261,19 +265,20 @@ lfs_truncate(ap)
inp->in_lbn, fs->lfs_bsize, NOCRED, &bp))
panic("lfs_truncate: bread bno %d",
inp->in_lbn);
daddrp = (daddr_t *)bp->b_data + inp->in_off;
daddrp = (ufs_daddr_t *)bp->b_data +
inp->in_off;
for (i = inp->in_off;
i++ <= a_end[depth].in_off;) {
daddr = *daddrp++;
SEGDEC;
SEGDEC(freesize);
}
a_end[depth].in_off = NINDIR(fs) - 1;
if (inp->in_off == 0)
brelse (bp);
else {
bzero((daddr_t *)bp->b_data +
bzero((ufs_daddr_t *)bp->b_data +
inp->in_off, fs->lfs_bsize -
inp->in_off * sizeof(daddr_t));
inp->in_off * sizeof(ufs_daddr_t));
if (e1 = VOP_BWRITE(bp))
return (e1);
}
@ -281,7 +286,7 @@ lfs_truncate(ap)
if (depth == 0 && a[1].in_off == 0) {
off = a[0].in_off;
daddr = ip->i_ib[off];
SEGDEC;
SEGDEC(freesize);
ip->i_ib[off] = 0;
}
if (lbn == lastblock || lbn <= NDADDR)
@ -303,13 +308,14 @@ lfs_truncate(ap)
}
#ifdef DIAGNOSTIC
if (ip->i_blocks < fsbtodb(fs, blocksreleased)) {
printf("lfs_truncate: block count < 0\n");
blocksreleased = ip->i_blocks;
if (ip->i_blocks < fragstodb(fs, fragsreleased)) {
printf("lfs_truncate: frag count < 0\n");
fragsreleased = dbtofrags(fs, ip->i_blocks);
panic("lfs_truncate: frag count < 0\n");
}
#endif
ip->i_blocks -= fsbtodb(fs, blocksreleased);
fs->lfs_bfree += fsbtodb(fs, blocksreleased);
ip->i_blocks -= fragstodb(fs, fragsreleased);
fs->lfs_bfree += fragstodb(fs, fragsreleased);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
/*
* Traverse dirty block list counting number of dirty buffers
@ -320,7 +326,7 @@ lfs_truncate(ap)
i_released = 0;
for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next)
if (bp->b_flags & B_LOCKED) {
++a_released;
a_released += numfrags(fs, bp->b_bcount);
/*
* XXX
* When buffers are created in the cache, their block
@ -333,25 +339,28 @@ lfs_truncate(ap)
* here.
*/
if (bp->b_blkno == bp->b_lblkno)
++i_released;
i_released += numfrags(fs, bp->b_bcount);
}
blocksreleased = fsbtodb(fs, i_released);
fragsreleased = i_released;
#ifdef DIAGNOSTIC
if (blocksreleased > ip->i_blocks) {
if (fragsreleased > dbtofrags(fs, ip->i_blocks)) {
printf("lfs_inode: Warning! %s\n",
"more blocks released from inode than are in inode");
blocksreleased = ip->i_blocks;
"more frags released from inode than are in inode");
fragsreleased = dbtofrags(fs, ip->i_blocks);
panic("lfs_inode: Warning. More frags released\n");
}
#endif
fs->lfs_bfree += blocksreleased;
ip->i_blocks -= blocksreleased;
fs->lfs_bfree += fragstodb(fs, fragsreleased);
ip->i_blocks -= fragstodb(fs, fragsreleased);
#ifdef DIAGNOSTIC
if (length == 0 && ip->i_blocks != 0)
if (length == 0 && ip->i_blocks != 0) {
printf("lfs_inode: Warning! %s%d%s\n",
"Truncation to zero, but ", ip->i_blocks,
" blocks left on inode");
panic("lfs_inode");
}
#endif
fs->lfs_avail += fsbtodb(fs, a_released);
fs->lfs_avail += fragstodb(fs, a_released);
e1 = vinvalbuf(vp, (length > 0) ? V_SAVE : 0, ap->a_cred, ap->a_p,
0, 0);
e2 = VOP_UPDATE(vp, &tv, &tv, 0);

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_segment.c 8.5 (Berkeley) 1/4/94
* @(#)lfs_segment.c 8.10 (Berkeley) 6/10/95
*/
#include <sys/param.h>
@ -74,13 +74,13 @@ void lfs_callback __P((struct buf *));
void lfs_gather __P((struct lfs *, struct segment *,
struct vnode *, int (*) __P((struct lfs *, struct buf *))));
int lfs_gatherblock __P((struct segment *, struct buf *, int *));
void lfs_iset __P((struct inode *, daddr_t, time_t));
void lfs_iset __P((struct inode *, ufs_daddr_t, time_t));
int lfs_match_data __P((struct lfs *, struct buf *));
int lfs_match_dindir __P((struct lfs *, struct buf *));
int lfs_match_indir __P((struct lfs *, struct buf *));
int lfs_match_tindir __P((struct lfs *, struct buf *));
void lfs_newseg __P((struct lfs *));
void lfs_shellsort __P((struct buf **, daddr_t *, register int));
void lfs_shellsort __P((struct buf **, ufs_daddr_t *, register int));
void lfs_supercallback __P((struct buf *));
void lfs_updatemeta __P((struct segment *));
int lfs_vref __P((struct vnode *));
@ -158,10 +158,23 @@ lfs_writevnodes(fs, mp, sp, op)
struct inode *ip;
struct vnode *vp;
/* BEGIN HACK */
#define VN_OFFSET (((void *)&vp->v_mntvnodes.le_next) - (void *)vp)
#define BACK_VP(VP) ((struct vnode *)(((void *)VP->v_mntvnodes.le_prev) - VN_OFFSET))
#define BEG_OF_VLIST ((struct vnode *)(((void *)&mp->mnt_vnodelist.lh_first) - VN_OFFSET))
/* Find last vnode. */
loop: for (vp = mp->mnt_vnodelist.lh_first;
vp && vp->v_mntvnodes.le_next != NULL;
vp = vp->v_mntvnodes.le_next);
for (; vp && vp != BEG_OF_VLIST; vp = BACK_VP(vp)) {
/* END HACK */
/*
loop:
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
vp = vp->v_mntvnodes.le_next) {
*/
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
@ -207,13 +220,14 @@ lfs_segwrite(mp, flags)
struct mount *mp;
int flags; /* Do a checkpoint. */
{
struct proc *p = curproc; /* XXX */
struct buf *bp;
struct inode *ip;
struct lfs *fs;
struct segment *sp;
struct vnode *vp;
SEGUSE *segusep;
daddr_t ibno;
ufs_daddr_t ibno;
CLEANERINFO *cip;
int clean, do_ckp, error, i;
@ -227,14 +241,15 @@ lfs_segwrite(mp, flags)
LFS_CLEANERINFO(cip, fs, bp);
clean = cip->clean;
brelse(bp);
if (clean <= 2) {
printf ("segs clean: %d\n", clean);
if (clean <= 2 || fs->lfs_avail <= 0) {
/* printf ("segs clean: %d\n", clean); */
wakeup(&lfs_allclean_wakeup);
wakeup(&fs->lfs_nextseg);
if (error = tsleep(&fs->lfs_avail, PRIBIO + 1,
"lfs writer", 0))
return (error);
}
} while (clean <= 2 );
} while (clean <= 2 || fs->lfs_avail <= 0);
/*
* Allocate a segment structure and enough space to hold pointers to
@ -282,7 +297,8 @@ lfs_segwrite(mp, flags)
if (do_ckp || fs->lfs_doifile) {
redo:
vp = fs->lfs_ivnode;
while (vget(vp, 1));
while (vget(vp, LK_EXCLUSIVE, p))
continue;
ip = VTOI(vp);
if (vp->v_dirtyblkhd.lh_first != NULL)
lfs_writefile(fs, sp, vp);
@ -331,7 +347,7 @@ lfs_writefile(fs, sp, vp)
sp->sum_bytes_left < sizeof(struct finfo))
(void) lfs_writeseg(fs, sp);
sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(daddr_t);
sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(ufs_daddr_t);
++((SEGSUM *)(sp->segsum))->ss_nfinfo;
fip = sp->fip;
@ -357,10 +373,10 @@ lfs_writefile(fs, sp, vp)
if (fip->fi_nblocks != 0) {
sp->fip =
(struct finfo *)((caddr_t)fip + sizeof(struct finfo) +
sizeof(daddr_t) * (fip->fi_nblocks - 1));
sizeof(ufs_daddr_t) * (fip->fi_nblocks - 1));
sp->start_lbp = &sp->fip->fi_blocks[0];
} else {
sp->sum_bytes_left += sizeof(struct finfo) - sizeof(daddr_t);
sp->sum_bytes_left += sizeof(struct finfo) - sizeof(ufs_daddr_t);
--((SEGSUM *)(sp->segsum))->ss_nfinfo;
}
}
@ -374,7 +390,7 @@ lfs_writeinode(fs, sp, ip)
struct buf *bp, *ibp;
IFILE *ifp;
SEGUSE *sup;
daddr_t daddr;
ufs_daddr_t daddr;
ino_t ino;
int error, i, ndx;
int redo_ifile = 0;
@ -386,7 +402,7 @@ lfs_writeinode(fs, sp, ip)
if (sp->ibp == NULL) {
/* Allocate a new segment if necessary. */
if (sp->seg_bytes_left < fs->lfs_bsize ||
sp->sum_bytes_left < sizeof(daddr_t))
sp->sum_bytes_left < sizeof(ufs_daddr_t))
(void) lfs_writeseg(fs, sp);
/* Get next inode block. */
@ -402,10 +418,10 @@ lfs_writeinode(fs, sp, ip)
fs->lfs_avail -= fsbtodb(fs, 1);
/* Set remaining space counters. */
sp->seg_bytes_left -= fs->lfs_bsize;
sp->sum_bytes_left -= sizeof(daddr_t);
ndx = LFS_SUMMARY_SIZE / sizeof(daddr_t) -
sp->sum_bytes_left -= sizeof(ufs_daddr_t);
ndx = LFS_SUMMARY_SIZE / sizeof(ufs_daddr_t) -
sp->ninodes / INOPB(fs) - 1;
((daddr_t *)(sp->segsum))[ndx] = daddr;
((ufs_daddr_t *)(sp->segsum))[ndx] = daddr;
}
/* Update the inode times and copy the inode onto the inode page. */
@ -478,8 +494,8 @@ lfs_gatherblock(sp, bp, sptr)
panic ("lfs_gatherblock: Null vp in segment");
#endif
fs = sp->fs;
if (sp->sum_bytes_left < sizeof(daddr_t) ||
sp->seg_bytes_left < fs->lfs_bsize) {
if (sp->sum_bytes_left < sizeof(ufs_daddr_t) ||
sp->seg_bytes_left < bp->b_bcount) {
if (sptr)
splx(*sptr);
lfs_updatemeta(sp);
@ -492,7 +508,7 @@ lfs_gatherblock(sp, bp, sptr)
/* Add the current file to the segment summary. */
++((SEGSUM *)(sp->segsum))->ss_nfinfo;
sp->sum_bytes_left -=
sizeof(struct finfo) - sizeof(daddr_t);
sizeof(struct finfo) - sizeof(ufs_daddr_t);
if (sptr)
*sptr = splbio();
@ -504,8 +520,8 @@ lfs_gatherblock(sp, bp, sptr)
*sp->cbpp++ = bp;
sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno;
sp->sum_bytes_left -= sizeof(daddr_t);
sp->seg_bytes_left -= fs->lfs_bsize;
sp->sum_bytes_left -= sizeof(ufs_daddr_t);
sp->seg_bytes_left -= bp->b_bcount;
return(0);
}
@ -521,7 +537,19 @@ lfs_gather(fs, sp, vp, match)
sp->vp = vp;
s = splbio();
loop: for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next) {
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
/* BEGIN HACK */
#define BUF_OFFSET (((void *)&bp->b_vnbufs.le_next) - (void *)bp)
#define BACK_BUF(BP) ((struct buf *)(((void *)BP->b_vnbufs.le_prev) - BUF_OFFSET))
#define BEG_OF_LIST ((struct buf *)(((void *)&vp->v_dirtyblkhd.lh_first) - BUF_OFFSET))
/*loop: for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next) {*/
/* Find last buffer. */
loop: for (bp = vp->v_dirtyblkhd.lh_first; bp && bp->b_vnbufs.le_next != NULL;
bp = bp->b_vnbufs.le_next);
for (; bp && bp != BEG_OF_LIST; bp = BACK_BUF(bp)) {
/* END HACK */
if (bp->b_flags & B_BUSY || !match(fs, bp) ||
bp->b_flags & B_GATHERED)
continue;
@ -554,11 +582,13 @@ lfs_updatemeta(sp)
struct vnode *vp;
struct indir a[NIADDR + 2], *ap;
struct inode *ip;
daddr_t daddr, lbn, off;
int db_per_fsb, error, i, nblocks, num;
ufs_daddr_t daddr, lbn, off;
int error, i, nblocks, num;
vp = sp->vp;
nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
if (nblocks < 0)
panic("This is a bad thing\n");
if (vp == NULL || nblocks == 0)
return;
@ -566,16 +596,24 @@ lfs_updatemeta(sp)
if (!(sp->seg_flags & SEGM_CLEAN))
lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);
/*
* Record the length of the last block in case it's a fragment.
* If there are indirect blocks present, they sort last. An
* indirect block will be lfs_bsize and its presence indicates
* that you cannot have fragments.
*/
sp->fip->fi_lastlength = sp->start_bpp[nblocks - 1]->b_bcount;
/*
* Assign disk addresses, and update references to the logical
* block and the segment usage information.
*/
fs = sp->fs;
db_per_fsb = fsbtodb(fs, 1);
for (i = nblocks; i--; ++sp->start_bpp) {
lbn = *sp->start_lbp++;
(*sp->start_bpp)->b_blkno = off = fs->lfs_offset;
fs->lfs_offset += db_per_fsb;
fs->lfs_offset +=
fragstodb(fs, numfrags(fs, (*sp->start_bpp)->b_bcount));
if (error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL))
panic("lfs_updatemeta: ufs_bmaparray %d", error);
@ -597,11 +635,10 @@ lfs_updatemeta(sp)
* to get counted for the inode.
*/
if (bp->b_blkno == -1 && !(bp->b_flags & B_CACHE)) {
printf ("Updatemeta allocating indirect block: shouldn't happen\n");
ip->i_blocks += btodb(fs->lfs_bsize);
fs->lfs_bfree -= btodb(fs->lfs_bsize);
ip->i_blocks += fsbtodb(fs, 1);
fs->lfs_bfree -= fragstodb(fs, fs->lfs_frag);
}
((daddr_t *)bp->b_data)[ap->in_off] = off;
((ufs_daddr_t *)bp->b_data)[ap->in_off] = off;
VOP_BWRITE(bp);
}
@ -610,14 +647,16 @@ printf ("Updatemeta allocating indirect block: shouldn't happen\n");
!(daddr >= fs->lfs_lastpseg && daddr <= off)) {
LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
if (sup->su_nbytes < fs->lfs_bsize) {
if (sup->su_nbytes < (*sp->start_bpp)->b_bcount) {
/* XXX -- Change to a panic. */
printf("lfs: negative bytes (segment %d)\n",
datosn(fs, daddr));
printf("lfs: bp = 0x%x, addr = 0x%x\n",
bp, bp->b_un.b_addr);
panic ("Negative Bytes");
}
#endif
sup->su_nbytes -= fs->lfs_bsize;
sup->su_nbytes -= (*sp->start_bpp)->b_bcount;
error = VOP_BWRITE(bp);
}
}
@ -643,6 +682,7 @@ lfs_initseg(fs)
if (!LFS_PARTIAL_FITS(fs)) {
/* Wake up any cleaning procs waiting on this file system. */
wakeup(&lfs_allclean_wakeup);
wakeup(&fs->lfs_nextseg);
lfs_newseg(fs);
repeat = 1;
@ -684,11 +724,13 @@ lfs_initseg(fs)
ssp = sp->segsum;
ssp->ss_next = fs->lfs_nextseg;
ssp->ss_nfinfo = ssp->ss_ninos = 0;
ssp->ss_magic = SS_MAGIC;
/* Set pointer to first FINFO, initialize it. */
sp->fip = (struct finfo *)(sp->segsum + sizeof(SEGSUM));
sp->fip = (struct finfo *)((caddr_t)sp->segsum + sizeof(SEGSUM));
sp->fip->fi_nblocks = 0;
sp->start_lbp = &sp->fip->fi_blocks[0];
sp->fip->fi_lastlength = 0;
sp->seg_bytes_left -= LFS_SUMMARY_SIZE;
sp->sum_bytes_left = LFS_SUMMARY_SIZE - sizeof(SEGSUM);
@ -750,9 +792,8 @@ lfs_writeseg(fs, sp)
SEGUSE *sup;
SEGSUM *ssp;
dev_t i_dev;
size_t size;
u_long *datap, *dp;
int ch_per_blk, do_again, i, nblocks, num, s;
int do_again, i, nblocks, s;
int (*strategy)__P((struct vop_strategy_args *));
struct vop_strategy_args vop_strategy_a;
u_short ninos;
@ -766,12 +807,16 @@ lfs_writeseg(fs, sp)
if ((nblocks = sp->cbpp - sp->bpp) == 1)
return (0);
ssp = (SEGSUM *)sp->segsum;
/* Update the segment usage information. */
LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
/* Loop through all blocks, except the segment summary. */
for (bpp = sp->bpp; ++bpp < sp->cbpp; )
sup->su_nbytes += (*bpp)->b_bcount;
ssp = (SEGSUM *)sp->segsum;
ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
sup->su_nbytes += nblocks - 1 - ninos << fs->lfs_bshift;
sup->su_nbytes += ssp->ss_ninos * sizeof(struct dinode);
sup->su_nbytes += LFS_SUMMARY_SIZE;
sup->su_lastmod = time.tv_sec;
@ -824,23 +869,21 @@ lfs_writeseg(fs, sp)
* easily make the buffers contiguous in kernel memory and if that's
* fast enough.
*/
ch_per_blk = MAXPHYS / fs->lfs_bsize;
for (bpp = sp->bpp, i = nblocks; i;) {
num = ch_per_blk;
if (num > i)
num = i;
i -= num;
size = num * fs->lfs_bsize;
cbp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp,
(*bpp)->b_blkno, size);
(*bpp)->b_blkno, MAXPHYS);
cbp->b_dev = i_dev;
cbp->b_flags |= B_ASYNC | B_BUSY;
cbp->b_bcount = 0;
s = splbio();
++fs->lfs_iocount;
for (p = cbp->b_data; num--;) {
bp = *bpp++;
for (p = cbp->b_data; i && cbp->b_bcount < MAXPHYS; i--) {
bp = *bpp;
if (bp->b_bcount > (MAXPHYS - cbp->b_bcount))
break;
bpp++;
/*
* Fake buffers from the cleaner are marked as B_INVAL.
* We need to copy the data from user space rather than
@ -853,6 +896,7 @@ lfs_writeseg(fs, sp)
} else
bcopy(bp->b_data, p, bp->b_bcount);
p += bp->b_bcount;
cbp->b_bcount += bp->b_bcount;
if (bp->b_flags & B_LOCKED)
--locked_queue_count;
bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
@ -872,7 +916,6 @@ lfs_writeseg(fs, sp)
}
++cbp->b_vp->v_numoutput;
splx(s);
cbp->b_bcount = p - (char *)cbp->b_data;
/*
* XXXX This is a gross and disgusting hack. Since these
* buffers are physically addressed, they hang off the
@ -992,7 +1035,7 @@ lfs_match_tindir(fs, bp)
struct buf *
lfs_newbuf(vp, daddr, size)
struct vnode *vp;
daddr_t daddr;
ufs_daddr_t daddr;
size_t size;
{
struct buf *bp;
@ -1059,7 +1102,7 @@ lfs_supercallback(bp)
void
lfs_shellsort(bp_array, lb_array, nmemb)
struct buf **bp_array;
daddr_t *lb_array;
ufs_daddr_t *lb_array;
register int nmemb;
{
static int __rsshell_increments[] = { 4, 1, 0 };
@ -1088,24 +1131,36 @@ lfs_shellsort(bp_array, lb_array, nmemb)
lfs_vref(vp)
register struct vnode *vp;
{
struct proc *p = curproc; /* XXX */
if (vp->v_flag & VXLOCK)
if (vp->v_flag & VXLOCK) /* XXX */
return(1);
return (vget(vp, 0));
return (vget(vp, 0, p));
}
/*
* This is vrele except that we do not want to VOP_INACTIVE this vnode. We
* inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
*/
void
lfs_vunref(vp)
register struct vnode *vp;
{
extern int lfs_no_inactive;
struct proc *p = curproc; /* XXX */
extern struct simplelock vnode_free_list_slock; /* XXX */
extern TAILQ_HEAD(freelst, vnode) vnode_free_list; /* XXX */
simple_lock(&vp->v_interlock);
vp->v_usecount--;
if (vp->v_usecount > 0) {
simple_unlock(&vp->v_interlock);
return;
}
/*
* This is vrele except that we do not want to VOP_INACTIVE
* this vnode. Rather than inline vrele here, we use a global
* flag to tell lfs_inactive not to run. Yes, its gross.
* insert at tail of LRU list
*/
lfs_no_inactive = 1;
vrele(vp);
lfs_no_inactive = 0;
simple_lock(&vnode_free_list_slock);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
simple_unlock(&vp->v_interlock);
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_subr.c 8.2 (Berkeley) 9/21/93
* @(#)lfs_subr.c 8.4 (Berkeley) 5/8/95
*/
#include <sys/param.h>
@ -63,13 +63,13 @@ lfs_blkatoff(ap)
register struct lfs *fs;
struct inode *ip;
struct buf *bp;
daddr_t lbn;
ufs_daddr_t lbn;
int bsize, error;
ip = VTOI(ap->a_vp);
fs = ip->i_lfs;
lbn = lblkno(fs, ap->a_offset);
bsize = blksize(fs);
bsize = blksize(fs, ip, lbn);
*ap->a_bpp = NULL;
if (error = bread(ap->a_vp, lbn, bsize, NOCRED, &bp)) {
@ -109,7 +109,8 @@ lfs_seglock(fs, flags)
sp = fs->lfs_sp = malloc(sizeof(struct segment), M_SEGMENT, M_WAITOK);
sp->bpp = malloc(((LFS_SUMMARY_SIZE - sizeof(SEGSUM)) /
sizeof(daddr_t) + 1) * sizeof(struct buf *), M_SEGMENT, M_WAITOK);
sizeof(ufs_daddr_t) + 1) * sizeof(struct buf *),
M_SEGMENT, M_WAITOK);
sp->seg_flags = flags;
sp->vp = NULL;
(void) lfs_initseg(fs);

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_syscalls.c 8.5 (Berkeley) 4/20/94
* @(#)lfs_syscalls.c 8.10 (Berkeley) 5/14/95
*/
#include <sys/param.h>
@ -64,6 +64,10 @@ if (sp->sum_bytes_left < (s)) { \
}
struct buf *lfs_fakebuf __P((struct vnode *, int, size_t, caddr_t));
int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;
/*
* lfs_markv:
*
@ -99,7 +103,7 @@ lfs_markv(p, uap, retval)
fsid_t fsid;
void *start;
ino_t lastino;
daddr_t b_daddr, v_daddr;
ufs_daddr_t b_daddr, v_daddr;
u_long bsize;
int cnt, error;
@ -108,7 +112,7 @@ lfs_markv(p, uap, retval)
if (error = copyin(uap->fsidp, &fsid, sizeof(fsid_t)))
return (error);
if ((mntp = getvfs(&fsid)) == NULL)
if ((mntp = vfs_getvfs(&fsid)) == NULL)
return (EINVAL);
cnt = uap->blkcnt;
@ -135,7 +139,7 @@ lfs_markv(p, uap, retval)
if (sp->fip->fi_nblocks == 0) {
DEC_FINFO(sp);
sp->sum_bytes_left +=
sizeof(FINFO) - sizeof(daddr_t);
sizeof(FINFO) - sizeof(ufs_daddr_t);
} else {
lfs_updatemeta(sp);
BUMP_FIP(sp);
@ -147,7 +151,7 @@ lfs_markv(p, uap, retval)
/* Start a new file */
CHECK_SEG(sizeof(FINFO));
sp->sum_bytes_left -= sizeof(FINFO) - sizeof(daddr_t);
sp->sum_bytes_left -= sizeof(FINFO) - sizeof(ufs_daddr_t);
INC_FINFO(sp);
sp->start_lbp = &sp->fip->fi_blocks[0];
sp->vp = NULL;
@ -172,6 +176,7 @@ lfs_markv(p, uap, retval)
#ifdef DIAGNOSTIC
printf("lfs_markv: VFS_VGET failed (%d)\n",
blkp->bi_inode);
panic("lfs_markv VFS_VGET FAILED");
#endif
lastino = LFS_UNUSED_INUM;
v_daddr = LFS_UNUSED_DADDR;
@ -202,7 +207,7 @@ lfs_markv(p, uap, retval)
bp = getblk(vp, blkp->bi_lbn, bsize, 0, 0);
if (!(bp->b_flags & (B_DELWRI | B_DONE | B_CACHE)) &&
(error = copyin(blkp->bi_bp, bp->b_data,
bsize)))
blkp->bi_size)))
goto err2;
if (error = VOP_BWRITE(bp))
goto err2;
@ -213,7 +218,7 @@ lfs_markv(p, uap, retval)
if (sp->fip->fi_nblocks == 0) {
DEC_FINFO(sp);
sp->sum_bytes_left +=
sizeof(FINFO) - sizeof(daddr_t);
sizeof(FINFO) - sizeof(ufs_daddr_t);
} else
lfs_updatemeta(sp);
@ -267,10 +272,11 @@ lfs_bmapv(p, uap, retval)
{
BLOCK_INFO *blkp;
struct mount *mntp;
struct ufsmount *ump;
struct vnode *vp;
fsid_t fsid;
void *start;
daddr_t daddr;
ufs_daddr_t daddr;
int cnt, error, step;
if (error = suser(p->p_ucred, &p->p_acflag))
@ -278,7 +284,7 @@ lfs_bmapv(p, uap, retval)
if (error = copyin(uap->fsidp, &fsid, sizeof(fsid_t)))
return (error);
if ((mntp = getvfs(&fsid)) == NULL)
if ((mntp = vfs_getvfs(&fsid)) == NULL)
return (EINVAL);
cnt = uap->blkcnt;
@ -291,8 +297,16 @@ lfs_bmapv(p, uap, retval)
for (step = cnt; step--; ++blkp) {
if (blkp->bi_lbn == LFS_UNUSED_LBN)
continue;
/* Could be a deadlock ? */
if (VFS_VGET(mntp, blkp->bi_inode, &vp))
/*
* A regular call to VFS_VGET could deadlock
* here. Instead, we try an unlocked access.
*/
ump = VFSTOUFS(mntp);
if ((vp =
ufs_ihashlookup(ump->um_dev, blkp->bi_inode)) != NULL) {
if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &daddr, NULL))
daddr = LFS_UNUSED_DADDR;
} else if (VFS_VGET(mntp, blkp->bi_inode, &vp))
daddr = LFS_UNUSED_DADDR;
else {
if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &daddr, NULL))
@ -337,7 +351,7 @@ lfs_segclean(p, uap, retval)
if (error = copyin(uap->fsidp, &fsid, sizeof(fsid_t)))
return (error);
if ((mntp = getvfs(&fsid)) == NULL)
if ((mntp = vfs_getvfs(&fsid)) == NULL)
return (EINVAL);
fs = VFSTOUFS(mntp)->um_lfs;
@ -402,14 +416,14 @@ lfs_segwait(p, uap, retval)
if (fsid == (fsid_t)-1)
addr = &lfs_allclean_wakeup;
else {
if ((mntp = getvfs(&fsid)) == NULL)
if ((mntp = vfs_getvfs(&fsid)) == NULL)
return (EINVAL);
addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;
}
#else
if (error = copyin(uap->fsidp, &fsid, sizeof(fsid_t)))
return (error);
if ((mntp = getvfs(&fsid)) == NULL)
if ((mntp = vfs_getvfs(&fsid)) == NULL)
addr = &lfs_allclean_wakeup;
else
addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;
@ -441,7 +455,7 @@ int
lfs_fastvget(mp, ino, daddr, vpp, dinp)
struct mount *mp;
ino_t ino;
daddr_t daddr;
ufs_daddr_t daddr;
struct vnode **vpp;
struct dinode *dinp;
{
@ -462,15 +476,13 @@ lfs_fastvget(mp, ino, daddr, vpp, dinp)
if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
lfs_vref(*vpp);
if ((*vpp)->v_flag & VXLOCK)
printf ("Cleaned vnode VXLOCKED\n");
clean_vnlocked++;
ip = VTOI(*vpp);
if (ip->i_flags & IN_LOCKED)
printf("cleaned vnode locked\n");
if (!(ip->i_flag & IN_MODIFIED)) {
if (lockstatus(&ip->i_lock))
clean_inlocked++;
if (!(ip->i_flag & IN_MODIFIED))
++ump->um_lfs->lfs_uinodes;
ip->i_flag |= IN_MODIFIED;
}
ip->i_flag |= IN_MODIFIED;
return (0);
}
@ -522,9 +534,6 @@ lfs_fastvget(mp, ino, daddr, vpp, dinp)
brelse(bp);
}
/* Inode was just read from user space or disk, make sure it's locked */
ip->i_flag |= IN_LOCKED;
/*
* Initialize the vnode from the inode, check for aliases. In all
* cases re-init ip, the underlying vnode/inode may have changed.

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_vfsops.c 8.7 (Berkeley) 4/16/94
* @(#)lfs_vfsops.c 8.20 (Berkeley) 6/10/95
*/
#include <sys/param.h>
@ -73,12 +73,42 @@ struct vfsops lfs_vfsops = {
lfs_fhtovp,
lfs_vptofh,
lfs_init,
lfs_sysctl,
};
int
/*
* Called by main() when ufs is going to be mounted as root.
*/
lfs_mountroot()
{
panic("lfs_mountroot"); /* XXX -- implement */
extern struct vnode *rootvp;
struct fs *fs;
struct mount *mp;
struct proc *p = curproc; /* XXX */
int error;
/*
* Get vnodes for swapdev and rootdev.
*/
if ((error = bdevvp(swapdev, &swapdev_vp)) ||
(error = bdevvp(rootdev, &rootvp))) {
printf("lfs_mountroot: can't setup bdevvp's");
return (error);
}
if (error = vfs_rootmountalloc("lfs", "root_device", &mp))
return (error);
if (error = lfs_mountfs(rootvp, mp, p)) {
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free(mp, M_MOUNT);
return (error);
}
simple_lock(&mountlist_slock);
CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
simple_unlock(&mountlist_slock);
(void)lfs_statfs(mp, &mp->mnt_stat, p);
vfs_unbusy(mp, p);
return (0);
}
/*
@ -99,6 +129,7 @@ lfs_mount(mp, path, data, ndp, p)
register struct lfs *fs; /* LFS */
u_int size;
int error;
mode_t accessmode;
if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args)))
return (error);
@ -113,15 +144,23 @@ lfs_mount(mp, path, data, ndp, p)
*/
if (mp->mnt_flag & MNT_UPDATE) {
ump = VFSTOUFS(mp);
#ifdef NOTLFS /* LFS */
fs = ump->um_fs;
if (fs->fs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
fs->fs_ronly = 0;
#else
fs = ump->um_lfs;
if (fs->lfs_ronly && (mp->mnt_flag & MNT_RDONLY) == 0)
if (fs->lfs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
/*
* If upgrade to read-write by non-root, then verify
* that user has necessary permissions on the device.
*/
if (p->p_ucred->cr_uid != 0) {
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY,
p);
if (error = VOP_ACCESS(ump->um_devvp,
VREAD | VWRITE, p->p_ucred, p)) {
VOP_UNLOCK(ump->um_devvp, 0, p);
return (error);
}
VOP_UNLOCK(ump->um_devvp, 0, p);
}
fs->lfs_ronly = 0;
#endif
}
if (args.fspec == 0) {
/*
* Process export requests.
@ -145,6 +184,21 @@ lfs_mount(mp, path, data, ndp, p)
vrele(devvp);
return (ENXIO);
}
/*
* If mount by non-root, then verify that user has necessary
* permissions on the device.
*/
if (p->p_ucred->cr_uid != 0) {
accessmode = VREAD;
if ((mp->mnt_flag & MNT_RDONLY) == 0)
accessmode |= VWRITE;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
vput(devvp);
return (error);
}
VOP_UNLOCK(devvp, 0, p);
}
if ((mp->mnt_flag & MNT_UPDATE) == 0)
error = lfs_mountfs(devvp, mp, p); /* LFS */
else {
@ -199,7 +253,9 @@ lfs_mountfs(devvp, mp, p)
struct partinfo dpart;
dev_t dev;
int error, i, ronly, size;
struct ucred *cred;
cred = p ? p->p_ucred : NOCRED;
/*
* Disallow multiple mounts of the same device.
* Disallow mounting of a device that is currently in use
@ -210,14 +266,14 @@ lfs_mountfs(devvp, mp, p)
return (error);
if (vcount(devvp) > 1 && devvp != rootvp)
return (EBUSY);
if (error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0))
if (error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0))
return (error);
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
if (error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))
return (error);
if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
size = DEV_BSIZE;
else {
size = dpart.disklab->d_secsize;
@ -234,7 +290,7 @@ lfs_mountfs(devvp, mp, p)
ump = NULL;
/* Read in the superblock. */
if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, NOCRED, &bp))
if (error = bread(devvp, LFS_LABELPAD / size, LFS_SBPAD, cred, &bp))
goto out;
fs = (struct lfs *)bp->b_data;
@ -272,7 +328,8 @@ lfs_mountfs(devvp, mp, p)
dev = devvp->v_rdev;
mp->mnt_data = (qaddr_t)ump;
mp->mnt_stat.f_fsid.val[0] = (long)dev;
mp->mnt_stat.f_fsid.val[1] = MOUNT_LFS;
mp->mnt_stat.f_fsid.val[1] = lfs_mount_type;
mp->mnt_maxsymlinklen = fs->lfs_maxsymlinklen;
mp->mnt_flag |= MNT_LOCAL;
ump->um_mountp = mp;
ump->um_dev = dev;
@ -300,7 +357,7 @@ lfs_mountfs(devvp, mp, p)
out:
if (bp)
brelse(bp);
(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
if (ump) {
free(ump->um_lfs, M_UFSMNT);
free(ump, M_UFSMNT);
@ -323,11 +380,8 @@ lfs_unmount(mp, mntflags, p)
int i, error, flags, ronly;
flags = 0;
if (mntflags & MNT_FORCE) {
if (!doforce || (mp->mnt_flag & MNT_ROOTFS))
return (EINVAL);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
}
ump = VFSTOUFS(mp);
fs = ump->um_lfs;
@ -383,17 +437,23 @@ lfs_statfs(mp, sbp, p)
fs = ump->um_lfs;
if (fs->lfs_magic != LFS_MAGIC)
panic("lfs_statfs: magic");
sbp->f_type = MOUNT_LFS;
sbp->f_bsize = fs->lfs_bsize;
sbp->f_bsize = fs->lfs_fsize;
sbp->f_iosize = fs->lfs_bsize;
sbp->f_blocks = dbtofsb(fs,fs->lfs_dsize);
sbp->f_bfree = dbtofsb(fs, fs->lfs_bfree);
sbp->f_bavail = (fs->lfs_dsize * (100 - fs->lfs_minfree) / 100) -
(fs->lfs_dsize - fs->lfs_bfree);
sbp->f_bavail = dbtofsb(fs, sbp->f_bavail);
sbp->f_blocks = dbtofrags(fs,fs->lfs_dsize);
sbp->f_bfree = dbtofrags(fs, fs->lfs_bfree);
/*
* To compute the available space. Subtract the minimum free
* from the total number of blocks in the file system. Set avail
* to the smaller of this number and fs->lfs_bfree.
*/
sbp->f_bavail = fs->lfs_dsize * (100 - fs->lfs_minfree) / 100;
sbp->f_bavail =
sbp->f_bavail > fs->lfs_bfree ? fs->lfs_bfree : sbp->f_bavail;
sbp->f_bavail = dbtofrags(fs, sbp->f_bavail);
sbp->f_files = fs->lfs_nfiles;
sbp->f_ffree = sbp->f_bfree * INOPB(fs);
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy((caddr_t)mp->mnt_stat.f_mntonname,
(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
@ -442,7 +502,7 @@ lfs_vget(mp, ino, vpp)
struct ifile *ifp;
struct vnode *vp;
struct ufsmount *ump;
daddr_t daddr;
ufs_daddr_t daddr;
dev_t dev;
int error;
@ -571,3 +631,17 @@ lfs_vptofh(vp, fhp)
ufhp->ufid_gen = ip->i_gen;
return (0);
}
/*
* Initialize the filesystem, most work done by ufs_init.
*/
int lfs_mount_type;
int
lfs_init(vfsp)
struct vfsconf *vfsp;
{
lfs_mount_type = vfsp->vfc_typenum;
return (ufs_init(vfsp));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1986, 1989, 1991, 1993
* Copyright (c) 1986, 1989, 1991, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lfs_vnops.c 8.5 (Berkeley) 12/30/93
* @(#)lfs_vnops.c 8.13 (Berkeley) 6/10/95
*/
#include <sys/param.h>
@ -67,6 +67,7 @@ struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
{ &vop_default_desc, vn_default_error },
{ &vop_lookup_desc, ufs_lookup }, /* lookup */
{ &vop_create_desc, ufs_create }, /* create */
{ &vop_whiteout_desc, ufs_whiteout }, /* whiteout */
{ &vop_mknod_desc, ufs_mknod }, /* mknod */
{ &vop_open_desc, ufs_open }, /* open */
{ &vop_close_desc, lfs_close }, /* close */
@ -75,8 +76,10 @@ struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
{ &vop_setattr_desc, ufs_setattr }, /* setattr */
{ &vop_read_desc, lfs_read }, /* read */
{ &vop_write_desc, lfs_write }, /* write */
{ &vop_lease_desc, ufs_lease_check }, /* lease */
{ &vop_ioctl_desc, ufs_ioctl }, /* ioctl */
{ &vop_select_desc, ufs_select }, /* select */
{ &vop_revoke_desc, ufs_revoke }, /* revoke */
{ &vop_mmap_desc, ufs_mmap }, /* mmap */
{ &vop_fsync_desc, lfs_fsync }, /* fsync */
{ &vop_seek_desc, ufs_seek }, /* seek */
@ -89,8 +92,8 @@ struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
{ &vop_readdir_desc, ufs_readdir }, /* readdir */
{ &vop_readlink_desc, ufs_readlink }, /* readlink */
{ &vop_abortop_desc, ufs_abortop }, /* abortop */
{ &vop_inactive_desc, lfs_inactive }, /* inactive */
{ &vop_reclaim_desc, ufs_reclaim }, /* reclaim */
{ &vop_inactive_desc, ufs_inactive }, /* inactive */
{ &vop_reclaim_desc, lfs_reclaim }, /* reclaim */
{ &vop_lock_desc, ufs_lock }, /* lock */
{ &vop_unlock_desc, ufs_unlock }, /* unlock */
{ &vop_bmap_desc, ufs_bmap }, /* bmap */
@ -123,8 +126,10 @@ struct vnodeopv_entry_desc lfs_specop_entries[] = {
{ &vop_setattr_desc, ufs_setattr }, /* setattr */
{ &vop_read_desc, ufsspec_read }, /* read */
{ &vop_write_desc, ufsspec_write }, /* write */
{ &vop_lease_desc, spec_lease_check }, /* lease */
{ &vop_ioctl_desc, spec_ioctl }, /* ioctl */
{ &vop_select_desc, spec_select }, /* select */
{ &vop_revoke_desc, spec_revoke }, /* revoke */
{ &vop_mmap_desc, spec_mmap }, /* mmap */
{ &vop_fsync_desc, spec_fsync }, /* fsync */
{ &vop_seek_desc, spec_seek }, /* seek */
@ -137,8 +142,8 @@ struct vnodeopv_entry_desc lfs_specop_entries[] = {
{ &vop_readdir_desc, spec_readdir }, /* readdir */
{ &vop_readlink_desc, spec_readlink }, /* readlink */
{ &vop_abortop_desc, spec_abortop }, /* abortop */
{ &vop_inactive_desc, lfs_inactive }, /* inactive */
{ &vop_reclaim_desc, ufs_reclaim }, /* reclaim */
{ &vop_inactive_desc, ufs_inactive }, /* inactive */
{ &vop_reclaim_desc, lfs_reclaim }, /* reclaim */
{ &vop_lock_desc, ufs_lock }, /* lock */
{ &vop_unlock_desc, ufs_unlock }, /* unlock */
{ &vop_bmap_desc, spec_bmap }, /* bmap */
@ -172,8 +177,10 @@ struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
{ &vop_setattr_desc, ufs_setattr }, /* setattr */
{ &vop_read_desc, ufsfifo_read }, /* read */
{ &vop_write_desc, ufsfifo_write }, /* write */
{ &vop_lease_desc, fifo_lease_check }, /* lease */
{ &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
{ &vop_select_desc, fifo_select }, /* select */
{ &vop_revoke_desc, fifo_revoke }, /* revoke */
{ &vop_mmap_desc, fifo_mmap }, /* mmap */
{ &vop_fsync_desc, fifo_fsync }, /* fsync */
{ &vop_seek_desc, fifo_seek }, /* seek */
@ -186,8 +193,8 @@ struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
{ &vop_readdir_desc, fifo_readdir }, /* readdir */
{ &vop_readlink_desc, fifo_readlink }, /* readlink */
{ &vop_abortop_desc, fifo_abortop }, /* abortop */
{ &vop_inactive_desc, lfs_inactive }, /* inactive */
{ &vop_reclaim_desc, ufs_reclaim }, /* reclaim */
{ &vop_inactive_desc, ufs_inactive }, /* inactive */
{ &vop_reclaim_desc, lfs_reclaim }, /* reclaim */
{ &vop_lock_desc, ufs_lock }, /* lock */
{ &vop_unlock_desc, ufs_unlock }, /* unlock */
{ &vop_bmap_desc, fifo_bmap }, /* bmap */
@ -371,10 +378,10 @@ lfs_link(ap)
{
int ret;
SET_DIROP(VTOI(ap->a_vp)->i_lfs);
MARK_VNODE(ap->a_vp);
SET_DIROP(VTOI(ap->a_tdvp)->i_lfs);
MARK_VNODE(ap->a_tdvp);
ret = ufs_link(ap);
SET_ENDOP(VTOI(ap->a_vp)->i_lfs);
SET_ENDOP(VTOI(ap->a_tdvp)->i_lfs);
return (ret);
}
@ -422,9 +429,12 @@ lfs_getattr(ap)
vap->va_gid = ip->i_gid;
vap->va_rdev = (dev_t)ip->i_rdev;
vap->va_size = ip->i_din.di_size;
vap->va_atime = ip->i_atime;
vap->va_mtime = ip->i_mtime;
vap->va_ctime = ip->i_ctime;
vap->va_atime.ts_sec = ip->i_atime;
vap->va_atime.ts_nsec = ip->i_atimensec;
vap->va_mtime.ts_sec = ip->i_mtime;
vap->va_mtime.ts_nsec = ip->i_mtimensec;
vap->va_ctime.ts_sec = ip->i_ctime;
vap->va_ctime.ts_nsec = ip->i_ctimensec;
vap->va_flags = ip->i_flags;
vap->va_gen = ip->i_gen;
/* this doesn't belong here */
@ -460,28 +470,33 @@ lfs_close(ap)
register struct inode *ip = VTOI(vp);
int mod;
if (vp->v_usecount > 1 && !(ip->i_flag & IN_LOCKED)) {
simple_lock(&vp->v_interlock);
if (vp->v_usecount > 1) {
mod = ip->i_flag & IN_MODIFIED;
ITIMES(ip, &time, &time);
if (!mod && ip->i_flag & IN_MODIFIED)
ip->i_lfs->lfs_uinodes++;
}
simple_unlock(&vp->v_interlock);
return (0);
}
/*
* Stub inactive routine that avoid calling ufs_inactive in some cases.
* Reclaim an inode so that it can be used for other purposes.
*/
int lfs_no_inactive = 0;
int
lfs_inactive(ap)
struct vop_inactive_args /* {
lfs_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
int error;
if (lfs_no_inactive)
if (error = ufs_reclaim(vp, ap->a_p))
return (error);
FREE(vp->v_data, M_LFSNODE);
vp->v_data = NULL;
return (0);
return (ufs_inactive(ap));
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)mfs_extern.h 8.1 (Berkeley) 6/11/93
* @(#)mfs_extern.h 8.4 (Berkeley) 3/30/95
*/
struct buf;
@ -40,6 +40,7 @@ struct proc;
struct statfs;
struct ucred;
struct vnode;
struct vfsconf;
__BEGIN_DECLS
int mfs_badop __P((void));
@ -47,13 +48,14 @@ int mfs_bmap __P((struct vop_bmap_args *));
int mfs_close __P((struct vop_close_args *));
void mfs_doio __P((struct buf *bp, caddr_t base));
int mfs_inactive __P((struct vop_inactive_args *)); /* XXX */
int mfs_reclaim __P((struct vop_reclaim_args *)); /* XXX */
int mfs_init __P((void));
int mfs_reclaim __P((struct vop_reclaim_args *));
int mfs_init __P((struct vfsconf *));
int mfs_ioctl __P((struct vop_ioctl_args *));
int mfs_mount __P((struct mount *mp,
char *path, caddr_t data, struct nameidata *ndp, struct proc *p));
int mfs_open __P((struct vop_open_args *));
int mfs_print __P((struct vop_print_args *)); /* XXX */
#define mfs_revoke vop_revoke
int mfs_start __P((struct mount *mp, int flags, struct proc *p));
int mfs_statfs __P((struct mount *mp, struct statfs *sbp, struct proc *p));
int mfs_strategy __P((struct vop_strategy_args *)); /* XXX */

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)mfs_vfsops.c 8.4 (Berkeley) 4/16/94
* @(#)mfs_vfsops.c 8.11 (Berkeley) 6/19/95
*/
#include <sys/param.h>
@ -77,36 +77,32 @@ struct vfsops mfs_vfsops = {
ffs_fhtovp,
ffs_vptofh,
mfs_init,
ffs_sysctl,
};
/*
* Called by main() when mfs is going to be mounted as root.
*
* Name is updated by mount(8) after booting.
*/
#define ROOTNAME "mfs_root"
mfs_mountroot()
{
extern struct vnode *rootvp;
register struct fs *fs;
register struct mount *mp;
struct fs *fs;
struct mount *mp;
struct proc *p = curproc; /* XXX */
struct ufsmount *ump;
struct mfsnode *mfsp;
u_int size;
int error;
/*
* Get vnodes for swapdev and rootdev.
*/
if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp))
panic("mfs_mountroot: can't setup bdevvp's");
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
mp->mnt_op = &mfs_vfsops;
mp->mnt_flag = MNT_RDONLY;
if ((error = bdevvp(swapdev, &swapdev_vp)) ||
(error = bdevvp(rootdev, &rootvp))) {
printf("mfs_mountroot: can't setup bdevvp's");
return (error);
}
if (error = vfs_rootmountalloc("mfs", "mfs_root", &mp))
return (error);
mfsp = malloc(sizeof *mfsp, M_MFSNODE, M_WAITOK);
rootvp->v_data = mfsp;
rootvp->v_op = mfs_vnodeop_p;
@ -117,30 +113,20 @@ mfs_mountroot()
mfsp->mfs_pid = p->p_pid;
mfsp->mfs_buflist = (struct buf *)0;
if (error = ffs_mountfs(rootvp, mp, p)) {
mp->mnt_vfc->vfc_refcount--;
vfs_unbusy(mp, p);
free(mp, M_MOUNT);
free(mfsp, M_MFSNODE);
return (error);
}
if (error = vfs_lock(mp)) {
(void)ffs_unmount(mp, 0, p);
free(mp, M_MOUNT);
free(mfsp, M_MFSNODE);
return (error);
}
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mp->mnt_flag |= MNT_ROOTFS;
mp->mnt_vnodecovered = NULLVP;
simple_lock(&mountlist_slock);
CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
simple_unlock(&mountlist_slock);
ump = VFSTOUFS(mp);
fs = ump->um_fs;
bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt));
fs->fs_fsmnt[0] = '/';
bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname,
MNAMELEN);
(void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
&size);
bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
(void) copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
(void)ffs_statfs(mp, &mp->mnt_stat, p);
vfs_unlock(mp);
vfs_unbusy(mp, p);
inittodr((time_t)0);
return (0);
}
@ -202,11 +188,7 @@ mfs_mount(mp, path, data, ndp, p)
flags = WRITECLOSE;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
if (vfs_busy(mp))
return (EBUSY);
error = ffs_flushfiles(mp, flags, p);
vfs_unbusy(mp);
if (error)
if (error = ffs_flushfiles(mp, flags, p))
return (error);
}
if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR))
@ -269,7 +251,6 @@ mfs_start(mp, flags, p)
register struct mfsnode *mfsp = VTOMFS(vp);
register struct buf *bp;
register caddr_t base;
int error = 0;
base = mfsp->mfs_baseoff;
while (mfsp->mfs_buflist != (struct buf *)(-1)) {
@ -284,11 +265,11 @@ mfs_start(mp, flags, p)
* otherwise we will loop here, as tsleep will always return
* EINTR/ERESTART.
*/
if (error = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0))
if (dounmount(mp, 0, p) != 0)
if (tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0) &&
dounmount(mp, 0, p) != 0)
CLRSIG(p, CURSIG(p));
}
return (error);
return (0);
}
/*
@ -302,6 +283,6 @@ mfs_statfs(mp, sbp, p)
int error;
error = ffs_statfs(mp, sbp, p);
sbp->f_type = MOUNT_MFS;
sbp->f_type = mp->mnt_vfc->vfc_typenum;
return (error);
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)mfs_vnops.c 8.3 (Berkeley) 9/21/93
* @(#)mfs_vnops.c 8.11 (Berkeley) 5/22/95
*/
#include <sys/param.h>
@ -51,12 +51,6 @@
#include <ufs/mfs/mfsiom.h>
#include <ufs/mfs/mfs_extern.h>
#if !defined(hp300) && !defined(i386) && !defined(mips) && !defined(sparc) && !defined(luna68k)
static int mfsmap_want; /* 1 => need kernel I/O resources */
struct map mfsmap[MFS_MAPSIZE];
extern char mfsiobuf[];
#endif
/*
* mfs vnode operations.
*/
@ -75,6 +69,7 @@ struct vnodeopv_entry_desc mfs_vnodeop_entries[] = {
{ &vop_write_desc, mfs_write }, /* write */
{ &vop_ioctl_desc, mfs_ioctl }, /* ioctl */
{ &vop_select_desc, mfs_select }, /* select */
{ &vop_revoke_desc, mfs_revoke }, /* revoke */
{ &vop_mmap_desc, mfs_mmap }, /* mmap */
{ &vop_fsync_desc, spec_fsync }, /* fsync */
{ &vop_seek_desc, mfs_seek }, /* seek */
@ -189,85 +184,6 @@ mfs_strategy(ap)
return (0);
}
#if defined(vax) || defined(tahoe)
/*
* Memory file system I/O.
*
* Essentially play ubasetup() and disk interrupt service routine by
* doing the copies to or from the memfs process. If doing physio
* (i.e. pagein), we must map the I/O through the kernel virtual
* address space.
*/
void
mfs_doio(bp, base)
register struct buf *bp;
caddr_t base;
{
register struct pte *pte, *ppte;
register caddr_t vaddr;
int off, npf, npf2, reg;
caddr_t kernaddr, offset;
/*
* For phys I/O, map the b_data into kernel virtual space using
* the Mfsiomap pte's.
*/
if ((bp->b_flags & B_PHYS) == 0) {
kernaddr = bp->b_data;
} else {
if (bp->b_flags & (B_PAGET | B_UAREA | B_DIRTY))
panic("swap on memfs?");
off = (int)bp->b_data & PGOFSET;
npf = btoc(bp->b_bcount + off);
/*
* Get some mapping page table entries
*/
while ((reg = rmalloc(mfsmap, (long)npf)) == 0) {
mfsmap_want++;
sleep((caddr_t)&mfsmap_want, PZERO-1);
}
reg--;
pte = vtopte(bp->b_proc, btop(bp->b_data));
/*
* Do vmaccess() but with the Mfsiomap page table.
*/
ppte = &Mfsiomap[reg];
vaddr = &mfsiobuf[reg * NBPG];
kernaddr = vaddr + off;
for (npf2 = npf; npf2; npf2--) {
mapin(ppte, (u_int)vaddr, pte->pg_pfnum,
(int)(PG_V|PG_KW));
#if defined(tahoe)
if ((bp->b_flags & B_READ) == 0)
mtpr(P1DC, vaddr);
#endif
ppte++;
pte++;
vaddr += NBPG;
}
}
offset = base + (bp->b_blkno << DEV_BSHIFT);
if (bp->b_flags & B_READ)
bp->b_error = copyin(offset, kernaddr, bp->b_bcount);
else
bp->b_error = copyout(kernaddr, offset, bp->b_bcount);
if (bp->b_error)
bp->b_flags |= B_ERROR;
/*
* Release pte's used by physical I/O.
*/
if (bp->b_flags & B_PHYS) {
rmfree(mfsmap, (long)npf, (long)++reg);
if (mfsmap_want) {
mfsmap_want = 0;
wakeup((caddr_t)&mfsmap_want);
}
}
biodone(bp);
}
#endif /* vax || tahoe */
#if defined(hp300) || defined(i386) || defined(mips) || defined(sparc) || defined(luna68k)
/*
* Memory file system I/O.
*
@ -288,7 +204,6 @@ mfs_doio(bp, base)
bp->b_flags |= B_ERROR;
biodone(bp);
}
#endif
/*
* This is a noop, simply returning what one has been given.
@ -297,9 +212,9 @@ int
mfs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
ufs_daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
ufs_daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
@ -308,6 +223,8 @@ mfs_bmap(ap)
*ap->a_vpp = ap->a_vp;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
return (0);
}
@ -368,13 +285,16 @@ int
mfs_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
register struct mfsnode *mfsp = VTOMFS(ap->a_vp);
struct vnode *vp = ap->a_vp;
struct mfsnode *mfsp = VTOMFS(vp);
if (mfsp->mfs_buflist && mfsp->mfs_buflist != (struct buf *)(-1))
panic("mfs_inactive: not inactive (mfs_buflist %x)",
mfsp->mfs_buflist);
VOP_UNLOCK(vp, 0, ap->a_p);
return (0);
}
@ -387,9 +307,10 @@ mfs_reclaim(ap)
struct vnode *a_vp;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
FREE(ap->a_vp->v_data, M_MFSNODE);
ap->a_vp->v_data = NULL;
FREE(vp->v_data, M_MFSNODE);
vp->v_data = NULL;
return (0);
}
@ -423,10 +344,9 @@ mfs_badop()
/*
* Memory based filesystem initialization.
*/
mfs_init()
mfs_init(vfsp)
struct vfsconf *vfsp;
{
#if !defined(hp300) && !defined(i386) && !defined(mips) && !defined(sparc) && !defined(luna68k)
rminit(mfsmap, (long)MFS_MAPREG, (long)1, "mfs mapreg", MFS_MAPSIZE);
#endif
return;
}

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)mfsnode.h 8.2 (Berkeley) 8/11/93
* @(#)mfsnode.h 8.3 (Berkeley) 5/19/95
*/
/*
@ -73,9 +73,9 @@ struct mfsnode {
#define mfs_readdir ((int (*) __P((struct vop_readdir_args *)))mfs_badop)
#define mfs_readlink ((int (*) __P((struct vop_readlink_args *)))mfs_badop)
#define mfs_abortop ((int (*) __P((struct vop_abortop_args *)))mfs_badop)
#define mfs_lock ((int (*) __P((struct vop_lock_args *)))nullop)
#define mfs_unlock ((int (*) __P((struct vop_unlock_args *)))nullop)
#define mfs_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
#define mfs_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define mfs_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
#define mfs_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
#define mfs_pathconf ((int (*) __P((struct vop_pathconf_args *)))mfs_badop)
#define mfs_advlock ((int (*) __P((struct vop_advlock_args *)))mfs_badop)
#define mfs_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))mfs_badop)

View File

@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)dinode.h 8.3 (Berkeley) 1/21/94
* @(#)dinode.h 8.9 (Berkeley) 3/29/95
*/
/*
@ -46,33 +46,47 @@
*/
#define ROOTINO ((ino_t)2)
/*
* The Whiteout inode# is a dummy non-zero inode number which will
* never be allocated to a real file. It is used as a place holder
* in the directory entry which has been tagged as a DT_W entry.
* See the comments about ROOTINO above.
*/
#define WINO ((ino_t)1)
/*
* A dinode contains all the meta-data associated with a UFS file.
* This structure defines the on-disk format of a dinode.
* This structure defines the on-disk format of a dinode. Since
* this structure describes an on-disk structure, all its fields
* are defined by types with precise widths.
*/
typedef int32_t ufs_daddr_t;
#define NDADDR 12 /* Direct addresses in inode. */
#define NIADDR 3 /* Indirect addresses in inode. */
struct dinode {
u_short di_mode; /* 0: IFMT and permissions. */
short di_nlink; /* 2: File link count. */
u_int16_t di_mode; /* 0: IFMT, permissions; see below. */
int16_t di_nlink; /* 2: File link count. */
union {
u_short oldids[2]; /* 4: Ffs: old user and group ids. */
ino_t inumber; /* 4: Lfs: inode number. */
u_int16_t oldids[2]; /* 4: Ffs: old user and group ids. */
int32_t inumber; /* 4: Lfs: inode number. */
} di_u;
u_quad_t di_size; /* 8: File byte count. */
struct timespec di_atime; /* 16: Last access time. */
struct timespec di_mtime; /* 24: Last modified time. */
struct timespec di_ctime; /* 32: Last inode change time. */
daddr_t di_db[NDADDR]; /* 40: Direct disk blocks. */
daddr_t di_ib[NIADDR]; /* 88: Indirect disk blocks. */
u_long di_flags; /* 100: Status flags (chflags). */
long di_blocks; /* 104: Blocks actually held. */
long di_gen; /* 108: Generation number. */
u_long di_uid; /* 112: File owner. */
u_long di_gid; /* 116: File group. */
long di_spare[2]; /* 120: Reserved; currently unused */
u_int64_t di_size; /* 8: File byte count. */
int32_t di_atime; /* 16: Last access time. */
int32_t di_atimensec; /* 20: Last access time. */
int32_t di_mtime; /* 24: Last modified time. */
int32_t di_mtimensec; /* 28: Last modified time. */
int32_t di_ctime; /* 32: Last inode change time. */
int32_t di_ctimensec; /* 36: Last inode change time. */
ufs_daddr_t di_db[NDADDR]; /* 40: Direct disk blocks. */
ufs_daddr_t di_ib[NIADDR]; /* 88: Indirect disk blocks. */
u_int32_t di_flags; /* 100: Status flags (chflags). */
u_int32_t di_blocks; /* 104: Blocks actually held. */
int32_t di_gen; /* 108: Generation number. */
u_int32_t di_uid; /* 112: File owner. */
u_int32_t di_gid; /* 116: File group. */
int32_t di_spare[2]; /* 120: Reserved; currently unused */
};
/*
@ -87,9 +101,9 @@ struct dinode {
#define di_ouid di_u.oldids[0]
#define di_rdev di_db[0]
#define di_shortlink di_db
#define MAXSYMLINKLEN ((NDADDR + NIADDR) * sizeof(daddr_t))
#define MAXSYMLINKLEN ((NDADDR + NIADDR) * sizeof(ufs_daddr_t))
/* File modes. */
/* File permissions. */
#define IEXEC 0000100 /* Executable. */
#define IWRITE 0000200 /* Writeable. */
#define IREAD 0000400 /* Readable. */
@ -106,3 +120,4 @@ struct dinode {
#define IFREG 0100000 /* Regular file. */
#define IFLNK 0120000 /* Symbolic link. */
#define IFSOCK 0140000 /* UNIX domain socket. */
#define IFWHT 0160000 /* Whiteout. */

View File

@ -35,12 +35,20 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)dir.h 8.2 (Berkeley) 1/21/94
* @(#)dir.h 8.5 (Berkeley) 4/27/95
*/
#ifndef _DIR_H_
#define _DIR_H_
/*
* Theoretically, directories can be more than 2Gb in length, however, in
* practice this seems unlikely. So, we define the type doff_t as a 32-bit
* quantity to keep down the cost of doing lookup on a 32-bit machine.
*/
#define doff_t int32_t
#define MAXDIRSIZE (0x7fffffff)
/*
* A directory consists of some number of blocks of DIRBLKSIZ
* bytes, where DIRBLKSIZ is chosen such that it can be transferred
@ -70,11 +78,11 @@
#define MAXNAMLEN 255
struct direct {
u_long d_ino; /* inode number of entry */
u_short d_reclen; /* length of this record */
u_char d_type; /* file type, see below */
u_char d_namlen; /* length of string in d_name */
char d_name[MAXNAMLEN + 1]; /* name with length <= MAXNAMLEN */
u_int32_t d_ino; /* inode number of entry */
u_int16_t d_reclen; /* length of this record */
u_int8_t d_type; /* file type, see below */
u_int8_t d_namlen; /* length of string in d_name */
char d_name[MAXNAMLEN + 1];/* name with length <= MAXNAMLEN */
};
/*
@ -88,6 +96,7 @@ struct direct {
#define DT_REG 8
#define DT_LNK 10
#define DT_SOCK 12
#define DT_WHT 14
/*
* Convert between stat structure types and directory types.
@ -104,30 +113,29 @@ struct direct {
#if (BYTE_ORDER == LITTLE_ENDIAN)
#define DIRSIZ(oldfmt, dp) \
((oldfmt) ? \
((sizeof (struct direct) - (MAXNAMLEN+1)) + (((dp)->d_type+1 + 3) &~ 3)) : \
((sizeof (struct direct) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3)))
((sizeof(struct direct) - (MAXNAMLEN+1)) + (((dp)->d_type+1 + 3) &~ 3)) : \
((sizeof(struct direct) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3)))
#else
#define DIRSIZ(oldfmt, dp) \
((sizeof (struct direct) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3))
((sizeof(struct direct) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3))
#endif
#define OLDDIRFMT 1
#define NEWDIRFMT 0
/*
* Template for manipulating directories.
* Should use struct direct's, but the name field
* is MAXNAMLEN - 1, and this just won't do.
* Template for manipulating directories. Should use struct direct's,
* but the name field is MAXNAMLEN - 1, and this just won't do.
*/
struct dirtemplate {
u_long dot_ino;
short dot_reclen;
u_char dot_type;
u_char dot_namlen;
u_int32_t dot_ino;
int16_t dot_reclen;
u_int8_t dot_type;
u_int8_t dot_namlen;
char dot_name[4]; /* must be multiple of 4 */
u_long dotdot_ino;
short dotdot_reclen;
u_char dotdot_type;
u_char dotdot_namlen;
u_int32_t dotdot_ino;
int16_t dotdot_reclen;
u_int8_t dotdot_type;
u_int8_t dotdot_namlen;
char dotdot_name[4]; /* ditto */
};
@ -135,13 +143,13 @@ struct dirtemplate {
* This is the old format of directories, sanz type element.
*/
struct odirtemplate {
u_long dot_ino;
short dot_reclen;
u_short dot_namlen;
u_int32_t dot_ino;
int16_t dot_reclen;
u_int16_t dot_namlen;
char dot_name[4]; /* must be multiple of 4 */
u_long dotdot_ino;
short dotdot_reclen;
u_short dotdot_namlen;
u_int32_t dotdot_ino;
int16_t dotdot_reclen;
u_int16_t dotdot_namlen;
char dotdot_name[4]; /* ditto */
};
#endif /* !_DIR_H_ */

View File

@ -35,59 +35,49 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)inode.h 8.4 (Berkeley) 1/21/94
* @(#)inode.h 8.9 (Berkeley) 5/14/95
*/
#include <ufs/ufs/dir.h>
#include <ufs/ufs/dinode.h>
/*
* Theoretically, directories can be more than 2Gb in length, however, in
* practice this seems unlikely. So, we define the type doff_t as a long
* to keep down the cost of doing lookup on a 32-bit machine. If you are
* porting to a 64-bit architecture, you should make doff_t the same as off_t.
*/
#define doff_t long
/*
* The inode is used to describe each active (or recently active)
* file in the UFS filesystem. It is composed of two types of
* information. The first part is the information that is needed
* only while the file is active (such as the identity of the file
* and linkage to speed its lookup). The second part is the
* permannent meta-data associated with the file which is read
* in from the permanent dinode from long term storage when the
* file becomes active, and is put back when the file is no longer
* being used.
* The inode is used to describe each active (or recently active) file in the
* UFS filesystem. It is composed of two types of information. The first part
* is the information that is needed only while the file is active (such as
* the identity of the file and linkage to speed its lookup). The second part
* is * the permanent meta-data associated with the file which is read in
* from the permanent dinode from long term storage when the file becomes
* active, and is put back when the file is no longer being used.
*/
struct inode {
struct inode *i_next; /* Hash chain forward. */
struct inode **i_prev; /* Hash chain back. */
struct vnode *i_vnode; /* Vnode associated with this inode. */
struct vnode *i_devvp; /* Vnode for block I/O. */
u_long i_flag; /* I* flags. */
LIST_ENTRY(inode) i_hash;/* Hash chain. */
struct vnode *i_vnode;/* Vnode associated with this inode. */
struct vnode *i_devvp;/* Vnode for block I/O. */
u_int32_t i_flag; /* flags, see below */
dev_t i_dev; /* Device associated with the inode. */
ino_t i_number; /* The identity of the inode. */
union { /* Associated filesystem. */
struct fs *fs; /* FFS */
struct lfs *lfs; /* LFS */
} inode_u;
#define i_fs inode_u.fs
#define i_lfs inode_u.lfs
struct dquot *i_dquot[MAXQUOTAS]; /* Dquot structures. */
u_quad_t i_modrev; /* Revision level for lease. */
struct lockf *i_lockf; /* Head of byte-level lock list. */
pid_t i_lockholder; /* DEBUG: holder of inode lock. */
pid_t i_lockwaiter; /* DEBUG: latest blocked for inode lock. */
u_quad_t i_modrev; /* Revision level for NFS lease. */
struct lockf *i_lockf;/* Head of byte-level lock list. */
struct lock i_lock; /* Inode lock. */
/*
* Side effects; used during directory lookup.
*/
long i_count; /* Size of free slot in directory. */
int32_t i_count; /* Size of free slot in directory. */
doff_t i_endoff; /* End of useful stuff in directory. */
doff_t i_diroff; /* Offset in dir, where we found last entry. */
doff_t i_offset; /* Offset of free space in directory. */
ino_t i_ino; /* Inode number of found directory. */
u_long i_reclen; /* Size of found directory entry. */
long i_spare[11]; /* Spares to round up to 128 bytes. */
u_int32_t i_reclen; /* Size of found directory entry. */
/*
* The on-disk dinode itself.
*/
@ -95,8 +85,10 @@ struct inode {
};
#define i_atime i_din.di_atime
#define i_atimensec i_din.di_atimensec
#define i_blocks i_din.di_blocks
#define i_ctime i_din.di_ctime
#define i_ctimensec i_din.di_ctimensec
#define i_db i_din.di_db
#define i_flags i_din.di_flags
#define i_gen i_din.di_gen
@ -104,6 +96,7 @@ struct inode {
#define i_ib i_din.di_ib
#define i_mode i_din.di_mode
#define i_mtime i_din.di_mtime
#define i_mtimensec i_din.di_mtimensec
#define i_nlink i_din.di_nlink
#define i_rdev i_din.di_rdev
#define i_shortlink i_din.di_shortlink
@ -113,14 +106,11 @@ struct inode {
/* These flags are kept in i_flag. */
#define IN_ACCESS 0x0001 /* Access time update request. */
#define IN_CHANGE 0x0002 /* Inode change time update request. */
#define IN_EXLOCK 0x0004 /* File has exclusive lock. */
#define IN_LOCKED 0x0008 /* Inode lock. */
#define IN_LWAIT 0x0010 /* Process waiting on file lock. */
#define IN_MODIFIED 0x0020 /* Inode has been modified. */
#define IN_RENAME 0x0040 /* Inode is being renamed. */
#define IN_SHLOCK 0x0080 /* File has shared lock. */
#define IN_UPDATE 0x0100 /* Modification time update request. */
#define IN_WANTED 0x0200 /* Inode is wanted by a process. */
#define IN_UPDATE 0x0004 /* Modification time update request. */
#define IN_MODIFIED 0x0008 /* Inode has been modified. */
#define IN_RENAME 0x0010 /* Inode is being renamed. */
#define IN_SHLOCK 0x0020 /* File has shared lock. */
#define IN_EXLOCK 0x0040 /* File has exclusive lock. */
#ifdef KERNEL
/*
@ -128,7 +118,7 @@ struct inode {
* ufs_getlbns and used by truncate and bmap code.
*/
struct indir {
daddr_t in_lbn; /* Logical block number. */
ufs_daddr_t in_lbn; /* Logical block number. */
int in_off; /* Offset in buffer. */
int in_exists; /* Flag if the block exists. */
};
@ -141,22 +131,22 @@ struct indir {
if ((ip)->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) { \
(ip)->i_flag |= IN_MODIFIED; \
if ((ip)->i_flag & IN_ACCESS) \
(ip)->i_atime.ts_sec = (t1)->tv_sec; \
(ip)->i_atime = (t1)->tv_sec; \
if ((ip)->i_flag & IN_UPDATE) { \
(ip)->i_mtime.ts_sec = (t2)->tv_sec; \
(ip)->i_mtime = (t2)->tv_sec; \
(ip)->i_modrev++; \
} \
if ((ip)->i_flag & IN_CHANGE) \
(ip)->i_ctime.ts_sec = time.tv_sec; \
(ip)->i_ctime = time.tv_sec; \
(ip)->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE); \
} \
}
/* This overlays the fid structure (see mount.h). */
struct ufid {
u_short ufid_len; /* Length of structure. */
u_short ufid_pad; /* Force long alignment. */
u_int16_t ufid_len; /* Length of structure. */
u_int16_t ufid_pad; /* Force 32-bit alignment. */
ino_t ufid_ino; /* File number (ino). */
long ufid_gen; /* Generation number. */
int32_t ufid_gen; /* Generation number. */
};
#endif /* KERNEL */

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)lockf.h 8.1 (Berkeley) 6/11/93
* @(#)lockf.h 8.2 (Berkeley) 10/26/94
*/
/*
@ -42,15 +42,18 @@
* the inode structure. Locks are sorted by the starting byte of the lock for
* efficiency.
*/
TAILQ_HEAD(locklist, lockf);
struct lockf {
short lf_flags; /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
short lf_flags; /* Semantics: F_POSIX, F_FLOCK, F_WAIT */
short lf_type; /* Lock type: F_RDLCK, F_WRLCK */
off_t lf_start; /* The byte # of the start of the lock */
off_t lf_end; /* The byte # of the end of the lock (-1=EOF)*/
caddr_t lf_id; /* The id of the resource holding the lock */
off_t lf_start; /* Byte # of the start of the lock */
off_t lf_end; /* Byte # of the end of the lock (-1=EOF) */
caddr_t lf_id; /* Id of the resource holding the lock */
struct inode *lf_inode; /* Back pointer to the inode */
struct lockf *lf_next; /* A pointer to the next lock on this inode */
struct lockf *lf_block; /* The list of blocked locks */
struct lockf *lf_next; /* Pointer to the next lock on this inode */
struct locklist lf_blkhd; /* List of requests blocked on this lock */
TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */
};
/* Maximum length of sleep chains to traverse to try and detect deadlock. */

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)quota.h 8.1 (Berkeley) 6/11/93
* @(#)quota.h 8.3 (Berkeley) 8/19/94
*/
#ifndef _QUOTA_
@ -48,8 +48,8 @@
* failure). The timer is started when the user crosses their soft limit, it
* is reset when they go below their soft limit.
*/
#define MAX_IQ_TIME (7*24*60*60) /* 1 week */
#define MAX_DQ_TIME (7*24*60*60) /* 1 week */
#define MAX_IQ_TIME (7*24*60*60) /* seconds in 1 week */
#define MAX_DQ_TIME (7*24*60*60) /* seconds in 1 week */
/*
* The following constants define the usage of the quota file array in the
@ -97,12 +97,12 @@
* structure).
*/
struct dqblk {
u_long dqb_bhardlimit; /* absolute limit on disk blks alloc */
u_long dqb_bsoftlimit; /* preferred limit on disk blks */
u_long dqb_curblocks; /* current block count */
u_long dqb_ihardlimit; /* maximum # allocated inodes + 1 */
u_long dqb_isoftlimit; /* preferred inode limit */
u_long dqb_curinodes; /* current # allocated inodes */
u_int32_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
u_int32_t dqb_bsoftlimit; /* preferred limit on disk blks */
u_int32_t dqb_curblocks; /* current block count */
u_int32_t dqb_ihardlimit; /* maximum # allocated inodes + 1 */
u_int32_t dqb_isoftlimit; /* preferred inode limit */
u_int32_t dqb_curinodes; /* current # allocated inodes */
time_t dqb_btime; /* time limit for excessive disk use */
time_t dqb_itime; /* time limit for excessive files */
};
@ -114,13 +114,13 @@ struct dqblk {
* used entries.
*/
struct dquot {
struct dquot *dq_forw, **dq_back; /* hash list */
struct dquot *dq_freef, **dq_freeb; /* free list */
short dq_flags; /* flags, see below */
short dq_cnt; /* count of active references */
short dq_spare; /* unused spare padding */
short dq_type; /* quota type of this dquot */
u_long dq_id; /* identifier this applies to */
LIST_ENTRY(dquot) dq_hash; /* hash list */
TAILQ_ENTRY(dquot) dq_freelist; /* free list */
u_int16_t dq_flags; /* flags, see below */
u_int16_t dq_cnt; /* count of active references */
u_int16_t dq_spare; /* unused spare padding */
u_int16_t dq_type; /* quota type of this dquot */
u_int32_t dq_id; /* identifier this applies to */
struct ufsmount *dq_ump; /* filesystem that this is taken from */
struct dqblk dq_dqb; /* actual usage & quotas */
};
@ -146,11 +146,11 @@ struct dquot {
#define dq_itime dq_dqb.dqb_itime
/*
* If the system has never checked for a quota for this file, then it is set
* to NODQUOT. Once a write attempt is made the inode pointer is set to
* reference a dquot structure.
* If the system has never checked for a quota for this file, then it is
* set to NODQUOT. Once a write attempt is made the inode pointer is set
* to reference a dquot structure.
*/
#define NODQUOT ((struct dquot *) 0)
#define NODQUOT NULL
/*
* Flags to chkdq() and chkiq()

View File

@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_bmap.c 8.6 (Berkeley) 1/21/94
* @(#)ufs_bmap.c 8.7 (Berkeley) 3/21/95
*/
#include <sys/param.h>
@ -62,9 +62,9 @@ int
ufs_bmap(ap)
struct vop_bmap_args /* {
struct vnode *a_vp;
daddr_t a_bn;
ufs_daddr_t a_bn;
struct vnode **a_vpp;
daddr_t *a_bnp;
ufs_daddr_t *a_bnp;
int *a_runp;
} */ *ap;
{
@ -98,8 +98,8 @@ ufs_bmap(ap)
int
ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
struct vnode *vp;
register daddr_t bn;
daddr_t *bnp;
ufs_daddr_t bn;
ufs_daddr_t *bnp;
struct indir *ap;
int *nump;
int *runp;
@ -110,7 +110,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
struct mount *mp;
struct vnode *devvp;
struct indir a[NIADDR], *xap;
daddr_t daddr;
ufs_daddr_t daddr;
long metalbn;
int error, maxrun, num;
@ -194,12 +194,13 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
}
}
daddr = ((daddr_t *)bp->b_data)[xap->in_off];
daddr = ((ufs_daddr_t *)bp->b_data)[xap->in_off];
if (num == 1 && daddr && runp)
for (bn = xap->in_off + 1;
bn < MNINDIR(ump) && *runp < maxrun &&
is_sequential(ump, ((daddr_t *)bp->b_data)[bn - 1],
((daddr_t *)bp->b_data)[bn]);
is_sequential(ump,
((ufs_daddr_t *)bp->b_data)[bn - 1],
((ufs_daddr_t *)bp->b_data)[bn]);
++bn, ++*runp);
}
if (bp)
@ -222,7 +223,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
int
ufs_getlbns(vp, bn, ap, nump)
struct vnode *vp;
register daddr_t bn;
ufs_daddr_t bn;
struct indir *ap;
int *nump;
{

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_extern.h 8.3 (Berkeley) 4/16/94
* @(#)ufs_extern.h 8.10 (Berkeley) 5/14/95
*/
struct buf;
@ -44,10 +44,11 @@ struct mount;
struct nameidata;
struct proc;
struct ucred;
struct ufs_args;
struct uio;
struct vattr;
struct vfsconf;
struct vnode;
struct ufs_args;
__BEGIN_DECLS
void diskerr
@ -75,7 +76,7 @@ int ufs_dirremove __P((struct vnode *, struct componentname*));
int ufs_dirrewrite
__P((struct inode *, struct inode *, struct componentname *));
int ufs_getattr __P((struct vop_getattr_args *));
int ufs_getlbns __P((struct vnode *, daddr_t, struct indir *, int *));
int ufs_getlbns __P((struct vnode *, ufs_daddr_t, struct indir *, int *));
struct vnode *
ufs_ihashget __P((dev_t, ino_t));
void ufs_ihashinit __P((void));
@ -84,9 +85,15 @@ struct vnode *
ufs_ihashlookup __P((dev_t, ino_t));
void ufs_ihashrem __P((struct inode *));
int ufs_inactive __P((struct vop_inactive_args *));
int ufs_init __P((void));
int ufs_init __P((struct vfsconf *));
int ufs_ioctl __P((struct vop_ioctl_args *));
int ufs_islocked __P((struct vop_islocked_args *));
#ifdef NFS
int lease_check __P((struct vop_lease_args *));
#define ufs_lease_check lease_check
#else
#define ufs_lease_check ((int (*) __P((struct vop_lease_args *)))nullop)
#endif
int ufs_link __P((struct vop_link_args *));
int ufs_lock __P((struct vop_lock_args *));
int ufs_lookup __P((struct vop_lookup_args *));
@ -99,9 +106,10 @@ int ufs_pathconf __P((struct vop_pathconf_args *));
int ufs_print __P((struct vop_print_args *));
int ufs_readdir __P((struct vop_readdir_args *));
int ufs_readlink __P((struct vop_readlink_args *));
int ufs_reclaim __P((struct vop_reclaim_args *));
int ufs_reclaim __P((struct vnode *, struct proc *));
int ufs_remove __P((struct vop_remove_args *));
int ufs_rename __P((struct vop_rename_args *));
#define ufs_revoke vop_revoke
int ufs_rmdir __P((struct vop_rmdir_args *));
int ufs_root __P((struct mount *, struct vnode **));
int ufs_seek __P((struct vop_seek_args *));
@ -111,6 +119,7 @@ int ufs_start __P((struct mount *, int, struct proc *));
int ufs_strategy __P((struct vop_strategy_args *));
int ufs_symlink __P((struct vop_symlink_args *));
int ufs_unlock __P((struct vop_unlock_args *));
int ufs_whiteout __P((struct vop_whiteout_args *));
int ufs_vinit __P((struct mount *,
int (**)(), int (**)(), struct vnode **));
int ufsspec_close __P((struct vop_close_args *));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
* Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_ihash.c 8.4 (Berkeley) 12/30/93
* @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95
*/
#include <sys/param.h>
@ -46,9 +46,10 @@
/*
* Structures associated with inode cacheing.
*/
struct inode **ihashtbl;
LIST_HEAD(ihashhead, inode) *ihashtbl;
u_long ihash; /* size of hash table - 1 */
#define INOHASH(device, inum) (((device) + (inum)) & ihash)
#define INOHASH(device, inum) (&ihashtbl[((device) + (inum)) & ihash])
struct simplelock ufs_ihash_slock;
/*
* Initialize inode hash table.
@ -58,6 +59,7 @@ ufs_ihashinit()
{
ihashtbl = hashinit(desiredvnodes, M_UFSMNT, &ihash);
simple_lock_init(&ufs_ihash_slock);
}
/*
@ -65,19 +67,21 @@ ufs_ihashinit()
* to it. If it is in core, return it, even if it is locked.
*/
struct vnode *
ufs_ihashlookup(device, inum)
dev_t device;
ufs_ihashlookup(dev, inum)
dev_t dev;
ino_t inum;
{
register struct inode *ip;
struct inode *ip;
for (ip = ihashtbl[INOHASH(device, inum)];; ip = ip->i_next) {
if (ip == NULL)
return (NULL);
if (inum == ip->i_number && device == ip->i_dev)
simple_lock(&ufs_ihash_slock);
for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next)
if (inum == ip->i_number && dev == ip->i_dev)
break;
simple_unlock(&ufs_ihash_slock);
if (ip)
return (ITOV(ip));
}
/* NOTREACHED */
return (NULLVP);
}
/*
@ -85,54 +89,47 @@ ufs_ihashlookup(device, inum)
* to it. If it is in core, but locked, wait for it.
*/
struct vnode *
ufs_ihashget(device, inum)
dev_t device;
ufs_ihashget(dev, inum)
dev_t dev;
ino_t inum;
{
register struct inode *ip;
struct proc *p = curproc; /* XXX */
struct inode *ip;
struct vnode *vp;
for (;;)
for (ip = ihashtbl[INOHASH(device, inum)];; ip = ip->i_next) {
if (ip == NULL)
return (NULL);
if (inum == ip->i_number && device == ip->i_dev) {
if (ip->i_flag & IN_LOCKED) {
ip->i_flag |= IN_WANTED;
sleep(ip, PINOD);
break;
}
loop:
simple_lock(&ufs_ihash_slock);
for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
if (inum == ip->i_number && dev == ip->i_dev) {
vp = ITOV(ip);
if (!vget(vp, 1))
simple_lock(&vp->v_interlock);
simple_unlock(&ufs_ihash_slock);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
goto loop;
return (vp);
break;
}
}
/* NOTREACHED */
simple_unlock(&ufs_ihash_slock);
return (NULL);
}
/*
* Insert the inode into the hash table, and return it locked.
* Insert the inode into the hash table, and return it locked.
*/
void
ufs_ihashins(ip)
struct inode *ip;
{
struct inode **ipp, *iq;
struct proc *p = curproc; /* XXX */
struct ihashhead *ipp;
ipp = &ihashtbl[INOHASH(ip->i_dev, ip->i_number)];
if (iq = *ipp)
iq->i_prev = &ip->i_next;
ip->i_next = iq;
ip->i_prev = ipp;
*ipp = ip;
if (ip->i_flag & IN_LOCKED)
panic("ufs_ihashins: already locked");
if (curproc)
ip->i_lockholder = curproc->p_pid;
else
ip->i_lockholder = -1;
ip->i_flag |= IN_LOCKED;
/* lock the inode, then put it on the appropriate hash list */
lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct simplelock *)0, p);
simple_lock(&ufs_ihash_slock);
ipp = INOHASH(ip->i_dev, ip->i_number);
LIST_INSERT_HEAD(ipp, ip, i_hash);
simple_unlock(&ufs_ihash_slock);
}
/*
@ -140,15 +137,15 @@ ufs_ihashins(ip)
*/
void
ufs_ihashrem(ip)
register struct inode *ip;
struct inode *ip;
{
register struct inode *iq;
struct inode *iq;
if (iq = ip->i_next)
iq->i_prev = ip->i_prev;
*ip->i_prev = iq;
simple_lock(&ufs_ihash_slock);
LIST_REMOVE(ip, i_hash);
#ifdef DIAGNOSTIC
ip->i_next = NULL;
ip->i_prev = NULL;
ip->i_hash.le_next = NULL;
ip->i_hash.le_prev = NULL;
#endif
simple_unlock(&ufs_ihash_slock);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1991, 1993
* Copyright (c) 1991, 1993, 1995
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_inode.c 8.4 (Berkeley) 1/21/94
* @(#)ufs_inode.c 8.9 (Berkeley) 5/14/95
*/
#include <sys/param.h>
@ -54,24 +54,6 @@
u_long nextgennumber; /* Next generation number to assign. */
int prtactive = 0; /* 1 => print out reclaim of active vnodes */
int
ufs_init()
{
static int first = 1;
if (!first)
return (0);
first = 0;
#ifdef DIAGNOSTIC
if ((sizeof(struct inode) - 1) & sizeof(struct inode))
printf("ufs_init: bad size %d\n", sizeof(struct inode));
#endif
ufs_ihashinit();
dqinit();
return (0);
}
/*
* Last reference to an inode. If necessary, write or delete it.
*/
@ -79,40 +61,30 @@ int
ufs_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct proc *a_p;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
register struct inode *ip = VTOI(vp);
struct vnode *vp = ap->a_vp;
struct inode *ip = VTOI(vp);
struct proc *p = ap->a_p;
struct timeval tv;
int mode, error;
int mode, error = 0;
extern int prtactive;
if (prtactive && vp->v_usecount != 0)
vprint("ffs_inactive: pushing active", vp);
/* Get rid of inodes related to stale file handles. */
if (ip->i_mode == 0) {
if ((vp->v_flag & VXLOCK) == 0)
vgone(vp);
return (0);
}
error = 0;
#ifdef DIAGNOSTIC
if (VOP_ISLOCKED(vp))
panic("ffs_inactive: locked inode");
if (curproc)
ip->i_lockholder = curproc->p_pid;
else
ip->i_lockholder = -1;
#endif
ip->i_flag |= IN_LOCKED;
/*
* Ignore inodes related to stale file handles.
*/
if (ip->i_mode == 0)
goto out;
if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef QUOTA
if (!getinoquota(ip))
(void)chkiq(ip, -1, NOCRED, 0);
#endif
error = VOP_TRUNCATE(vp, (off_t)0, 0, NOCRED, NULL);
error = VOP_TRUNCATE(vp, (off_t)0, 0, NOCRED, p);
ip->i_rdev = 0;
mode = ip->i_mode;
ip->i_mode = 0;
@ -123,13 +95,14 @@ ufs_inactive(ap)
tv = time;
VOP_UPDATE(vp, &tv, &tv, 0);
}
VOP_UNLOCK(vp);
out:
VOP_UNLOCK(vp, 0, p);
/*
* If we are done with the inode, reclaim it
* so that it can be reused immediately.
*/
if (vp->v_usecount == 0 && ip->i_mode == 0)
vgone(vp);
if (ip->i_mode == 0)
vrecycle(vp, (struct simplelock *)0, p);
return (error);
}
@ -137,14 +110,13 @@ ufs_inactive(ap)
* Reclaim an inode so that it can be used for other purposes.
*/
int
ufs_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
} */ *ap;
ufs_reclaim(vp, p)
struct vnode *vp;
struct proc *p;
{
register struct vnode *vp = ap->a_vp;
register struct inode *ip;
int i, type;
int i;
extern int prtactive;
if (prtactive && vp->v_usecount != 0)
vprint("ufs_reclaim: pushing active", vp);
@ -169,20 +141,5 @@ ufs_reclaim(ap)
}
}
#endif
switch (vp->v_mount->mnt_stat.f_type) {
case MOUNT_UFS:
type = M_FFSNODE;
break;
case MOUNT_MFS:
type = M_MFSNODE;
break;
case MOUNT_LFS:
type = M_LFSNODE;
break;
default:
panic("ufs_reclaim: not ufs file");
}
FREE(vp->v_data, type);
vp->v_data = NULL;
return (0);
}

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
* @(#)ufs_lockf.c 8.4 (Berkeley) 10/26/94
*/
#include <sys/param.h>
@ -57,7 +57,10 @@
int maxlockdepth = MAXDEPTH;
#ifdef LOCKF_DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int lockf_debug = 0;
struct ctldebug debug4 = { "lockf_debug", &lockf_debug };
#endif
#define NOLOCKF (struct lockf *)0
@ -149,7 +152,7 @@ lf_setlock(lock)
* Remember who blocked us (for deadlock detection).
*/
lock->lf_next = block;
lf_addblock(block, lock);
TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
if (lockf_debug & 1) {
lf_print("lf_setlock: blocking on", block);
@ -158,23 +161,16 @@ lf_setlock(lock)
#endif /* LOCKF_DEBUG */
if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {
/*
* Delete ourselves from the waiting to lock list.
* We may have been awakened by a signal (in
* which case we must remove ourselves from the
* blocked list) and/or by another process
* releasing a lock (in which case we have already
* been removed from the blocked list and our
* lf_next field set to NOLOCKF).
*/
for (block = lock->lf_next;
block != NOLOCKF;
block = block->lf_block) {
if (block->lf_block != lock)
continue;
block->lf_block = block->lf_block->lf_block;
break;
}
/*
* If we did not find ourselves on the list, but
* are still linked onto a lock list, then something
* is very wrong.
*/
if (block == NOLOCKF && lock->lf_next != NOLOCKF)
panic("lf_setlock: lost lock");
if (lock->lf_next)
TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
lf_block);
free(lock, M_LOCKF);
return (error);
}
@ -250,9 +246,12 @@ lf_setlock(lock)
overlap->lf_type == F_WRLCK) {
lf_wakelock(overlap);
} else {
ltmp = lock->lf_block;
lock->lf_block = overlap->lf_block;
lf_addblock(lock, ltmp);
while (ltmp = overlap->lf_blkhd.tqh_first) {
TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
lf_block);
TAILQ_INSERT_TAIL(&lock->lf_blkhd,
ltmp, lf_block);
}
}
/*
* Add the new lock if necessary and delete the overlap.
@ -547,34 +546,6 @@ lf_findoverlap(lf, lock, type, prev, overlap)
return (0);
}
/*
* Add a lock to the end of the blocked list.
*/
void
lf_addblock(lock, blocked)
struct lockf *lock;
struct lockf *blocked;
{
register struct lockf *lf;
if (blocked == NOLOCKF)
return;
#ifdef LOCKF_DEBUG
if (lockf_debug & 2) {
lf_print("addblock: adding", blocked);
lf_print("to blocked list of", lock);
}
#endif /* LOCKF_DEBUG */
if ((lf = lock->lf_block) == NOLOCKF) {
lock->lf_block = blocked;
return;
}
while (lf->lf_block != NOLOCKF)
lf = lf->lf_block;
lf->lf_block = blocked;
return;
}
/*
* Split a lock and a contained region into
* two or three locks as necessary.
@ -613,7 +584,7 @@ lf_split(lock1, lock2)
MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
splitlock->lf_start = lock2->lf_end + 1;
splitlock->lf_block = NOLOCKF;
TAILQ_INIT(&splitlock->lf_blkhd);
lock1->lf_end = lock2->lf_start - 1;
/*
* OK, now link it in
@ -630,14 +601,10 @@ void
lf_wakelock(listhead)
struct lockf *listhead;
{
register struct lockf *blocklist, *wakelock;
register struct lockf *wakelock;
blocklist = listhead->lf_block;
listhead->lf_block = NOLOCKF;
while (blocklist != NOLOCKF) {
wakelock = blocklist;
blocklist = blocklist->lf_block;
wakelock->lf_block = NOLOCKF;
while (wakelock = listhead->lf_blkhd.tqh_first) {
TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
@ -651,7 +618,6 @@ lf_wakelock(listhead)
/*
* Print out a lock.
*/
void
lf_print(tag, lock)
char *tag;
register struct lockf *lock;
@ -670,18 +636,17 @@ lf_print(tag, lock)
lock->lf_type == F_WRLCK ? "exclusive" :
lock->lf_type == F_UNLCK ? "unlock" :
"unknown", lock->lf_start, lock->lf_end);
if (lock->lf_block)
printf(" block 0x%x\n", lock->lf_block);
if (lock->lf_blkhd.tqh_first)
printf(" block 0x%x\n", lock->lf_blkhd.tqh_first);
else
printf("\n");
}
void
lf_printlist(tag, lock)
char *tag;
struct lockf *lock;
{
register struct lockf *lf;
register struct lockf *lf, *blk;
printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
tag, lock->lf_inode->i_number,
@ -698,9 +663,22 @@ lf_printlist(tag, lock)
lf->lf_type == F_WRLCK ? "exclusive" :
lf->lf_type == F_UNLCK ? "unlock" :
"unknown", lf->lf_start, lf->lf_end);
if (lf->lf_block)
printf(" block 0x%x\n", lf->lf_block);
for (blk = lf->lf_blkhd.tqh_first; blk;
blk = blk->lf_block.tqe_next) {
printf("\n\t\tlock request 0x%lx for ", blk);
if (blk->lf_flags & F_POSIX)
printf("proc %d",
((struct proc *)(blk->lf_id))->p_pid);
else
printf("id 0x%x", blk->lf_id);
printf(", %s, start %d, end %d",
blk->lf_type == F_RDLCK ? "shared" :
blk->lf_type == F_WRLCK ? "exclusive" :
blk->lf_type == F_UNLCK ? "unlock" :
"unknown", blk->lf_start, blk->lf_end);
if (blk->lf_blkhd.tqh_first)
panic("lf_printlist: bad list");
}
printf("\n");
}
}

View File

@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_lookup.c 8.6 (Berkeley) 4/1/94
* @(#)ufs_lookup.c 8.15 (Berkeley) 6/16/95
*/
#include <sys/param.h>
@ -126,6 +126,7 @@ ufs_lookup(ap)
struct ucred *cred = cnp->cn_cred;
int flags = cnp->cn_flags;
int nameiop = cnp->cn_nameiop;
struct proc *p = cnp->cn_proc;
bp = NULL;
slotoffset = -1;
@ -142,6 +143,9 @@ ufs_lookup(ap)
return (ENOTDIR);
if (error = VOP_ACCESS(vdp, VEXEC, cred, cnp->cn_proc))
return (error);
if ((flags & ISLASTCN) && (vdp->v_mount->mnt_flag & MNT_RDONLY) &&
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
return (EROFS);
/*
* We now have a segment name to search for, and a directory to search.
@ -168,14 +172,14 @@ ufs_lookup(ap)
VREF(vdp);
error = 0;
} else if (flags & ISDOTDOT) {
VOP_UNLOCK(pdp);
error = vget(vdp, 1);
VOP_UNLOCK(pdp, 0, p);
error = vget(vdp, LK_EXCLUSIVE, p);
if (!error && lockparent && (flags & ISLASTCN))
error = VOP_LOCK(pdp);
error = vn_lock(pdp, LK_EXCLUSIVE, p);
} else {
error = vget(vdp, 1);
error = vget(vdp, LK_EXCLUSIVE, p);
if (!lockparent || error || !(flags & ISLASTCN))
VOP_UNLOCK(pdp);
VOP_UNLOCK(pdp, 0, p);
}
/*
* Check that the capability number did not change
@ -186,9 +190,9 @@ ufs_lookup(ap)
return (0);
vput(vdp);
if (lockparent && pdp != vdp && (flags & ISLASTCN))
VOP_UNLOCK(pdp);
VOP_UNLOCK(pdp, 0, p);
}
if (error = VOP_LOCK(pdp))
if (error = vn_lock(pdp, LK_EXCLUSIVE, p))
return (error);
vdp = pdp;
dp = VTOI(pdp);
@ -329,6 +333,17 @@ ufs_lookup(ap)
* reclen in ndp->ni_ufs area, and release
* directory buffer.
*/
if (vdp->v_mount->mnt_maxsymlinklen > 0 &&
ep->d_type == DT_WHT) {
slotstatus = FOUND;
slotoffset = dp->i_offset;
slotsize = ep->d_reclen;
dp->i_reclen = slotsize;
enduseful = dp->i_size;
ap->a_cnp->cn_flags |= ISWHITEOUT;
numdirpasses--;
goto notfound;
}
dp->i_ino = ep->d_ino;
dp->i_reclen = ep->d_reclen;
brelse(bp);
@ -341,7 +356,7 @@ ufs_lookup(ap)
if (ep->d_ino)
enduseful = dp->i_offset;
}
/* notfound: */
notfound:
/*
* If we started in the middle of the directory and failed
* to find our target, we must check the beginning as well.
@ -359,7 +374,10 @@ ufs_lookup(ap)
* directory has not been removed, then can consider
* allowing file to be created.
*/
if ((nameiop == CREATE || nameiop == RENAME) &&
if ((nameiop == CREATE || nameiop == RENAME ||
(nameiop == DELETE &&
(ap->a_cnp->cn_flags & DOWHITEOUT) &&
(ap->a_cnp->cn_flags & ISWHITEOUT))) &&
(flags & ISLASTCN) && dp->i_nlink != 0) {
/*
* Access for write is interpreted as allowing
@ -380,6 +398,12 @@ ufs_lookup(ap)
dp->i_offset = roundup(dp->i_size, DIRBLKSIZ);
dp->i_count = 0;
enduseful = dp->i_offset;
} else if (nameiop == DELETE) {
dp->i_offset = slotoffset;
if ((dp->i_offset & (DIRBLKSIZ - 1)) == 0)
dp->i_count = 0;
else
dp->i_count = dp->i_offset - prevoff;
} else {
dp->i_offset = slotoffset;
dp->i_count = slotsize;
@ -403,7 +427,7 @@ ufs_lookup(ap)
*/
cnp->cn_flags |= SAVENAME;
if (!lockparent)
VOP_UNLOCK(vdp);
VOP_UNLOCK(vdp, 0, p);
return (EJUSTRETURN);
}
/*
@ -473,13 +497,14 @@ ufs_lookup(ap)
if ((dp->i_mode & ISVTX) &&
cred->cr_uid != 0 &&
cred->cr_uid != dp->i_uid &&
tdp->v_type != VLNK &&
VTOI(tdp)->i_uid != cred->cr_uid) {
vput(tdp);
return (EPERM);
}
*vpp = tdp;
if (!lockparent)
VOP_UNLOCK(vdp);
VOP_UNLOCK(vdp, 0, p);
return (0);
}
@ -489,8 +514,7 @@ ufs_lookup(ap)
* Must get inode of directory entry to verify it's a
* regular file, or empty directory.
*/
if (nameiop == RENAME && wantparent &&
(flags & ISLASTCN)) {
if (nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
if (error = VOP_ACCESS(vdp, VWRITE, cred, cnp->cn_proc))
return (error);
/*
@ -504,7 +528,7 @@ ufs_lookup(ap)
*vpp = tdp;
cnp->cn_flags |= SAVENAME;
if (!lockparent)
VOP_UNLOCK(vdp);
VOP_UNLOCK(vdp, 0, p);
return (0);
}
@ -529,13 +553,13 @@ ufs_lookup(ap)
*/
pdp = vdp;
if (flags & ISDOTDOT) {
VOP_UNLOCK(pdp); /* race to get the inode */
VOP_UNLOCK(pdp, 0, p); /* race to get the inode */
if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) {
VOP_LOCK(pdp);
vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY, p);
return (error);
}
if (lockparent && (flags & ISLASTCN) &&
(error = VOP_LOCK(pdp))) {
(error = vn_lock(pdp, LK_EXCLUSIVE, p))) {
vput(tdp);
return (error);
}
@ -547,7 +571,7 @@ ufs_lookup(ap)
if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp))
return (error);
if (!lockparent || !(flags & ISLASTCN))
VOP_UNLOCK(pdp);
VOP_UNLOCK(pdp, 0, p);
*vpp = tdp;
}
@ -606,6 +630,8 @@ ufs_dirbadentry(dp, ep, entryoffsetinblock)
printf("First bad\n");
goto bad;
}
if (ep->d_ino == 0)
return (0);
for (i = 0; i < namlen; i++)
if (ep->d_name[i] == '\0') {
/*return (1); */
@ -614,9 +640,9 @@ ufs_dirbadentry(dp, ep, entryoffsetinblock)
}
if (ep->d_name[i])
goto bad;
return (ep->d_name[i]);
return (0);
bad:
return(1);
return (1);
}
/*
@ -633,15 +659,8 @@ ufs_direnter(ip, dvp, cnp)
struct vnode *dvp;
register struct componentname *cnp;
{
register struct direct *ep, *nep;
register struct inode *dp;
struct buf *bp;
struct direct newdir;
struct iovec aiov;
struct uio auio;
u_int dsize;
int error, loc, newentrysize, spacefree;
char *dirbuf;
#ifdef DIAGNOSTIC
if ((cnp->cn_flags & SAVENAME) == 0)
@ -661,7 +680,32 @@ ufs_direnter(ip, dvp, cnp)
newdir.d_type = tmp; }
# endif
}
newentrysize = DIRSIZ(FSFMT(dvp), &newdir);
return (ufs_direnter2(dvp, &newdir, cnp->cn_cred, cnp->cn_proc));
}
/*
* Common entry point for directory entry removal used by ufs_direnter
* and ufs_whiteout
*/
ufs_direnter2(dvp, dirp, cr, p)
struct vnode *dvp;
struct direct *dirp;
struct ucred *cr;
struct proc *p;
{
int newentrysize;
struct inode *dp;
struct buf *bp;
struct iovec aiov;
struct uio auio;
u_int dsize;
struct direct *ep, *nep;
int error, loc, spacefree;
char *dirbuf;
dp = VTOI(dvp);
newentrysize = DIRSIZ(FSFMT(dvp), dirp);
if (dp->i_count == 0) {
/*
* If dp->i_count is 0, then namei could find no
@ -670,22 +714,22 @@ ufs_direnter(ip, dvp, cnp)
* new entry into a fresh block.
*/
if (dp->i_offset & (DIRBLKSIZ - 1))
panic("ufs_direnter: newblk");
panic("ufs_direnter2: newblk");
auio.uio_offset = dp->i_offset;
newdir.d_reclen = DIRBLKSIZ;
dirp->d_reclen = DIRBLKSIZ;
auio.uio_resid = newentrysize;
aiov.iov_len = newentrysize;
aiov.iov_base = (caddr_t)&newdir;
aiov.iov_base = (caddr_t)dirp;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_procp = (struct proc *)0;
error = VOP_WRITE(dvp, &auio, IO_SYNC, cnp->cn_cred);
error = VOP_WRITE(dvp, &auio, IO_SYNC, cr);
if (DIRBLKSIZ >
VFSTOUFS(dvp->v_mount)->um_mountp->mnt_stat.f_bsize)
/* XXX should grow with balloc() */
panic("ufs_direnter: frag size");
panic("ufs_direnter2: frag size");
else if (!error) {
dp->i_size = roundup(dp->i_size, DIRBLKSIZ);
dp->i_flag |= IN_CHANGE;
@ -745,23 +789,24 @@ ufs_direnter(ip, dvp, cnp)
* Update the pointer fields in the previous entry (if any),
* copy in the new entry, and write out the block.
*/
if (ep->d_ino == 0) {
if (ep->d_ino == 0 ||
(ep->d_ino == WINO &&
bcmp(ep->d_name, dirp->d_name, dirp->d_namlen) == 0)) {
if (spacefree + dsize < newentrysize)
panic("ufs_direnter: compact1");
newdir.d_reclen = spacefree + dsize;
panic("ufs_direnter2: compact1");
dirp->d_reclen = spacefree + dsize;
} else {
if (spacefree < newentrysize)
panic("ufs_direnter: compact2");
newdir.d_reclen = spacefree;
panic("ufs_direnter2: compact2");
dirp->d_reclen = spacefree;
ep->d_reclen = dsize;
ep = (struct direct *)((char *)ep + dsize);
}
bcopy((caddr_t)&newdir, (caddr_t)ep, (u_int)newentrysize);
bcopy((caddr_t)dirp, (caddr_t)ep, (u_int)newentrysize);
error = VOP_BWRITE(bp);
dp->i_flag |= IN_CHANGE | IN_UPDATE;
if (!error && dp->i_endoff && dp->i_endoff < dp->i_size)
error = VOP_TRUNCATE(dvp, (off_t)dp->i_endoff, IO_SYNC,
cnp->cn_cred, cnp->cn_proc);
error = VOP_TRUNCATE(dvp, (off_t)dp->i_endoff, IO_SYNC, cr, p);
return (error);
}
@ -788,6 +833,21 @@ ufs_dirremove(dvp, cnp)
int error;
dp = VTOI(dvp);
if (cnp->cn_flags & DOWHITEOUT) {
/*
* Whiteout entry: set d_ino to WINO.
*/
if (error =
VOP_BLKATOFF(dvp, (off_t)dp->i_offset, (char **)&ep, &bp))
return (error);
ep->d_ino = WINO;
ep->d_type = DT_WHT;
error = VOP_BWRITE(bp);
dp->i_flag |= IN_CHANGE | IN_UPDATE;
return (error);
}
if (dp->i_count == 0) {
/*
* First entry in block: set d_ino to zero.
@ -871,7 +931,7 @@ ufs_dirempty(ip, parentino, cred)
if (dp->d_reclen == 0)
return (0);
/* skip empty entries */
if (dp->d_ino == 0)
if (dp->d_ino == 0 || dp->d_ino == WINO)
continue;
/* accept only "." and ".." */
# if (BYTE_ORDER == LITTLE_ENDIAN)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1982, 1986, 1990, 1993
* Copyright (c) 1982, 1986, 1990, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_quota.c 8.2 (Berkeley) 12/30/93
* @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
*/
#include <sys/param.h>
#include <sys/kernel.h>
@ -361,8 +361,8 @@ quotaon(p, mp, type, fname)
register int type;
caddr_t fname;
{
register struct ufsmount *ump = VFSTOUFS(mp);
register struct vnode *vp, **vpp;
struct ufsmount *ump = VFSTOUFS(mp);
struct vnode *vp, **vpp;
struct vnode *nextvp;
struct dquot *dq;
int error;
@ -373,15 +373,11 @@ quotaon(p, mp, type, fname)
if (error = vn_open(&nd, FREAD|FWRITE, 0))
return (error);
vp = nd.ni_vp;
VOP_UNLOCK(vp);
VOP_UNLOCK(vp, 0, p);
if (vp->v_type != VREG) {
(void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
return (EACCES);
}
if (vfs_busy(mp)) {
(void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p);
return (EBUSY);
}
if (*vpp != vp)
quotaoff(p, mp, type);
ump->um_qflags[type] |= QTF_OPENING;
@ -413,7 +409,7 @@ quotaon(p, mp, type, fname)
nextvp = vp->v_mntvnodes.le_next;
if (vp->v_writecount == 0)
continue;
if (vget(vp, 1))
if (vget(vp, LK_EXCLUSIVE, p))
goto again;
if (error = getinoquota(VTOI(vp))) {
vput(vp);
@ -426,7 +422,6 @@ quotaon(p, mp, type, fname)
ump->um_qflags[type] &= ~QTF_OPENING;
if (error)
quotaoff(p, mp, type);
vfs_unbusy(mp);
return (error);
}
@ -439,15 +434,13 @@ quotaoff(p, mp, type)
struct mount *mp;
register int type;
{
register struct vnode *vp;
struct vnode *vp;
struct vnode *qvp, *nextvp;
struct ufsmount *ump = VFSTOUFS(mp);
register struct dquot *dq;
register struct inode *ip;
struct dquot *dq;
struct inode *ip;
int error;
if ((mp->mnt_flag & MNT_MPBUSY) == 0)
panic("quotaoff: not busy");
if ((qvp = ump->um_quotas[type]) == NULLVP)
return (0);
ump->um_qflags[type] |= QTF_CLOSING;
@ -458,7 +451,7 @@ quotaoff(p, mp, type)
again:
for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) {
nextvp = vp->v_mntvnodes.le_next;
if (vget(vp, 1))
if (vget(vp, LK_EXCLUSIVE, p))
goto again;
ip = VTOI(vp);
dq = ip->i_dquot[type];
@ -616,16 +609,15 @@ qsync(mp)
struct mount *mp;
{
struct ufsmount *ump = VFSTOUFS(mp);
register struct vnode *vp, *nextvp;
register struct dquot *dq;
register int i;
struct proc *p = curproc; /* XXX */
struct vnode *vp, *nextvp;
struct dquot *dq;
int i, error;
/*
* Check if the mount point has any quotas.
* If not, simply return.
*/
if ((mp->mnt_flag & MNT_MPBUSY) == 0)
panic("qsync: not busy");
for (i = 0; i < MAXQUOTAS; i++)
if (ump->um_quotas[i] != NULLVP)
break;
@ -635,36 +627,48 @@ qsync(mp)
* Search vnodes associated with this mount point,
* synchronizing any modified dquot structures.
*/
simple_lock(&mntvnode_slock);
again:
for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) {
nextvp = vp->v_mntvnodes.le_next;
if (VOP_ISLOCKED(vp))
continue;
if (vget(vp, 1))
if (vp->v_mount != mp)
goto again;
nextvp = vp->v_mntvnodes.le_next;
simple_lock(&vp->v_interlock);
simple_unlock(&mntvnode_slock);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
simple_lock(&mntvnode_slock);
if (error == ENOENT)
goto again;
continue;
}
for (i = 0; i < MAXQUOTAS; i++) {
dq = VTOI(vp)->i_dquot[i];
if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
dqsync(vp, dq);
}
vput(vp);
if (vp->v_mntvnodes.le_next != nextvp || vp->v_mount != mp)
simple_lock(&mntvnode_slock);
if (vp->v_mntvnodes.le_next != nextvp)
goto again;
}
simple_unlock(&mntvnode_slock);
return (0);
}
/*
* Code pertaining to management of the in-core dquot data structures.
*/
struct dquot **dqhashtbl;
#define DQHASH(dqvp, id) \
(&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash])
LIST_HEAD(dqhash, dquot) *dqhashtbl;
u_long dqhash;
/*
* Dquot free list.
*/
#define DQUOTINC 5 /* minimum free dquots desired */
struct dquot *dqfreel, **dqback = &dqfreel;
TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
long numdquot, desireddquot = DQUOTINC;
/*
@ -675,6 +679,7 @@ dqinit()
{
dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
TAILQ_INIT(&dqfreelist);
}
/*
@ -689,8 +694,10 @@ dqget(vp, id, ump, type, dqp)
register int type;
struct dquot **dqp;
{
register struct dquot *dq, *dp, **dpp;
register struct vnode *dqvp;
struct proc *p = curproc; /* XXX */
struct dquot *dq;
struct dqhash *dqh;
struct vnode *dqvp;
struct iovec aiov;
struct uio auio;
int error;
@ -703,8 +710,8 @@ dqget(vp, id, ump, type, dqp)
/*
* Check the cache first.
*/
dpp = &dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash];
for (dq = *dpp; dq; dq = dq->dq_forw) {
dqh = DQHASH(dqvp, id);
for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) {
if (dq->dq_id != id ||
dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
continue;
@ -712,13 +719,8 @@ dqget(vp, id, ump, type, dqp)
* Cache hit with no references. Take
* the structure off the free list.
*/
if (dq->dq_cnt == 0) {
if ((dp = dq->dq_freef) != NODQUOT)
dp->dq_freeb = dq->dq_freeb;
else
dqback = dq->dq_freeb;
*dq->dq_freeb = dp;
}
if (dq->dq_cnt == 0)
TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
DQREF(dq);
*dqp = dq;
return (0);
@ -726,41 +728,30 @@ dqget(vp, id, ump, type, dqp)
/*
* Not in cache, allocate a new one.
*/
if (dqfreel == NODQUOT && numdquot < MAXQUOTAS * desiredvnodes)
if (dqfreelist.tqh_first == NODQUOT &&
numdquot < MAXQUOTAS * desiredvnodes)
desireddquot += DQUOTINC;
if (numdquot < desireddquot) {
dq = (struct dquot *)malloc(sizeof *dq, M_DQUOT, M_WAITOK);
bzero((char *)dq, sizeof *dq);
numdquot++;
} else {
if ((dq = dqfreel) == NULL) {
if ((dq = dqfreelist.tqh_first) == NULL) {
tablefull("dquot");
*dqp = NODQUOT;
return (EUSERS);
}
if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
panic("free dquot isn't");
if ((dp = dq->dq_freef) != NODQUOT)
dp->dq_freeb = &dqfreel;
else
dqback = &dqfreel;
dqfreel = dp;
dq->dq_freef = NULL;
dq->dq_freeb = NULL;
if (dp = dq->dq_forw)
dp->dq_back = dq->dq_back;
*dq->dq_back = dp;
TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
LIST_REMOVE(dq, dq_hash);
}
/*
* Initialize the contents of the dquot structure.
*/
if (vp != dqvp)
VOP_LOCK(dqvp);
if (dp = *dpp)
dp->dq_back = &dq->dq_forw;
dq->dq_forw = dp;
dq->dq_back = dpp;
*dpp = dq;
vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, p);
LIST_INSERT_HEAD(dqh, dq, dq_hash);
DQREF(dq);
dq->dq_flags = DQ_LOCK;
dq->dq_id = id;
@ -779,7 +770,7 @@ dqget(vp, id, ump, type, dqp)
if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
bzero((caddr_t)&dq->dq_dqb, sizeof(struct dqblk));
if (vp != dqvp)
VOP_UNLOCK(dqvp);
VOP_UNLOCK(dqvp, 0, p);
if (dq->dq_flags & DQ_WANT)
wakeup((caddr_t)dq);
dq->dq_flags = 0;
@ -788,11 +779,7 @@ dqget(vp, id, ump, type, dqp)
* quota structure and reflect problem to caller.
*/
if (error) {
if (dp = dq->dq_forw)
dp->dq_back = dq->dq_back;
*dq->dq_back = dp;
dq->dq_forw = NULL;
dq->dq_back = NULL;
LIST_REMOVE(dq, dq_hash);
dqrele(vp, dq);
*dqp = NODQUOT;
return (error);
@ -844,15 +831,7 @@ dqrele(vp, dq)
(void) dqsync(vp, dq);
if (--dq->dq_cnt > 0)
return;
if (dqfreel != NODQUOT) {
*dqback = dq;
dq->dq_freeb = dqback;
} else {
dqfreel = dq;
dq->dq_freeb = &dqfreel;
}
dq->dq_freef = NODQUOT;
dqback = &dq->dq_freef;
TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
}
/*
@ -861,8 +840,9 @@ dqrele(vp, dq)
int
dqsync(vp, dq)
struct vnode *vp;
register struct dquot *dq;
struct dquot *dq;
{
struct proc *p = curproc; /* XXX */
struct vnode *dqvp;
struct iovec aiov;
struct uio auio;
@ -875,13 +855,13 @@ dqsync(vp, dq)
if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
panic("dqsync: file");
if (vp != dqvp)
VOP_LOCK(dqvp);
vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, p);
while (dq->dq_flags & DQ_LOCK) {
dq->dq_flags |= DQ_WANT;
sleep((caddr_t)dq, PINOD+2);
if ((dq->dq_flags & DQ_MOD) == 0) {
if (vp != dqvp)
VOP_UNLOCK(dqvp);
VOP_UNLOCK(dqvp, 0, p);
return (0);
}
}
@ -902,7 +882,7 @@ dqsync(vp, dq)
wakeup((caddr_t)dq);
dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT);
if (vp != dqvp)
VOP_UNLOCK(dqvp);
VOP_UNLOCK(dqvp, 0, p);
return (error);
}
@ -913,25 +893,22 @@ void
dqflush(vp)
register struct vnode *vp;
{
register struct dquot *dq, *dp, **dpp, *nextdq;
register struct dquot *dq, *nextdq;
struct dqhash *dqh;
/*
* Move all dquot's that used to refer to this quota
* file off their hash chains (they will eventually
* fall off the head of the free list and be re-used).
*/
for (dpp = &dqhashtbl[dqhash]; dpp >= dqhashtbl; dpp--) {
for (dq = *dpp; dq; dq = nextdq) {
nextdq = dq->dq_forw;
for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
for (dq = dqh->lh_first; dq; dq = nextdq) {
nextdq = dq->dq_hash.le_next;
if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
continue;
if (dq->dq_cnt)
panic("dqflush: stray dquot");
if (dp = dq->dq_forw)
dp->dq_back = dq->dq_back;
*dq->dq_back = dp;
dq->dq_forw = NULL;
dq->dq_back = NULL;
LIST_REMOVE(dq, dq_hash);
dq->dq_ump = (struct ufsmount *)0;
}
}

View File

@ -30,11 +30,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.7 (Berkeley) 1/21/94
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
*/
#ifdef LFS_READWRITE
#define BLKSIZE(a, b, c) blksize(a)
#define BLKSIZE(a, b, c) blksize(a, b, c)
#define FS struct lfs
#define I_FS i_lfs
#define READ lfs_read
@ -70,7 +70,7 @@ READ(ap)
register struct uio *uio;
register FS *fs;
struct buf *bp;
daddr_t lbn, nextlbn;
ufs_daddr_t lbn, nextlbn;
off_t bytesinfile;
long size, xfersize, blkoffset;
int error;
@ -92,7 +92,7 @@ READ(ap)
panic("%s: type %d", READ_S, vp->v_type);
#endif
fs = ip->I_FS;
if ((u_quad_t)uio->uio_offset > fs->fs_maxfilesize)
if ((u_int64_t)uio->uio_offset > fs->fs_maxfilesize)
return (EFBIG);
for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
@ -112,7 +112,7 @@ READ(ap)
(void)lfs_check(vp, lbn);
error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, &bp);
#else
if (lblktosize(fs, nextlbn) > ip->i_size)
if (lblktosize(fs, nextlbn) >= ip->i_size)
error = bread(vp, lbn, size, NOCRED, &bp);
else if (doclusterread)
error = cluster_read(vp,
@ -173,7 +173,7 @@ WRITE(ap)
register FS *fs;
struct buf *bp;
struct proc *p;
daddr_t lbn;
ufs_daddr_t lbn;
off_t osize;
int blkoffset, error, flags, ioflag, resid, size, xfersize;
@ -206,7 +206,7 @@ WRITE(ap)
fs = ip->I_FS;
if (uio->uio_offset < 0 ||
(u_quad_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
(u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
return (EFBIG);
/*
* Maybe this should be above the vnode op call, but so long as
@ -232,7 +232,7 @@ WRITE(ap)
xfersize = uio->uio_resid;
#ifdef LFS_READWRITE
(void)lfs_check(vp, lbn);
error = lfs_balloc(vp, xfersize, lbn, &bp);
error = lfs_balloc(vp, blkoffset, xfersize, lbn, &bp);
#else
if (fs->fs_bsize > xfersize)
flags |= B_CLRBUF;

View File

@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_vfsops.c 8.4 (Berkeley) 4/16/94
* @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
*/
#include <sys/param.h>
@ -53,11 +53,6 @@
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
/*
* Flag to permit forcible unmounting.
*/
int doforce = 1;
/*
* Make a filesystem operational.
* Nothing to do at the moment.
@ -111,8 +106,9 @@ ufs_quotactl(mp, cmds, uid, arg, p)
cmd = cmds >> SUBCMDSHIFT;
switch (cmd) {
case Q_GETQUOTA:
case Q_SYNC:
break;
case Q_GETQUOTA:
if (uid == p->p_cred->p_ruid)
break;
/* fall through */
@ -121,45 +117,66 @@ ufs_quotactl(mp, cmds, uid, arg, p)
return (error);
}
type = cmd & SUBCMDMASK;
type = cmds & SUBCMDMASK;
if ((u_int)type >= MAXQUOTAS)
return (EINVAL);
if (vfs_busy(mp, LK_NOWAIT, 0, p))
return (0);
switch (cmd) {
case Q_QUOTAON:
return (quotaon(p, mp, type, arg));
error = quotaon(p, mp, type, arg);
break;
case Q_QUOTAOFF:
if (vfs_busy(mp))
return (0);
error = quotaoff(p, mp, type);
vfs_unbusy(mp);
return (error);
break;
case Q_SETQUOTA:
return (setquota(mp, uid, type, arg));
error = setquota(mp, uid, type, arg);
break;
case Q_SETUSE:
return (setuse(mp, uid, type, arg));
error = setuse(mp, uid, type, arg);
break;
case Q_GETQUOTA:
return (getquota(mp, uid, type, arg));
error = getquota(mp, uid, type, arg);
break;
case Q_SYNC:
if (vfs_busy(mp))
return (0);
error = qsync(mp);
vfs_unbusy(mp);
return (error);
break;
default:
return (EINVAL);
error = EINVAL;
break;
}
/* NOTREACHED */
vfs_unbusy(mp, p);
return (error);
#endif
}
/*
* Initial UFS filesystems, done only once.
*/
int
ufs_init(vfsp)
struct vfsconf *vfsp;
{
static int done;
if (done)
return (0);
done = 1;
ufs_ihashinit();
#ifdef QUOTA
dqinit();
#endif
return (0);
}
/*
* This is the generic part of fhtovp called after the underlying
* filesystem has validated the file handle.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1982, 1986, 1989, 1993
* Copyright (c) 1982, 1986, 1989, 1993, 1995
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
@ -35,7 +35,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_vnops.c 8.10 (Berkeley) 4/1/94
* @(#)ufs_vnops.c 8.27 (Berkeley) 5/27/95
*/
#include <sys/param.h>
@ -69,8 +69,8 @@ static int ufs_chown
__P((struct vnode *, uid_t, gid_t, struct ucred *, struct proc *));
union _qcvt {
quad_t qcvt;
long val[2];
int64_t qcvt;
int32_t val[2];
};
#define SETHIGH(q, h) { \
union _qcvt tmp; \
@ -119,9 +119,9 @@ ufs_mknod(ap)
struct vattr *a_vap;
} */ *ap;
{
register struct vattr *vap = ap->a_vap;
register struct vnode **vpp = ap->a_vpp;
register struct inode *ip;
struct vattr *vap = ap->a_vap;
struct vnode **vpp = ap->a_vpp;
struct inode *ip;
int error;
if (error =
@ -192,8 +192,10 @@ ufs_close(ap)
register struct vnode *vp = ap->a_vp;
register struct inode *ip = VTOI(vp);
if (vp->v_usecount > 1 && !(ip->i_flag & IN_LOCKED))
simple_lock(&vp->v_interlock);
if (vp->v_usecount > 1)
ITIMES(ip, &time, &time);
simple_unlock(&vp->v_interlock);
return (0);
}
@ -206,30 +208,32 @@ ufs_access(ap)
struct proc *a_p;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
register struct inode *ip = VTOI(vp);
register struct ucred *cred = ap->a_cred;
struct vnode *vp = ap->a_vp;
struct inode *ip = VTOI(vp);
struct ucred *cred = ap->a_cred;
mode_t mask, mode = ap->a_mode;
register gid_t *gp;
int i, error;
#ifdef DIAGNOSTIC
if (!VOP_ISLOCKED(vp)) {
vprint("ufs_access: not locked", vp);
panic("ufs_access: not locked");
}
#endif
#ifdef QUOTA
if (mode & VWRITE)
/*
* Disallow write attempts on read-only file systems;
* unless the file is a socket, fifo, or a block or
* character device resident on the file system.
*/
if (mode & VWRITE) {
switch (vp->v_type) {
case VDIR:
case VLNK:
case VREG:
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
#ifdef QUOTA
if (error = getinoquota(ip))
return (error);
#endif
break;
}
#endif
}
/* If immutable bit set, nobody gets to write it. */
if ((mode & VWRITE) && (ip->i_flags & IMMUTABLE))
@ -300,9 +304,12 @@ ufs_getattr(ap)
vap->va_gid = ip->i_gid;
vap->va_rdev = (dev_t)ip->i_rdev;
vap->va_size = ip->i_din.di_size;
vap->va_atime = ip->i_atime;
vap->va_mtime = ip->i_mtime;
vap->va_ctime = ip->i_ctime;
vap->va_atime.ts_sec = ip->i_atime;
vap->va_atime.ts_nsec = ip->i_atimensec;
vap->va_mtime.ts_sec = ip->i_mtime;
vap->va_mtime.ts_nsec = ip->i_mtimensec;
vap->va_ctime.ts_sec = ip->i_ctime;
vap->va_ctime.ts_nsec = ip->i_ctimensec;
vap->va_flags = ip->i_flags;
vap->va_gen = ip->i_gen;
/* this doesn't belong here */
@ -312,7 +319,7 @@ ufs_getattr(ap)
vap->va_blocksize = MAXBSIZE;
else
vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
vap->va_bytes = dbtob(ip->i_blocks);
vap->va_bytes = dbtob((u_quad_t)ip->i_blocks);
vap->va_type = vp->v_type;
vap->va_filerev = ip->i_modrev;
return (0);
@ -330,11 +337,11 @@ ufs_setattr(ap)
struct proc *a_p;
} */ *ap;
{
register struct vattr *vap = ap->a_vap;
register struct vnode *vp = ap->a_vp;
register struct inode *ip = VTOI(vp);
register struct ucred *cred = ap->a_cred;
register struct proc *p = ap->a_p;
struct vattr *vap = ap->a_vap;
struct vnode *vp = ap->a_vp;
struct inode *ip = VTOI(vp);
struct ucred *cred = ap->a_cred;
struct proc *p = ap->a_p;
struct timeval atimeval, mtimeval;
int error;
@ -348,6 +355,8 @@ ufs_setattr(ap)
return (EINVAL);
}
if (vap->va_flags != VNOVAL) {
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
if (cred->cr_uid != ip->i_uid &&
(error = suser(cred, &p->p_acflag)))
return (error);
@ -357,7 +366,8 @@ ufs_setattr(ap)
return (EPERM);
ip->i_flags = vap->va_flags;
} else {
if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND))
if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND) ||
(vap->va_flags & UF_SETTABLE) != vap->va_flags)
return (EPERM);
ip->i_flags &= SF_SETTABLE;
ip->i_flags |= (vap->va_flags & UF_SETTABLE);
@ -371,17 +381,34 @@ ufs_setattr(ap)
/*
* Go through the fields and update iff not VNOVAL.
*/
if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL)
if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
if (error = ufs_chown(vp, vap->va_uid, vap->va_gid, cred, p))
return (error);
}
if (vap->va_size != VNOVAL) {
if (vp->v_type == VDIR)
/*
* Disallow write attempts on read-only file systems;
* unless the file is a socket, fifo, or a block or
* character device resident on the file system.
*/
switch (vp->v_type) {
case VDIR:
return (EISDIR);
case VLNK:
case VREG:
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
break;
}
if (error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p))
return (error);
}
ip = VTOI(vp);
if (vap->va_atime.ts_sec != VNOVAL || vap->va_mtime.ts_sec != VNOVAL) {
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
if (cred->cr_uid != ip->i_uid &&
(error = suser(cred, &p->p_acflag)) &&
((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
@ -399,8 +426,11 @@ ufs_setattr(ap)
return (error);
}
error = 0;
if (vap->va_mode != (mode_t)VNOVAL)
if (vap->va_mode != (mode_t)VNOVAL) {
if (vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
error = ufs_chmod(vp, (int)vap->va_mode, cred, p);
}
return (error);
}
@ -466,7 +496,7 @@ ufs_chown(vp, uid, gid, cred, p)
* the caller must be superuser or the call fails.
*/
if ((cred->cr_uid != ip->i_uid || uid != ip->i_uid ||
!groupmember((gid_t)gid, cred)) &&
(gid != ip->i_gid && !groupmember((gid_t)gid, cred))) &&
(error = suser(cred, &p->p_acflag)))
return (error);
ogid = ip->i_gid;
@ -622,9 +652,9 @@ ufs_remove(ap)
struct componentname *a_cnp;
} */ *ap;
{
register struct inode *ip;
register struct vnode *vp = ap->a_vp;
register struct vnode *dvp = ap->a_dvp;
struct inode *ip;
struct vnode *vp = ap->a_vp;
struct vnode *dvp = ap->a_dvp;
int error;
ip = VTOI(vp);
@ -657,10 +687,11 @@ ufs_link(ap)
struct componentname *a_cnp;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
register struct vnode *tdvp = ap->a_tdvp;
register struct componentname *cnp = ap->a_cnp;
register struct inode *ip;
struct vnode *vp = ap->a_vp;
struct vnode *tdvp = ap->a_tdvp;
struct componentname *cnp = ap->a_cnp;
struct proc *p = cnp->cn_proc;
struct inode *ip;
struct timeval tv;
int error;
@ -668,196 +699,99 @@ ufs_link(ap)
if ((cnp->cn_flags & HASBUF) == 0)
panic("ufs_link: no name");
#endif
if (vp->v_mount != tdvp->v_mount) {
VOP_ABORTOP(vp, cnp);
if (tdvp->v_mount != vp->v_mount) {
VOP_ABORTOP(tdvp, cnp);
error = EXDEV;
goto out2;
}
if (vp != tdvp && (error = VOP_LOCK(tdvp))) {
VOP_ABORTOP(vp, cnp);
if (tdvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE, p))) {
VOP_ABORTOP(tdvp, cnp);
goto out2;
}
ip = VTOI(tdvp);
ip = VTOI(vp);
if ((nlink_t)ip->i_nlink >= LINK_MAX) {
VOP_ABORTOP(vp, cnp);
VOP_ABORTOP(tdvp, cnp);
error = EMLINK;
goto out1;
}
if (ip->i_flags & (IMMUTABLE | APPEND)) {
VOP_ABORTOP(vp, cnp);
VOP_ABORTOP(tdvp, cnp);
error = EPERM;
goto out1;
}
ip->i_nlink++;
ip->i_flag |= IN_CHANGE;
tv = time;
error = VOP_UPDATE(tdvp, &tv, &tv, 1);
error = VOP_UPDATE(vp, &tv, &tv, 1);
if (!error)
error = ufs_direnter(ip, vp, cnp);
error = ufs_direnter(ip, tdvp, cnp);
if (error) {
ip->i_nlink--;
ip->i_flag |= IN_CHANGE;
}
FREE(cnp->cn_pnbuf, M_NAMEI);
out1:
if (vp != tdvp)
VOP_UNLOCK(tdvp);
if (tdvp != vp)
VOP_UNLOCK(vp, 0, p);
out2:
vput(vp);
vput(tdvp);
return (error);
}
/*
* relookup - lookup a path name component
* Used by lookup to re-aquire things.
* whiteout vnode call
*/
int
relookup(dvp, vpp, cnp)
struct vnode *dvp, **vpp;
struct componentname *cnp;
ufs_whiteout(ap)
struct vop_whiteout_args /* {
struct vnode *a_dvp;
struct componentname *a_cnp;
int a_flags;
} */ *ap;
{
register struct vnode *dp = 0; /* the directory we are searching */
int docache; /* == 0 do not cache last component */
int wantparent; /* 1 => wantparent or lockparent flag */
int rdonly; /* lookup read-only flag bit */
int error = 0;
#ifdef NAMEI_DIAGNOSTIC
int newhash; /* DEBUG: check name hash */
char *cp; /* DEBUG: check name ptr/len */
#endif
struct vnode *dvp = ap->a_dvp;
struct componentname *cnp = ap->a_cnp;
struct direct newdir;
int error;
/*
* Setup: break out flag bits into variables.
*/
wantparent = cnp->cn_flags & (LOCKPARENT|WANTPARENT);
docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE;
if (cnp->cn_nameiop == DELETE ||
(wantparent && cnp->cn_nameiop != CREATE))
docache = 0;
rdonly = cnp->cn_flags & RDONLY;
cnp->cn_flags &= ~ISSYMLINK;
dp = dvp;
VOP_LOCK(dp);
/* dirloop: */
/*
* Search a new directory.
*
* The cn_hash value is for use by vfs_cache.
* The last component of the filename is left accessible via
* cnp->cn_nameptr for callers that need the name. Callers needing
* the name set the SAVENAME flag. When done, they assume
* responsibility for freeing the pathname buffer.
*/
#ifdef NAMEI_DIAGNOSTIC
for (newhash = 0, cp = cnp->cn_nameptr; *cp != 0 && *cp != '/'; cp++)
newhash += (unsigned char)*cp;
if (newhash != cnp->cn_hash)
panic("relookup: bad hash");
if (cnp->cn_namelen != cp - cnp->cn_nameptr)
panic ("relookup: bad len");
if (*cp != 0)
panic("relookup: not last component");
printf("{%s}: ", cnp->cn_nameptr);
#endif
/*
* Check for degenerate name (e.g. / or "")
* which is a way of talking about a directory,
* e.g. like "/." or ".".
*/
if (cnp->cn_nameptr[0] == '\0') {
if (cnp->cn_nameiop != LOOKUP || wantparent) {
error = EISDIR;
goto bad;
}
if (dp->v_type != VDIR) {
error = ENOTDIR;
goto bad;
}
if (!(cnp->cn_flags & LOCKLEAF))
VOP_UNLOCK(dp);
*vpp = dp;
if (cnp->cn_flags & SAVESTART)
panic("lookup: SAVESTART");
switch (ap->a_flags) {
case LOOKUP:
/* 4.4 format directories support whiteout operations */
if (dvp->v_mount->mnt_maxsymlinklen > 0)
return (0);
}
return (EOPNOTSUPP);
if (cnp->cn_flags & ISDOTDOT)
panic ("relookup: lookup on dot-dot");
/*
* We now have a segment name to search for, and a directory to search.
*/
if (error = VOP_LOOKUP(dp, vpp, cnp)) {
case CREATE:
/* create a new directory whiteout */
#ifdef DIAGNOSTIC
if (*vpp != NULL)
panic("leaf should be empty");
if ((cnp->cn_flags & SAVENAME) == 0)
panic("ufs_whiteout: missing name");
if (dvp->v_mount->mnt_maxsymlinklen <= 0)
panic("ufs_whiteout: old format filesystem");
#endif
if (error != EJUSTRETURN)
goto bad;
/*
* If creating and at end of pathname, then can consider
* allowing file to be created.
*/
if (rdonly || (dvp->v_mount->mnt_flag & MNT_RDONLY)) {
error = EROFS;
goto bad;
}
/* ASSERT(dvp == ndp->ni_startdir) */
if (cnp->cn_flags & SAVESTART)
VREF(dvp);
/*
* We return with ni_vp NULL to indicate that the entry
* doesn't currently exist, leaving a pointer to the
* (possibly locked) directory inode in ndp->ni_dvp.
*/
return (0);
}
dp = *vpp;
newdir.d_ino = WINO;
newdir.d_namlen = cnp->cn_namelen;
bcopy(cnp->cn_nameptr, newdir.d_name, (unsigned)cnp->cn_namelen + 1);
newdir.d_type = DT_WHT;
error = ufs_direnter2(dvp, &newdir, cnp->cn_cred, cnp->cn_proc);
break;
case DELETE:
/* remove an existing directory whiteout */
#ifdef DIAGNOSTIC
/*
* Check for symbolic link
*/
if (dp->v_type == VLNK && (cnp->cn_flags & FOLLOW))
panic ("relookup: symlink found.\n");
if (dvp->v_mount->mnt_maxsymlinklen <= 0)
panic("ufs_whiteout: old format filesystem");
#endif
/*
* Check for read-only file systems.
*/
if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) {
/*
* Disallow directory write attempts on read-only
* file systems.
*/
if (rdonly || (dp->v_mount->mnt_flag & MNT_RDONLY) ||
(wantparent &&
(dvp->v_mount->mnt_flag & MNT_RDONLY))) {
error = EROFS;
goto bad2;
cnp->cn_flags &= ~DOWHITEOUT;
error = ufs_dirremove(dvp, cnp);
break;
}
if (cnp->cn_flags & HASBUF) {
FREE(cnp->cn_pnbuf, M_NAMEI);
cnp->cn_flags &= ~HASBUF;
}
/* ASSERT(dvp == ndp->ni_startdir) */
if (cnp->cn_flags & SAVESTART)
VREF(dvp);
if (!wantparent)
vrele(dvp);
if ((cnp->cn_flags & LOCKLEAF) == 0)
VOP_UNLOCK(dp);
return (0);
bad2:
if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN))
VOP_UNLOCK(dvp);
vrele(dvp);
bad:
vput(dp);
*vpp = NULL;
return (error);
}
@ -900,10 +834,11 @@ ufs_rename(ap)
struct vnode *tvp = ap->a_tvp;
register struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
register struct vnode *fdvp = ap->a_fdvp;
register struct componentname *tcnp = ap->a_tcnp;
register struct componentname *fcnp = ap->a_fcnp;
register struct inode *ip, *xp, *dp;
struct vnode *fdvp = ap->a_fdvp;
struct componentname *tcnp = ap->a_tcnp;
struct componentname *fcnp = ap->a_fcnp;
struct proc *p = fcnp->cn_proc;
struct inode *ip, *xp, *dp;
struct dirtemplate dirbuf;
struct timeval tv;
int doingdirectory = 0, oldparent = 0, newparent = 0;
@ -948,25 +883,29 @@ ufs_rename(ap)
error = EINVAL;
goto abortit;
}
VOP_ABORTOP(fdvp, fcnp);
vrele(fdvp);
vrele(fvp);
/* Release destination completely. */
VOP_ABORTOP(tdvp, tcnp);
vput(tdvp);
vput(tvp);
tcnp->cn_flags &= ~MODMASK;
tcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
if ((tcnp->cn_flags & SAVESTART) == 0)
/* Delete source. */
vrele(fdvp);
vrele(fvp);
fcnp->cn_flags &= ~MODMASK;
fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
if ((fcnp->cn_flags & SAVESTART) == 0)
panic("ufs_rename: lost from startdir");
tcnp->cn_nameiop = DELETE;
(void) relookup(tdvp, &tvp, tcnp);
return (VOP_REMOVE(tdvp, tvp, tcnp));
fcnp->cn_nameiop = DELETE;
(void) relookup(fdvp, &fvp, fcnp);
return (VOP_REMOVE(fdvp, fvp, fcnp));
}
if (error = VOP_LOCK(fvp))
if (error = vn_lock(fvp, LK_EXCLUSIVE, p))
goto abortit;
dp = VTOI(fdvp);
ip = VTOI(fvp);
if ((ip->i_flags & (IMMUTABLE | APPEND)) || (dp->i_flags & APPEND)) {
VOP_UNLOCK(fvp);
VOP_UNLOCK(fvp, 0, p);
error = EPERM;
goto abortit;
}
@ -977,7 +916,7 @@ ufs_rename(ap)
if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
dp == ip || (fcnp->cn_flags&ISDOTDOT) ||
(ip->i_flag & IN_RENAME)) {
VOP_UNLOCK(fvp);
VOP_UNLOCK(fvp, 0, p);
error = EINVAL;
goto abortit;
}
@ -1006,7 +945,7 @@ ufs_rename(ap)
ip->i_flag |= IN_CHANGE;
tv = time;
if (error = VOP_UPDATE(fvp, &tv, &tv, 1)) {
VOP_UNLOCK(fvp);
VOP_UNLOCK(fvp, 0, p);
goto bad;
}
@ -1021,7 +960,7 @@ ufs_rename(ap)
* call to checkpath().
*/
error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_proc);
VOP_UNLOCK(fvp);
VOP_UNLOCK(fvp, 0, p);
if (oldparent != dp->i_number)
newparent = dp->i_number;
if (doingdirectory && newparent) {
@ -1242,7 +1181,9 @@ ufs_rename(ap)
vput(ITOV(xp));
vput(ITOV(dp));
out:
if (VOP_LOCK(fvp) == 0) {
if (doingdirectory)
ip->i_flag &= ~IN_RENAME;
if (vn_lock(fvp, LK_EXCLUSIVE, p) == 0) {
ip->i_nlink--;
ip->i_flag |= IN_CHANGE;
vput(fvp);
@ -1319,6 +1260,8 @@ ufs_mkdir(ap)
ip->i_mode = dmode;
tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */
ip->i_nlink = 2;
if (cnp->cn_flags & ISWHITEOUT)
ip->i_flags |= UF_OPAQUE;
tv = time;
error = VOP_UPDATE(tvp, &tv, &tv, 1);
@ -1389,10 +1332,10 @@ ufs_rmdir(ap)
struct componentname *a_cnp;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
register struct vnode *dvp = ap->a_dvp;
register struct componentname *cnp = ap->a_cnp;
register struct inode *ip, *dp;
struct vnode *vp = ap->a_vp;
struct vnode *dvp = ap->a_dvp;
struct componentname *cnp = ap->a_cnp;
struct inode *ip, *dp;
int error;
ip = VTOI(vp);
@ -1506,16 +1449,22 @@ ufs_readdir(ap)
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
int *ncookies;
u_long **a_cookies;
} */ *ap;
{
register struct uio *uio = ap->a_uio;
int count, lost, error;
int error;
size_t count, lost;
off_t off = uio->uio_offset;
count = uio->uio_resid;
count &= ~(DIRBLKSIZ - 1);
lost = uio->uio_resid - count;
if (count < DIRBLKSIZ || (uio->uio_offset & (DIRBLKSIZ -1)))
/* Make sure we don't return partial entries. */
count -= (uio->uio_offset + count) & (DIRBLKSIZ -1);
if (count <= 0)
return (EINVAL);
lost = uio->uio_resid - count;
uio->uio_resid = count;
uio->uio_iov->iov_len = count;
# if (BYTE_ORDER == LITTLE_ENDIAN)
@ -1560,7 +1509,43 @@ ufs_readdir(ap)
# else
error = VOP_READ(ap->a_vp, uio, 0, ap->a_cred);
# endif
if (!error && ap->a_ncookies) {
struct dirent *dp, *dpstart;
off_t offstart;
u_long *cookies;
int ncookies;
/*
* Only the NFS server uses cookies, and it loads the
* directory block into system space, so we can just look at
* it directly.
*/
if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
panic("ufs_readdir: lost in space");
dpstart = (struct dirent *)
(uio->uio_iov->iov_base - (uio->uio_offset - off));
offstart = off;
for (dp = dpstart, ncookies = 0; off < uio->uio_offset; ) {
if (dp->d_reclen == 0)
break;
off += dp->d_reclen;
ncookies++;
dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
}
lost += uio->uio_offset - off;
uio->uio_offset = off;
MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP,
M_WAITOK);
*ap->a_ncookies = ncookies;
*ap->a_cookies = cookies;
for (off = offstart, dp = dpstart; off < uio->uio_offset; ) {
*(cookies++) = off;
off += dp->d_reclen;
dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
}
}
uio->uio_resid += lost;
*ap->a_eofflag = VTOI(ap->a_vp)->i_size <= uio->uio_offset;
return (error);
}
@ -1611,78 +1596,31 @@ int
ufs_lock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
register struct inode *ip;
struct proc *p = curproc; /* XXX */
struct vnode *vp = ap->a_vp;
start:
while (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
sleep((caddr_t)vp, PINOD);
}
if (vp->v_tag == VT_NON)
return (ENOENT);
ip = VTOI(vp);
if (ip->i_flag & IN_LOCKED) {
ip->i_flag |= IN_WANTED;
#ifdef DIAGNOSTIC
if (p) {
if (p->p_pid == ip->i_lockholder)
panic("locking against myself");
ip->i_lockwaiter = p->p_pid;
} else
ip->i_lockwaiter = -1;
#endif
(void) sleep((caddr_t)ip, PINOD);
goto start;
}
#ifdef DIAGNOSTIC
ip->i_lockwaiter = 0;
if (ip->i_lockholder != 0)
panic("lockholder (%d) != 0", ip->i_lockholder);
if (p && p->p_pid == 0)
printf("locking by process 0\n");
if (p)
ip->i_lockholder = p->p_pid;
else
ip->i_lockholder = -1;
#endif
ip->i_flag |= IN_LOCKED;
return (0);
return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags, &vp->v_interlock,
ap->a_p));
}
/*
* Unlock an inode. If WANT bit is on, wakeup.
* Unlock an inode.
*/
int lockcount = 90;
int
ufs_unlock(ap)
struct vop_unlock_args /* {
struct vnode *a_vp;
int a_flags;
struct proc *a_p;
} */ *ap;
{
register struct inode *ip = VTOI(ap->a_vp);
struct proc *p = curproc; /* XXX */
struct vnode *vp = ap->a_vp;
#ifdef DIAGNOSTIC
if ((ip->i_flag & IN_LOCKED) == 0) {
vprint("ufs_unlock: unlocked inode", ap->a_vp);
panic("ufs_unlock NOT LOCKED");
}
if (p && p->p_pid != ip->i_lockholder && p->p_pid > -1 &&
ip->i_lockholder > -1 && lockcount++ < 100)
panic("unlocker (%d) != lock holder (%d)",
p->p_pid, ip->i_lockholder);
ip->i_lockholder = 0;
#endif
ip->i_flag &= ~IN_LOCKED;
if (ip->i_flag & IN_WANTED) {
ip->i_flag &= ~IN_WANTED;
wakeup((caddr_t)ip);
}
return (0);
return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags | LK_RELEASE,
&vp->v_interlock, ap->a_p));
}
/*
@ -1695,9 +1633,7 @@ ufs_islocked(ap)
} */ *ap;
{
if (VTOI(ap->a_vp)->i_flag & IN_LOCKED)
return (1);
return (0);
return (lockstatus(&VTOI(ap->a_vp)->i_lock));
}
/*
@ -1757,12 +1693,7 @@ ufs_print(ap)
if (vp->v_type == VFIFO)
fifo_printinfo(vp);
#endif /* FIFO */
printf("%s\n", (ip->i_flag & IN_LOCKED) ? " (LOCKED)" : "");
if (ip->i_lockholder == 0)
return (0);
printf("\towner pid %d", ip->i_lockholder);
if (ip->i_lockwaiter)
printf(" waiting pid %d", ip->i_lockwaiter);
lockmgr_printinfo(&ip->i_lock);
printf("\n");
return (0);
}
@ -1821,10 +1752,13 @@ ufsspec_close(ap)
struct proc *a_p;
} */ *ap;
{
register struct inode *ip = VTOI(ap->a_vp);
struct vnode *vp = ap->a_vp;
struct inode *ip = VTOI(vp);
if (ap->a_vp->v_usecount > 1 && !(ip->i_flag & IN_LOCKED))
simple_lock(&vp->v_interlock);
if (ap->a_vp->v_usecount > 1)
ITIMES(ip, &time, &time);
simple_unlock(&vp->v_interlock);
return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
}
@ -1885,10 +1819,13 @@ ufsfifo_close(ap)
} */ *ap;
{
extern int (**fifo_vnodeop_p)();
register struct inode *ip = VTOI(ap->a_vp);
struct vnode *vp = ap->a_vp;
struct inode *ip = VTOI(vp);
if (ap->a_vp->v_usecount > 1 && !(ip->i_flag & IN_LOCKED))
simple_lock(&vp->v_interlock);
if (ap->a_vp->v_usecount > 1)
ITIMES(ip, &time, &time);
simple_unlock(&vp->v_interlock);
return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
}
#endif /* FIFO */
@ -1994,7 +1931,7 @@ ufs_advlock(ap)
lock->lf_inode = ip;
lock->lf_type = fl->l_type;
lock->lf_next = (struct lockf *)0;
lock->lf_block = (struct lockf *)0;
TAILQ_INIT(&lock->lf_blkhd);
lock->lf_flags = ap->a_flags;
/*
* Do the requested operation.
@ -2031,6 +1968,7 @@ ufs_vinit(mntp, specops, fifoops, vpp)
int (**fifoops)();
struct vnode **vpp;
{
struct proc *p = curproc; /* XXX */
struct inode *ip;
struct vnode *vp, *nvp;
@ -2043,9 +1981,9 @@ ufs_vinit(mntp, specops, fifoops, vpp)
if (nvp = checkalias(vp, ip->i_rdev, mntp)) {
/*
* Discard unneeded vnode, but save its inode.
* Note that the lock is carried over in the inode
* to the replacement vnode.
*/
ufs_ihashrem(ip);
VOP_UNLOCK(vp);
nvp->v_data = vp->v_data;
vp->v_data = NULL;
vp->v_op = spec_vnodeop_p;
@ -2056,7 +1994,6 @@ ufs_vinit(mntp, specops, fifoops, vpp)
*/
vp = nvp;
ip->i_vnode = vp;
ufs_ihashins(ip);
}
break;
case VFIFO:
@ -2131,6 +2068,9 @@ ufs_makeinode(mode, dvp, vpp, cnp)
suser(cnp->cn_cred, NULL))
ip->i_mode &= ~ISGID;
if (cnp->cn_flags & ISWHITEOUT)
ip->i_flags |= UF_OPAQUE;
/*
* Make sure inode goes to disk before directory entry.
*/

View File

@ -30,9 +30,30 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufsmount.h 8.2 (Berkeley) 1/12/94
* @(#)ufsmount.h 8.6 (Berkeley) 3/30/95
*/
/*
* Arguments to mount UFS-based filesystems
*/
struct ufs_args {
char *fspec; /* block special device to mount */
struct export_args export; /* network export information */
};
#ifdef MFS
/*
* Arguments to mount MFS
*/
struct mfs_args {
char *fspec; /* name to export for statfs */
struct export_args export; /* if exported MFSes are supported */
caddr_t base; /* base of file system in memory */
u_long size; /* size of file system */
};
#endif /* MFS */
#ifdef KERNEL
struct buf;
struct inode;
struct nameidata;
@ -47,12 +68,14 @@ struct ufsmount {
struct mount *um_mountp; /* filesystem vfs structure */
dev_t um_dev; /* device mounted */
struct vnode *um_devvp; /* block device mounted vnode */
union { /* pointer to superblock */
struct lfs *lfs; /* LFS */
struct fs *fs; /* FFS */
} ufsmount_u;
#define um_fs ufsmount_u.fs
#define um_lfs ufsmount_u.lfs
struct vnode *um_quotas[MAXQUOTAS]; /* pointer to quota files */
struct ucred *um_cred[MAXQUOTAS]; /* quota file access cred */
u_long um_nindir; /* indirect ptrs per block */
@ -62,7 +85,9 @@ struct ufsmount {
time_t um_itime[MAXQUOTAS]; /* inode quota time limit */
char um_qflags[MAXQUOTAS]; /* quota specific flags */
struct netexport um_export; /* export information */
int64_t um_savedmaxfilesize; /* XXX - limit maxfilesize */
};
/*
* Flags describing the state of quotas.
*/
@ -76,8 +101,7 @@ struct ufsmount {
* Macros to access file system parameters in the ufsmount structure.
* Used by ufs_bmap.
*/
#define MNINDIR(ump) ((ump)->um_nindir)
#define blkptrtodb(ump, b) ((b) << (ump)->um_bptrtodb)
#define is_sequential(ump, a, b) ((b) == (a) + ump->um_seqinc)
#define MNINDIR(ump) ((ump)->um_nindir)
#endif /* KERNEL */