freebsd-nq/sys/gnu/fs/ext2fs/ext2_bmap.c
Jeff Roberson 1c4bcd050a - Move rusage from being per-process in struct pstats to per-thread in
td_ru.  This removes the requirement for per-process synchronization in
   statclock() and mi_switch().  This was previously supported by
   sched_lock which is going away.  All modifications to rusage are now
   done in the context of the owning thread.  Reads proceed without locks.
 - Aggregate an exiting thread's rusage in thread_exit() such that the exiting
   thread's rusage is not lost.
 - Provide a new routine, rufetch() to fetch an aggregate of all rusage
   structures from all threads in a process.  This routine must be used
   in any place requiring a rusage from a process prior to its exit.  The
   exited process's rusage is still available via p_ru.
 - Aggregate tick statistics only on demand via rufetch() or when a thread
   exits.  Tick statistics are kept in the thread and protected by sched_lock
   until it exits.

Initial patch by:	attilio
Reviewed by:		attilio, bde (some objections), arch (mostly silent)
2007-06-01 01:12:45 +00:00

/*-
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_bmap.c	8.7 (Berkeley) 3/21/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>

#include <gnu/fs/ext2fs/inode.h>
#include <gnu/fs/ext2fs/ext2_fs.h>
#include <gnu/fs/ext2fs/ext2_fs_sb.h>
#include <gnu/fs/ext2fs/ext2_mount.h>
#include <gnu/fs/ext2fs/ext2_extern.h>

/*
 * Bmap converts the logical block number of a file to its physical block
 * number on the disk.  The conversion is done by using the logical block
 * number to index into the array of block pointers described by the dinode.
 */
int
ext2_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{
	int32_t blkno;
	int error;

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_bop != NULL)
		*ap->a_bop = &VTOI(ap->a_vp)->i_devvp->v_bufobj;
	if (ap->a_bnp == NULL)
		return (0);

	error = ext2_bmaparray(ap->a_vp, ap->a_bn, &blkno,
	    ap->a_runp, ap->a_runb);
	*ap->a_bnp = blkno;
	return (error);
}
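
/*
 * For illustration only: a filesystem-independent consumer would normally
 * reach the routine above through the vnode operation vector rather than
 * by calling it directly, along the lines of the sketch below (vp and lbn
 * stand for a vnode and a logical block of interest); a block number of
 * -1 coming back in blkno denotes an unallocated hole.
 *
 *	daddr_t blkno;
 *	int run;
 *
 *	error = VOP_BMAP(vp, lbn, NULL, &blkno, &run, NULL);
 */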

/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ext2_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
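/*
 * Worked example (illustrative; it assumes NDADDR == 12 and a 4K block
 * size, so MNINDIR(ump) == 1024): the single indirect block is cached at
 * logical block -12, the double indirect block at -(12 + 1024 + 1) == -1037,
 * and the first indirect block hanging off the double indirect block at
 * -(12 + 1024) == -1036.
 */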
int
ext2_bmaparray(vp, bn, bnp, runp, runb)
	struct vnode *vp;
	int32_t bn;
	int32_t *bnp;
	int *runp;
	int *runb;
{
	struct inode *ip;
	struct buf *bp;
	struct ext2mount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR+1], *ap;
	int32_t daddr;
	long metalbn;
	int error, num, maxrun = 0, bsize;
	int *nump;

	ap = NULL;
	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOEXT2(mp);
	devvp = ump->um_devvp;

	bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);
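
	/*
	 * If the caller asks for run lengths, bound them by the largest
	 * I/O this mount will issue (mnt_iosize_max), expressed in file
	 * system blocks; a run counts contiguous blocks beyond the first.
	 */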
	if (runp) {
		maxrun = mp->mnt_iosize_max / bsize - 1;
		*runp = 0;
	}

	if (runb) {
		*runb = 0;
	}

	ap = a;
	nump = &num;

	error = ext2_getlbns(vp, bn, ap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0) {
			*bnp = -1;
		} else if (runp) {
			int32_t bnb = bn;

			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
				    is_sequential(ump, ip->i_db[bn],
				    ip->i_db[bn+1]);
				    --bn, ++*runb);
			}
		}
		return (0);
	}

	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[ap->in_off];

	for (bp = NULL, ++ap; --num; ++ap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */
		metalbn = ap->in_lbn;
		if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) ||
		    metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		ap->in_exists = 1;
		bp = getblk(vp, metalbn, bsize, 0, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
			if (!daddr)
				panic("ext2_bmaparray: indirect block not in cache");
#endif
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_iocmd = BIO_READ;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
			vfs_busy_pages(bp, 0);
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);
			curthread->td_ru.ru_inblock++;
			error = bufwait(bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((int32_t *)bp->b_data)[ap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = ap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
			    ((int32_t *)bp->b_data)[bn - 1],
			    ((int32_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = ap->in_off;
			if (runb && bn) {
				for (--bn; bn >= 0 && *runb < maxrun &&
				    is_sequential(ump,
				    ((int32_t *)bp->b_data)[bn],
				    ((int32_t *)bp->b_data)[bn+1]);
				    --bn, ++*runb);
			}
		}
	}

	if (bp)
		bqrelse(bp);

	/*
	 * Since this is FFS independent code, we are out of scope for the
	 * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
	 * will fall in the range 1..um_seqinc, so we use that test and
	 * return a request for a zeroed out buffer if attempts are made
	 * to read a BLK_NOCOPY or BLK_SNAP block.
	 */
	if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 &&
	    daddr < ump->um_seqinc) {
		*bnp = -1;
		return (0);
	}

	*bnp = blkptrtodb(ump, daddr);
	if (*bnp == 0) {
		*bnp = -1;
	}
	return (0);
}

/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note, the logical block number of the inode single/double/triple indirect
 * block appears twice in the array, once with the offset into the i_ib and
 * once with the offset into the page itself.
 */
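/*
 * Worked example (illustrative; again assuming NDADDR == 12 and
 * MNINDIR(ump) == 1024): a request for data block 1100 produces three
 * entries,
 *
 *	{ in_lbn = -1037, in_off = 1 }	the double indirect block, i_ib[1]
 *	{ in_lbn = -1037, in_off = 0 }	slot 0 of it holds the indirect
 *					block at -1036
 *	{ in_lbn = -1036, in_off = 64 }	slot 64 of that block holds data
 *					block 1100 (1036 + 64)
 */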
int
ext2_getlbns(vp, bn, ap, nump)
	struct vnode *vp;
	int32_t bn;
	struct indir *ap;
	int *nump;
{
	long blockcnt, metalbn, realbn;
	struct ext2mount *ump;
	int i, numlevels, off;
	int64_t qblockcnt;

	ump = VFSTOEXT2(vp->v_mount);
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	if ((long)bn < 0)
		bn = -(long)bn;

	/* The first NDADDR blocks are direct blocks. */
	if (bn < NDADDR)
		return (0);

	/*
	 * Determine the number of levels of indirection.  After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the previous level of indirection, and NIADDR - i is the number
	 * of levels of indirection needed to locate the requested block.
	 */
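	/*
	 * For example (illustrative; assuming MNINDIR(ump) == 1024), a bn in
	 * the double indirect range leaves blockcnt == 1024 and i == 2 at the
	 * break below, so NIADDR - i == 1 later selects i_ib[1], the double
	 * indirect pointer in the inode.
	 */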
	for (blockcnt = 1, i = NIADDR, bn -= NDADDR;; i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);
		/*
		 * Use int64_t's here to avoid overflow for triple indirect
		 * blocks when longs have 32 bits and the block size is more
		 * than 4K.
		 */
		qblockcnt = (int64_t)blockcnt * MNINDIR(ump);
		if (bn < qblockcnt)
			break;
		blockcnt = qblockcnt;
	}

	/* Calculate the address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + NIADDR - i);
	else
		metalbn = -(-realbn - bn + NIADDR - i);

	/*
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = NIADDR - i;
	ap->in_exists = 0;
	ap++;
	for (++numlevels; i <= NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		off = (bn / blockcnt) % MNINDIR(ump);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		ap->in_exists = 0;
		++ap;

		metalbn -= -1 + off * blockcnt;
		blockcnt /= MNINDIR(ump);
	}

	if (nump)
		*nump = numlevels;
	return (0);
}