Changes to support 1TB file sizes. Pages are now named by an
(object, index) pair instead of an (object, offset) pair.
John Dyson 1995-12-11 04:58:34 +00:00
parent c3fda50ba5
commit a316d390bd
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=12767
52 changed files with 1007 additions and 831 deletions
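The core of the change: a page was previously named by a 32-bit byte offset into its VM object and is now named by a page index, with byte offsets widened to the new 64-bit vm_ooffset_t. A minimal sketch of the conversions the diffs below lean on, assuming 4K pages on i386 (the real OFF_TO_IDX/IDX_TO_OFF definitions live in the VM headers and may differ in detail):

    typedef long long vm_ooffset_t;         /* 64-bit byte offset */
    typedef unsigned long vm_pindex_t;      /* page index within an object */

    #define PAGE_SHIFT      12              /* assumed: 4096-byte pages */
    #define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
    #define IDX_TO_OFF(idx) ((vm_ooffset_t)(idx) << PAGE_SHIFT)

With 4K pages a 32-bit page index spans 2^44 bytes (16TB) of object space, comfortably past the 1TB target, and only code that truly needs a byte offset pays for 64-bit arithmetic.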

View File: pmap.c

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.67 1995/12/07 12:45:36 davidg Exp $
* $Id: pmap.c,v 1.68 1995/12/10 13:36:28 phk Exp $
*/
/*
@ -672,7 +672,8 @@ pmap_alloc_pv_entry()
* allocate a physical page out of the vm system
*/
m = vm_page_alloc(kernel_object,
pvva - vm_map_min(kernel_map), VM_ALLOC_INTERRUPT);
OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
VM_ALLOC_INTERRUPT);
if (m) {
int newentries;
int i;
@ -1399,73 +1400,85 @@ pmap_enter_quick(pmap, va, pa)
return;
}
#define MAX_INIT_PT (1024*2048)
#define MAX_INIT_PT (512 * 4096)
/*
* pmap_object_init_pt preloads the ptes for a given object
* into the specified pmap. This eliminates the blast of soft
* faults on process startup and immediately after an mmap.
*/
void
pmap_object_init_pt(pmap, addr, object, offset, size)
pmap_object_init_pt(pmap, addr, object, pindex, size)
pmap_t pmap;
vm_offset_t addr;
vm_object_t object;
vm_offset_t offset;
vm_offset_t size;
vm_pindex_t pindex;
vm_size_t size;
{
vm_offset_t tmpoff;
vm_offset_t tmpidx;
int psize;
vm_page_t p;
int objbytes;
int objpgs;
if (!pmap || ((size > MAX_INIT_PT) &&
(object->resident_page_count > (MAX_INIT_PT / NBPG)))) {
(object->resident_page_count > MAX_INIT_PT / PAGE_SIZE))) {
return;
}
psize = (size >> PAGE_SHIFT);
/*
* if we are processing a major portion of the object, then scan the
* entire thing.
*/
if (size > (object->size >> 2)) {
objbytes = size;
if (psize > (object->size >> 2)) {
objpgs = psize;
for (p = object->memq.tqh_first;
((objbytes > 0) && (p != NULL));
((objpgs > 0) && (p != NULL));
p = p->listq.tqe_next) {
tmpoff = p->offset;
if (tmpoff < offset) {
tmpidx = p->pindex;
if (tmpidx < pindex) {
continue;
}
tmpoff -= offset;
if (tmpoff >= size) {
tmpidx -= pindex;
if (tmpidx >= psize) {
continue;
}
if (((p->flags & (PG_ACTIVE | PG_INACTIVE)) != 0) &&
if (((p->flags & (PG_ACTIVE | PG_INACTIVE | PG_CACHE)) != 0) &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->bmapped == 0) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) == 0) {
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if (p->flags & PG_CACHE)
vm_page_deactivate(p);
vm_page_hold(p);
p->flags |= PG_MAPPED;
pmap_enter_quick(pmap, addr + tmpoff, VM_PAGE_TO_PHYS(p));
pmap_enter_quick(pmap,
addr + (tmpidx << PAGE_SHIFT),
VM_PAGE_TO_PHYS(p));
vm_page_unhold(p);
}
objbytes -= NBPG;
objpgs -= 1;
}
} else {
/*
* else lookup the pages one-by-one.
*/
for (tmpoff = 0; tmpoff < size; tmpoff += NBPG) {
p = vm_page_lookup(object, tmpoff + offset);
if (p && ((p->flags & (PG_ACTIVE | PG_INACTIVE)) != 0) &&
(p->bmapped == 0) && (p->busy == 0) &&
for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
((p->flags & (PG_ACTIVE | PG_INACTIVE | PG_CACHE)) != 0) &&
(p->bmapped == 0) &&
(p->busy == 0) &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) == 0) {
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if (p->flags & PG_CACHE)
vm_page_deactivate(p);
vm_page_hold(p);
p->flags |= PG_MAPPED;
pmap_enter_quick(pmap, addr + tmpoff, VM_PAGE_TO_PHYS(p));
pmap_enter_quick(pmap,
addr + (tmpidx << PAGE_SHIFT),
VM_PAGE_TO_PHYS(p));
vm_page_unhold(p);
}
}
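In the pmap_object_init_pt() hunk above, the loop bookkeeping moves from bytes (tmpoff, objbytes, NBPG) to pages (tmpidx, objpgs, psize); MAX_INIT_PT is re-expressed as 512 * 4096, the same 2MB, in page terms; and a virtual address is formed only at the mapping call. The address arithmetic, as a small sketch (hypothetical helper, not part of the diff):

    /* Sketch: the mapping address for a page tmpidx pages into the
     * region, after tmpidx has been rebased by subtracting pindex. */
    static vm_offset_t
    pt_va(vm_offset_t addr, vm_pindex_t tmpidx)
    {
            return (addr + ((vm_offset_t)tmpidx << PAGE_SHIFT));
    }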

View File: vm_machdep.c

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.46 1995/12/07 12:45:40 davidg Exp $
* $Id: vm_machdep.c,v 1.47 1995/12/10 13:36:34 phk Exp $
*/
#include "npx.h"
@ -894,6 +894,7 @@ vm_page_zero_idle() {
pmap_zero_page(VM_PAGE_TO_PHYS(m));
disable_intr();
TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
++vm_page_zero_count;
return 1;
}
return 0;

View File: procfs_mem.c

@ -37,7 +37,7 @@
*
* @(#)procfs_mem.c 8.4 (Berkeley) 1/21/94
*
* $Id: procfs_mem.c,v 1.11 1995/12/03 14:54:35 bde Exp $
* $Id: procfs_mem.c,v 1.12 1995/12/07 12:47:15 davidg Exp $
*/
/*
@ -170,8 +170,8 @@ procfs_rwmem(p, uio)
/* Find space in kernel_map for the page we're interested in */
if (!error)
error = vm_map_find(kernel_map, object, off, &kva,
PAGE_SIZE, 1);
error = vm_map_find(kernel_map, object,
IDX_TO_OFF(off), &kva, PAGE_SIZE, 1);
if (!error) {
/*

View File: spec_vnops.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.6 (Berkeley) 4/9/94
* $Id: spec_vnops.c,v 1.21 1995/12/07 12:47:17 davidg Exp $
* $Id: spec_vnops.c,v 1.22 1995/12/08 11:17:52 julian Exp $
*/
#include <sys/param.h>
@ -365,7 +365,8 @@ spec_write(ap)
}
error = uiomove((char *)bp->b_data + on, n, uio);
if (n + on == bsize) {
bawrite(bp);
/* bawrite(bp); */
cluster_write(bp, 0);
} else
bdwrite(bp);
} while (error == 0 && uio->uio_resid > 0 && n != 0);
@ -750,7 +751,7 @@ spec_getpages(ap)
/*
* Calculate the size of the transfer.
*/
blkno = (ap->a_m[0]->offset + ap->a_offset) / DEV_BSIZE;
blkno = (IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset) / DEV_BSIZE;
/*
* Round up physical size for real devices.
@ -839,3 +840,33 @@ spec_getpages(ap)
printf("spec_getpages: I/O read error\n");
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
/* ARGSUSED */
int
spec_getattr(ap)
struct vop_getattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
register struct vattr *vap = ap->a_vap;
struct partinfo dpart;
bzero(vap, sizeof (*vap));
if (vp->v_type == VBLK)
vap->va_blocksize = BLKDEV_IOSIZE;
else if (vp->v_type == VCHR)
vap->va_blocksize = MAXBSIZE;
if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
(caddr_t)&dpart, FREAD, ap->a_p) == 0) {
vap->va_bytes = (u_quad_t) dpart.disklab->d_partitions[minor(vp->v_rdev)].p_size * DEV_BSIZE;
vap->va_size = vap->va_bytes;
}
return (0);
}
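The u_quad_t cast in the new spec_getattr() is load-bearing: p_size is a 32-bit sector count, so the multiply by DEV_BSIZE has to be widened before it happens, not after. A worked example under that assumption:

    /* Hypothetical 1TB partition: 2^31 sectors of 512 bytes. */
    u_long   p_size = 0x80000000UL;                  /* 2^31 sectors */
    u_long   wrong  = p_size * DEV_BSIZE;            /* wraps to 0 in 32 bits */
    u_quad_t right  = (u_quad_t)p_size * DEV_BSIZE;  /* 2^40 bytes = 1TB */

Without the widening, va_bytes for any device past 4GB would be reported modulo 2^32.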

View File: types.h

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)types.h 8.3 (Berkeley) 1/5/94
* $Id$
* $Id: types.h,v 1.4 1994/08/02 07:39:13 davidg Exp $
*/
#ifndef _MACHTYPES_H_
@ -47,8 +47,10 @@ typedef struct label_t {
} label_t;
#endif
typedef unsigned long vm_offset_t;
typedef unsigned long vm_offset_t;
typedef unsigned long vm_size_t;
typedef long long vm_ooffset_t;
typedef unsigned long vm_pindex_t;
/*
* Basic integral types. Omit the typedef if

View File: bt742a.c

@ -12,7 +12,7 @@
* on the understanding that TFS is not responsible for the correct
* functioning of this software in any circumstances.
*
* $Id: bt742a.c,v 1.46 1995/12/07 12:45:55 davidg Exp $
* $Id: bt742a.c,v 1.47 1995/12/10 13:38:22 phk Exp $
*/
/*
@ -1296,7 +1296,6 @@ bt_init(unit)
bt->bt_mbx.tmbi = &bt->bt_mbx.mbi[0];
bt_inquire_setup_information(unit, &info);
/*
* Note that we are going and return (to probe)
*/

View File: wd.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)wd.c 7.2 (Berkeley) 5/9/91
* $Id: wd.c,v 1.98 1995/12/08 23:20:52 phk Exp $
* $Id: wd.c,v 1.99 1995/12/10 15:54:58 bde Exp $
*/
/* TODO:
@ -223,9 +223,9 @@ struct diskgeom {
struct disk {
long dk_bc; /* byte count left */
short dk_skip; /* blocks already transferred */
char dk_ctrlr; /* physical controller number */
char dk_unit; /* physical unit number */
char dk_lunit; /* logical unit number */
int dk_ctrlr; /* physical controller number */
int dk_unit; /* physical unit number */
int dk_lunit; /* logical unit number */
char dk_state; /* control state */
u_char dk_status; /* copy of status reg. */
u_char dk_error; /* copy of error reg. */
@ -560,7 +560,6 @@ next: }
void
wdstrategy(register struct buf *bp)
{
register struct buf *dp;
struct disk *du;
int lunit = dkunit(bp->b_dev);
int s;
@ -705,7 +704,6 @@ wdstart(int ctrlr)
register struct disk *du;
register struct buf *bp;
struct diskgeom *lp; /* XXX sic */
struct buf *dp;
long blknum;
long secpertrk, secpercyl;
int lunit;
@ -938,7 +936,7 @@ void
wdintr(int unit)
{
register struct disk *du;
register struct buf *bp, *dp;
register struct buf *bp;
if (wdtab[unit].b_active == 2)
return; /* intr in wdflushirq() */
@ -1141,9 +1139,6 @@ wdopen(dev_t dev, int flags, int fmt, struct proc *p)
register unsigned int lunit;
register struct disk *du;
int error;
int part = dkpart(dev), mask = 1 << part;
struct partition *pp;
char *msg;
lunit = dkunit(dev);
if (lunit >= NWD || dktype(dev) != 0)
@ -1652,11 +1647,11 @@ wdgetctlr(struct disk *du)
du->dk_multi = 1;
}
#ifdef NOTYET
/* #ifdef NOTYET */
/* set read caching and write caching */
wdcommand(du, 0, 0, 0, WDFEA_RCACHE, WDCC_FEATURES);
wdcommand(du, 0, 0, 0, WDFEA_WCACHE, WDCC_FEATURES);
#endif
/* #endif */
return (0);
}

View File: imgact_aout.c

@ -28,7 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: imgact_aout.c,v 1.18 1995/12/02 16:31:59 bde Exp $
* $Id: imgact_aout.c,v 1.19 1995/12/07 12:46:33 davidg Exp $
*/
#include <sys/param.h>
@ -57,7 +57,8 @@ exec_aout_imgact(imgp)
{
struct exec *a_out = (struct exec *) imgp->image_header;
struct vmspace *vmspace = imgp->proc->p_vmspace;
unsigned long vmaddr, virtual_offset, file_offset;
unsigned long vmaddr, virtual_offset;
unsigned long file_offset;
unsigned long bss_size;
int error;

View File: subr_rlist.c

@ -54,7 +54,7 @@
* functioning of this software, nor does the author assume any responsibility
* for damages incurred with its use.
*
* $Id: subr_rlist.c,v 1.12 1995/12/02 18:58:53 bde Exp $
* $Id: subr_rlist.c,v 1.13 1995/12/07 12:46:53 davidg Exp $
*/
#include <sys/param.h>
@ -85,12 +85,12 @@ rlist_malloc()
int i;
while( rlist_count < RLIST_MIN) {
int s = splhigh();
rl = (struct rlist *)kmem_malloc(kmem_map, NBPG, M_WAITOK);
rl = (struct rlist *)kmem_malloc(kmem_map, PAGE_SIZE, M_WAITOK);
splx(s);
if( !rl)
break;
for(i=0;i<(NBPG/(sizeof *rl));i++) {
for(i=0;i<(PAGE_SIZE/(sizeof *rl));i++) {
rl->rl_next = rlfree;
rlfree = rl;
rlist_count++;

View File: vfs_bio.c

@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id: vfs_bio.c,v 1.74 1995/12/04 16:48:32 phk Exp $
* $Id: vfs_bio.c,v 1.75 1995/12/07 12:47:02 davidg Exp $
*/
/*
@ -74,7 +74,6 @@ void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static __inline struct buf * gbincore(struct vnode * vp, daddr_t blkno);
int needsbuffer;
@ -105,6 +104,8 @@ int bufspace, maxbufspace;
struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES];
#define BUF_MAXUSE 8
/*
* Initialize buffer headers and related structures.
*/
@ -150,7 +151,8 @@ bufinit()
bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
bogus_page = vm_page_alloc(kernel_object,
bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
VM_ALLOC_NORMAL);
}
@ -397,9 +399,8 @@ brelse(struct buf * bp)
if (bp->b_flags & B_WANTED) {
bp->b_flags &= ~(B_WANTED | B_AGE);
wakeup(bp);
} else if (bp->b_flags & B_VMIO) {
wakeup(bp);
}
}
if (bp->b_flags & B_LOCKED)
bp->b_flags &= ~B_ERROR;
@ -418,7 +419,7 @@ brelse(struct buf * bp)
* invalidate the pages in the VM object.
*/
if (bp->b_flags & B_VMIO) {
vm_offset_t foff;
vm_ooffset_t foff;
vm_object_t obj;
int i, resid;
vm_page_t m;
@ -428,23 +429,27 @@ brelse(struct buf * bp)
vp = bp->b_vp;
if (!vp)
panic("brelse: missing vp");
if (!vp->v_mount)
panic("brelse: missing mount info");
if (bp->b_npages) {
vm_pindex_t poff;
obj = (vm_object_t) vp->v_object;
foff = trunc_page(vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno);
if (vp->v_type == VBLK)
foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
else
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
poff = OFF_TO_IDX(foff);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
m = vm_page_lookup(obj, foff);
m = vm_page_lookup(obj, poff + i);
if (!m) {
panic("brelse: page missing\n");
}
bp->b_pages[i] = m;
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
pmap_qenter(trunc_page(bp->b_data),
bp->b_pages, bp->b_npages);
}
resid = (m->offset + PAGE_SIZE) - foff;
resid = IDX_TO_OFF(m->pindex+1) - foff;
if (resid > iototal)
resid = iototal;
if (resid > 0) {
@ -456,7 +461,7 @@ brelse(struct buf * bp)
if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
vm_page_test_dirty(m);
if (m->dirty == 0) {
vm_page_set_invalid(m, foff, resid);
vm_page_set_invalid(m, (vm_offset_t) foff, resid);
if (m->valid == 0)
vm_page_protect(m, VM_PROT_NONE);
}
@ -473,11 +478,13 @@ brelse(struct buf * bp)
--m->bmapped;
if (m->bmapped == 0) {
if (m->flags & PG_WANTED) {
wakeup(m);
m->flags &= ~PG_WANTED;
wakeup(m);
}
if ((m->busy == 0) && ((m->flags & PG_BUSY) == 0)) {
vm_page_test_dirty(m);
if (m->object->flags & OBJ_MIGHTBEDIRTY) {
vm_page_test_dirty(m);
}
/*
* if page isn't valid, no sense in keeping it around
*/
@ -551,7 +558,7 @@ brelse(struct buf * bp)
/*
* Check to see if a block is currently memory resident.
*/
static __inline struct buf *
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
struct buf *bp;
@ -576,7 +583,7 @@ gbincore(struct vnode * vp, daddr_t blkno)
* clearing out B_DELWRI buffers... This is much better
* than the old way of writing only one buffer at a time.
*/
void
int
vfs_bio_awrite(struct buf * bp)
{
int i;
@ -585,12 +592,16 @@ vfs_bio_awrite(struct buf * bp)
int s;
int ncl;
struct buf *bpa;
int nwritten;
s = splbio();
if (vp->v_mount && (vp->v_flag & VVMIO) &&
if (/* (vp->v_type != VBLK) && */
(bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
int size = vp->v_mount->mnt_stat.f_iosize;
int maxcl = MAXPHYS / size;
int size;
int maxcl;
size = vp->v_mount->mnt_stat.f_iosize;
maxcl = MAXPHYS / size;
for (i = 1; i < maxcl; i++) {
if ((bpa = gbincore(vp, lblkno + i)) &&
@ -598,7 +609,7 @@ vfs_bio_awrite(struct buf * bp)
(B_DELWRI | B_CLUSTEROK)) &&
(bpa->b_bufsize == size)) {
if ((bpa->b_blkno == bpa->b_lblkno) ||
(bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
(bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
break;
} else {
break;
@ -609,9 +620,9 @@ vfs_bio_awrite(struct buf * bp)
* this is a possible cluster write
*/
if (ncl != 1) {
cluster_wbuild(vp, size, lblkno, ncl);
nwritten = cluster_wbuild(vp, size, lblkno, ncl);
splx(s);
return;
return nwritten;
}
}
bremfree(bp);
@ -620,7 +631,9 @@ vfs_bio_awrite(struct buf * bp)
* default (old) behavior, writing out only one block
*/
bp->b_flags |= B_BUSY | B_ASYNC;
nwritten = bp->b_bufsize;
(void) VOP_BWRITE(bp);
return nwritten;
}
@ -632,6 +645,7 @@ getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
struct buf *bp;
int s;
int nbyteswritten = 0;
s = splbio();
start:
@ -661,14 +675,24 @@ getnewbuf(int slpflag, int slptimeo, int doingvmio)
if (!bp) {
/* wait for a free buffer of any kind */
needsbuffer = 1;
tsleep(&needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
tsleep(&needsbuffer,
(PRIBIO + 1) | slpflag, "newbuf", slptimeo);
splx(s);
return (0);
}
if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
--bp->b_usecount;
TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
goto start;
}
}
/* if we are a delayed write, convert to an async write */
if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
vfs_bio_awrite(bp);
nbyteswritten += vfs_bio_awrite(bp);
if (!slpflag && !slptimeo) {
splx(s);
return (0);
@ -717,7 +741,8 @@ getnewbuf(int slpflag, int slptimeo, int doingvmio)
bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
bp->b_dirtyoff = bp->b_dirtyend = 0;
bp->b_validoff = bp->b_validend = 0;
if (bufspace >= maxbufspace) {
bp->b_usecount = 2;
if (bufspace >= maxbufspace + nbyteswritten) {
s = splbio();
bp->b_flags |= B_INVAL;
brelse(bp);
@ -763,8 +788,9 @@ int
inmem(struct vnode * vp, daddr_t blkno)
{
vm_object_t obj;
vm_offset_t off, toff, tinc;
vm_offset_t toff, tinc;
vm_page_t m;
vm_ooffset_t off;
if (incore(vp, blkno))
return 1;
@ -781,10 +807,10 @@ inmem(struct vnode * vp, daddr_t blkno)
for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
m = vm_page_lookup(obj, trunc_page(toff + off));
m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
if (!m)
return 0;
if (vm_page_is_valid(m, toff + off, tinc) == 0)
if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
return 0;
}
return 1;
@ -826,7 +852,7 @@ vfs_setdirty(struct buf *bp) {
break;
}
}
boffset = i * PAGE_SIZE;
boffset = (i << PAGE_SHIFT);
if (boffset < bp->b_dirtyoff) {
bp->b_dirtyoff = boffset;
}
@ -839,14 +865,12 @@ vfs_setdirty(struct buf *bp) {
break;
}
}
boffset = (i + 1) * PAGE_SIZE;
offset = boffset + bp->b_pages[0]->offset;
if (offset >= object->size) {
boffset = object->size - bp->b_pages[0]->offset;
}
if (bp->b_dirtyend < boffset) {
bp->b_dirtyend = boffset;
}
boffset = (i + 1);
offset = boffset + bp->b_pages[0]->pindex;
if (offset >= object->size)
boffset = object->size - bp->b_pages[0]->pindex;
if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
bp->b_dirtyend = (boffset << PAGE_SHIFT);
}
}
@ -862,10 +886,13 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
s = splbio();
loop:
if (bp = gbincore(vp, blkno)) {
if ((bp = gbincore(vp, blkno))) {
if (bp->b_flags & (B_BUSY|B_INVAL)) {
bp->b_flags |= B_WANTED;
if (!tsleep(bp, PRIBIO | slpflag, "getblk", slptimeo))
if (bp->b_usecount < BUF_MAXUSE)
++bp->b_usecount;
if (!tsleep(bp,
(PRIBIO + 1) | slpflag, "getblk", slptimeo))
goto loop;
splx(s);
@ -920,6 +947,8 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
}
}
}
if (bp->b_usecount < BUF_MAXUSE)
++bp->b_usecount;
splx(s);
return (bp);
} else {
@ -1017,7 +1046,7 @@ allocbuf(struct buf * bp, int size)
/*
* Just get anonymous memory from the kernel
*/
mbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
newbsize = round_page(size);
if (newbsize < bp->b_bufsize) {
@ -1035,13 +1064,13 @@ allocbuf(struct buf * bp, int size)
vm_page_t m;
int desiredpages;
newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
desiredpages = round_page(newbsize) / PAGE_SIZE;
newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
desiredpages = (round_page(newbsize) >> PAGE_SHIFT);
if (newbsize < bp->b_bufsize) {
if (desiredpages < bp->b_npages) {
pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
(desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
for (i = desiredpages; i < bp->b_npages; i++) {
m = bp->b_pages[i];
s = splhigh();
@ -1066,37 +1095,45 @@ allocbuf(struct buf * bp, int size)
}
} else if (newbsize > bp->b_bufsize) {
vm_object_t obj;
vm_offset_t tinc, off, toff, objoff;
vm_offset_t tinc, toff;
vm_ooffset_t off;
vm_pindex_t objoff;
int pageindex, curbpnpages;
struct vnode *vp;
int bsize;
vp = bp->b_vp;
bsize = vp->v_mount->mnt_stat.f_iosize;
if (vp->v_type == VBLK)
bsize = DEV_BSIZE;
else
bsize = vp->v_mount->mnt_stat.f_iosize;
if (bp->b_npages < desiredpages) {
obj = vp->v_object;
tinc = PAGE_SIZE;
if (tinc > bsize)
tinc = bsize;
off = bp->b_lblkno * bsize;
off = (vm_ooffset_t) bp->b_lblkno * bsize;
doretry:
curbpnpages = bp->b_npages;
bp->b_flags |= B_CACHE;
for (toff = 0; toff < newbsize; toff += tinc) {
int bytesinpage;
pageindex = toff / PAGE_SIZE;
objoff = trunc_page(toff + off);
pageindex = toff >> PAGE_SHIFT;
objoff = OFF_TO_IDX(off + toff);
if (pageindex < curbpnpages) {
m = bp->b_pages[pageindex];
if (m->offset != objoff)
if (m->pindex != objoff)
panic("allocbuf: page changed offset??!!!?");
bytesinpage = tinc;
if (tinc > (newbsize - toff))
bytesinpage = newbsize - toff;
if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
if (!vm_page_is_valid(m,
(vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
bytesinpage)) {
bp->b_flags &= ~B_CACHE;
}
if ((m->flags & PG_ACTIVE) == 0) {
@ -1130,7 +1167,7 @@ allocbuf(struct buf * bp, int size)
s = splbio();
m->flags |= PG_WANTED;
tsleep(m, PRIBIO, "pgtblk", 0);
tsleep(m, PVM, "pgtblk", 0);
splx(s);
goto doretry;
@ -1143,7 +1180,9 @@ allocbuf(struct buf * bp, int size)
bytesinpage = tinc;
if (tinc > (newbsize - toff))
bytesinpage = newbsize - toff;
if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
if (!vm_page_is_valid(m,
(vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
bytesinpage)) {
bp->b_flags &= ~B_CACHE;
}
if ((m->flags & PG_ACTIVE) == 0) {
@ -1163,7 +1202,7 @@ allocbuf(struct buf * bp, int size)
bp->b_npages = curbpnpages;
bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
bp->b_data += off % PAGE_SIZE;
bp->b_data += off & (PAGE_SIZE - 1);
}
}
}
@ -1234,13 +1273,16 @@ biodone(register struct buf * bp)
}
if (bp->b_flags & B_VMIO) {
int i, resid;
vm_offset_t foff;
vm_ooffset_t foff;
vm_page_t m;
vm_object_t obj;
int iosize;
struct vnode *vp = bp->b_vp;
foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
if (vp->v_type == VBLK)
foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
else
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
obj = vp->v_object;
if (!obj) {
panic("biodone: no object");
@ -1257,7 +1299,7 @@ biodone(register struct buf * bp)
m = bp->b_pages[i];
if (m == bogus_page) {
bogusflag = 1;
m = vm_page_lookup(obj, foff);
m = vm_page_lookup(obj, OFF_TO_IDX(foff));
if (!m) {
#if defined(VFS_BIO_DEBUG)
printf("biodone: page disappeared\n");
@ -1269,11 +1311,11 @@ biodone(register struct buf * bp)
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}
#if defined(VFS_BIO_DEBUG)
if (trunc_page(foff) != m->offset) {
printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
if (OFF_TO_IDX(foff) != m->pindex) {
printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
}
#endif
resid = (m->offset + PAGE_SIZE) - foff;
resid = IDX_TO_OFF(m->pindex + 1) - foff;
if (resid > iosize)
resid = iosize;
/*
@ -1282,7 +1324,8 @@ biodone(register struct buf * bp)
* here in the read case.
*/
if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
vm_page_set_validclean(m, foff & (PAGE_SIZE-1), resid);
vm_page_set_validclean(m,
(vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
}
/*
@ -1292,12 +1335,19 @@ biodone(register struct buf * bp)
*/
if (m->busy == 0) {
printf("biodone: page busy < 0, "
"off: %ld, foff: %ld, "
"pindex: %d, foff: 0x(%x,%x), "
"resid: %d, index: %d\n",
m->offset, foff, resid, i);
printf(" iosize: %ld, lblkno: %ld, flags: 0x%x, npages: %d\n",
bp->b_vp->v_mount->mnt_stat.f_iosize,
bp->b_lblkno, bp->b_flags, bp->b_npages);
(int) m->pindex, (int)(foff >> 32),
(int) foff & 0xffffffff, resid, i);
if (vp->v_type != VBLK)
printf(" iosize: %d, lblkno: %d, flags: 0x%lx, npages: %d\n",
bp->b_vp->v_mount->mnt_stat.f_iosize,
(int) bp->b_lblkno,
bp->b_flags, bp->b_npages);
else
printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
(int) bp->b_lblkno,
bp->b_flags, bp->b_npages);
printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
m->valid, m->dirty, m->bmapped);
panic("biodone: page busy < 0\n");
@ -1353,7 +1403,7 @@ vfs_update()
{
(void) spl0(); /* XXX redundant? wrong place? */
while (1) {
tsleep(&vfs_update_wakeup, PRIBIO, "update",
tsleep(&vfs_update_wakeup, PUSER, "update",
hz * vfs_update_interval);
vfs_update_wakeup = 0;
sync(curproc, NULL, NULL);
@ -1387,15 +1437,15 @@ vfs_unbusy_pages(struct buf * bp)
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
vm_object_t obj = vp->v_object;
vm_offset_t foff;
vm_ooffset_t foff;
foff = trunc_page(vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno);
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
if (m == bogus_page) {
m = vm_page_lookup(obj, foff + i * PAGE_SIZE);
m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
if (!m) {
panic("vfs_unbusy_pages: page missing\n");
}
@ -1432,13 +1482,17 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
if (bp->b_flags & B_VMIO) {
vm_object_t obj = bp->b_vp->v_object;
vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
vm_ooffset_t foff;
int iocount = bp->b_bufsize;
if (bp->b_vp->v_type == VBLK)
foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
else
foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
vfs_setdirty(bp);
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
int resid = (m->offset + PAGE_SIZE) - foff;
int resid = IDX_TO_OFF(m->pindex + 1) - foff;
if (resid > iocount)
resid = iocount;
@ -1449,7 +1503,7 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
if (clear_modify) {
vm_page_protect(m, VM_PROT_READ);
vm_page_set_validclean(m,
foff & (PAGE_SIZE-1), resid);
(vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
} else if (bp->b_bcount >= PAGE_SIZE) {
if (m->valid && (bp->b_flags & B_CACHE) == 0) {
bp->b_pages[i] = bogus_page;
@ -1473,19 +1527,23 @@ vfs_clean_pages(struct buf * bp)
int i;
if (bp->b_flags & B_VMIO) {
vm_offset_t foff =
bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
vm_ooffset_t foff;
int iocount = bp->b_bufsize;
if (bp->b_vp->v_type == VBLK)
foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
else
foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
int resid = (m->offset + PAGE_SIZE) - foff;
int resid = IDX_TO_OFF(m->pindex + 1) - foff;
if (resid > iocount)
resid = iocount;
if (resid > 0) {
vm_page_set_validclean(m,
foff & (PAGE_SIZE-1), resid);
((vm_offset_t) foff & (PAGE_SIZE-1)), resid);
}
foff += resid;
iocount -= resid;
@ -1498,10 +1556,14 @@ vfs_bio_clrbuf(struct buf *bp) {
int i;
if( bp->b_flags & B_VMIO) {
if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
int j;
if( bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
int mask;
mask = 0;
for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
mask |= (1 << (i/DEV_BSIZE));
if( bp->b_pages[0]->valid != mask) {
bzero(bp->b_data, bp->b_bufsize);
}
bp->b_pages[0]->valid = mask;
bp->b_resid = 0;
return;
}
@ -1510,12 +1572,12 @@ vfs_bio_clrbuf(struct buf *bp) {
continue;
if( bp->b_pages[i]->valid == 0) {
if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
} else {
int j;
for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
if( (bp->b_pages[i]->valid & (1<<j)) == 0)
bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
}
}
bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
@ -1543,7 +1605,7 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
tryagain:
p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
VM_ALLOC_NORMAL);
if (!p) {
VM_WAIT;
@ -1551,7 +1613,7 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
}
vm_page_wire(p);
pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
bp->b_pages[((caddr_t) pg - bp->b_data) >> PAGE_SHIFT] = p;
PAGE_WAKEUP(p);
bp->b_npages++;
}
@ -1566,8 +1628,9 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
vm_offset_t to = round_page(toa);
for (pg = from; pg < to; pg += PAGE_SIZE) {
p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
int index = ((caddr_t) pg - bp->b_data) >> PAGE_SHIFT;
p = bp->b_pages[index];
bp->b_pages[index] = 0;
pmap_kremove(pg);
vm_page_free(p);
--bp->b_npages;
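A recurring idiom in the vfs_bio.c hunks above is the new residual computation, resid = IDX_TO_OFF(m->pindex + 1) - foff: the byte count from a 64-bit file offset to the end of the page that holds it, replacing the old (m->offset + PAGE_SIZE) - foff. A sketch of the identity, assuming OFF_TO_IDX(foff) == m->pindex:

    /* IDX_TO_OFF(m->pindex + 1) is the byte offset just past page m,
     * so the subtraction reduces to the bytes left in the page. */
    static int
    page_resid(vm_ooffset_t foff)
    {
            return (PAGE_SIZE - (int)(foff & (PAGE_SIZE - 1)));
    }

Note too that foff itself is now computed per vnode type: (vm_ooffset_t) DEV_BSIZE * b_lblkno for block devices, f_iosize * b_lblkno otherwise, with the widening done before the multiply.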

View File: vfs_cluster.c

@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
* $Id: vfs_cluster.c,v 1.28 1995/11/20 04:53:45 dyson Exp $
* $Id: vfs_cluster.c,v 1.29 1995/12/07 12:47:03 davidg Exp $
*/
#include <sys/param.h>
@ -134,6 +134,7 @@ cluster_read(vp, filesize, lblkno, size, cred, bpp)
*/
origlblkno = lblkno;
*bpp = bp = getblk(vp, lblkno, size, 0, 0);
seq = ISSEQREAD(vp, lblkno);
/*
* if it is in the cache, then check to see if the reads have been
@ -146,7 +147,8 @@ cluster_read(vp, filesize, lblkno, size, cred, bpp)
vp->v_ralen >>= RA_SHIFTDOWN;
return 0;
} else if( vp->v_maxra > lblkno) {
if ( (vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >= (lblkno + vp->v_ralen)) {
if ( (vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >=
(lblkno + vp->v_ralen)) {
if ((vp->v_ralen + 1) < RA_MULTIPLE_FAST*(MAXPHYS / size))
++vp->v_ralen;
return 0;
@ -190,13 +192,8 @@ cluster_read(vp, filesize, lblkno, size, cred, bpp)
*/
for (i = 0; i < vp->v_ralen; i++) {
rablkno = lblkno + i;
alreadyincore = (int) incore(vp, rablkno);
alreadyincore = (int) gbincore(vp, rablkno);
if (!alreadyincore) {
if (inmem(vp, rablkno)) {
if (vp->v_maxra < rablkno)
vp->v_maxra = rablkno + 1;
continue;
}
if (rablkno < vp->v_maxra) {
vp->v_maxra = rablkno;
vp->v_ralen >>= RA_SHIFTDOWN;
@ -213,7 +210,7 @@ cluster_read(vp, filesize, lblkno, size, cred, bpp)
*/
rbp = NULL;
if (!alreadyincore &&
(rablkno + 1) * size <= filesize &&
((u_quad_t)(rablkno + 1) * size) <= filesize &&
!(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra, NULL)) &&
blkno != -1) {
if (num_ra > vp->v_ralen)
@ -289,8 +286,12 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
panic("cluster_rbuild: size %d != filesize %d\n",
size, vp->v_mount->mnt_stat.f_iosize);
#endif
if (size * (lbn + run) > filesize)
/*
* avoid a division
*/
while ((u_quad_t) size * (lbn + run) > filesize) {
--run;
}
tbp = getblk(vp, lbn, size, 0, 0);
if (tbp->b_flags & B_CACHE)
@ -321,11 +322,13 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
inc = btodb(size);
for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
if (i != 0) {
if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
if ((bp->b_npages * PAGE_SIZE) +
round_page(size) > MAXPHYS)
break;
if (incore(vp, lbn + i))
if (gbincore(vp, lbn + i))
break;
tbp = getblk(vp, lbn + i, size, 0, 0);
if ((tbp->b_flags & B_CACHE) ||
@ -350,7 +353,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run)
}
tbp->b_flags |= B_READ | B_ASYNC;
if( tbp->b_blkno == tbp->b_lblkno) {
if (tbp->b_blkno == tbp->b_lblkno) {
tbp->b_blkno = bn;
} else if (tbp->b_blkno != bn) {
brelse(tbp);
@ -462,7 +465,7 @@ cluster_write(bp, filesize)
*/
cursize = vp->v_lastw - vp->v_cstart + 1;
#if 1
if ((lbn + 1) * lblocksize != filesize ||
if (((u_quad_t)(lbn + 1) * lblocksize) != filesize ||
lbn != vp->v_lastw + 1 ||
vp->v_clen <= cursize) {
if (!async)
@ -513,7 +516,7 @@ cluster_write(bp, filesize)
* cluster as large as possible, otherwise find size of
* existing cluster.
*/
if ((lbn + 1) * lblocksize != filesize &&
if (((u_quad_t) (lbn + 1) * lblocksize) != filesize &&
(bp->b_blkno == bp->b_lblkno) &&
(VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
bp->b_blkno == -1)) {
@ -527,7 +530,10 @@ cluster_write(bp, filesize)
vp->v_clen = maxclen;
if (!async && maxclen == 0) { /* I/O not contiguous */
vp->v_cstart = lbn + 1;
bawrite(bp);
if (!async)
bawrite(bp);
else
bdwrite(bp);
} else { /* Wait for rest of cluster */
vp->v_cstart = lbn;
bdwrite(bp);
@ -557,7 +563,7 @@ cluster_write(bp, filesize)
* performed. Check to see that it doesn't fall in the middle of
* the current block (if last_bp == NULL).
*/
void
int
cluster_wbuild(vp, size, start_lbn, len)
struct vnode *vp;
long size;
@ -566,26 +572,22 @@ cluster_wbuild(vp, size, start_lbn, len)
{
struct buf *bp, *tbp;
int i, j, s;
int totalwritten = 0;
int dbsize = btodb(size);
int origlen = len;
while (len > 0) {
s = splbio();
if ( ((tbp = gbincore(vp, start_lbn)) == NULL) ||
((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
++start_lbn;
--len;
splx(s);
continue;
}
bremfree(tbp);
tbp->b_flags |= B_BUSY;
tbp->b_flags &= ~B_DONE;
splx(s);
redo:
if (len == 0)
return;
if ( ((tbp = incore(vp, start_lbn)) == NULL) ||
((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
++start_lbn;
--len;
goto redo;
}
tbp = getblk(vp, start_lbn, size, 0, 0);
if ((tbp->b_flags & B_DELWRI) == 0) {
++start_lbn;
--len;
brelse(tbp);
goto redo;
}
/*
* Extra memory in the buffer, punt on this buffer. XXX we could
* handle this in most cases, but we would have to push the extra
@ -593,88 +595,93 @@ cluster_wbuild(vp, size, start_lbn, len)
* potentially pull it back up if the cluster was terminated
* prematurely--too much hassle.
*/
if (((tbp->b_flags & (B_VMIO|B_CLUSTEROK)) != (B_VMIO|B_CLUSTEROK)) ||
(tbp->b_bcount != tbp->b_bufsize) ||
len == 1) {
bawrite(tbp);
++start_lbn;
--len;
goto redo;
}
if (((tbp->b_flags & B_CLUSTEROK) != B_CLUSTEROK) ||
(tbp->b_bcount != tbp->b_bufsize) ||
(tbp->b_bcount != size) ||
len == 1) {
totalwritten += tbp->b_bufsize;
bawrite(tbp);
++start_lbn;
--len;
continue;
}
bp = trypbuf();
if (bp == NULL) {
bawrite(tbp);
++start_lbn;
--len;
goto redo;
}
bp = trypbuf();
if (bp == NULL) {
totalwritten += tbp->b_bufsize;
bawrite(tbp);
++start_lbn;
--len;
continue;
}
TAILQ_INIT(&bp->b_cluster.cluster_head);
bp->b_bcount = 0;
bp->b_bufsize = 0;
bp->b_npages = 0;
TAILQ_INIT(&bp->b_cluster.cluster_head);
bp->b_bcount = 0;
bp->b_bufsize = 0;
bp->b_npages = 0;
bp->b_blkno = tbp->b_blkno;
bp->b_lblkno = tbp->b_lblkno;
(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
bp->b_iodone = cluster_callback;
pbgetvp(vp, bp);
bp->b_blkno = tbp->b_blkno;
bp->b_lblkno = tbp->b_lblkno;
(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER | (tbp->b_flags & B_VMIO);
bp->b_iodone = cluster_callback;
pbgetvp(vp, bp);
for (i = 0; i < len; ++i, ++start_lbn) {
if (i != 0) {
for (i = 0; i < len; ++i, ++start_lbn) {
if (i != 0) {
s = splbio();
if ((tbp = gbincore(vp, start_lbn)) == NULL) {
splx(s);
break;
}
if ((tbp->b_flags & (B_VMIO|B_CLUSTEROK|B_INVAL|B_BUSY|B_DELWRI)) != (B_DELWRI|B_CLUSTEROK|(bp->b_flags & B_VMIO))) {
splx(s);
break;
}
if ((tbp->b_bcount != size) ||
((bp->b_blkno + dbsize * i) != tbp->b_blkno) ||
((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))) {
splx(s);
break;
}
bremfree(tbp);
tbp->b_flags |= B_BUSY;
tbp->b_flags &= ~B_DONE;
splx(s);
}
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
++m->busy;
++m->object->paging_in_progress;
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages - 1] != m)) {
bp->b_pages[bp->b_npages] = m;
bp->b_npages++;
}
}
bp->b_bcount += size;
bp->b_bufsize += size;
tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
tbp->b_flags |= B_ASYNC;
s = splbio();
if ((tbp = incore(vp, start_lbn)) == NULL) {
splx(s);
break;
}
if ((tbp->b_flags & (B_CLUSTEROK|B_INVAL|B_BUSY|B_DELWRI)) != (B_DELWRI|B_CLUSTEROK)) {
splx(s);
break;
}
if ((tbp->b_bcount != size) ||
((bp->b_blkno + dbsize * i) != tbp->b_blkno) ||
((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))) {
splx(s);
break;
}
bremfree(tbp);
tbp->b_flags |= B_BUSY;
tbp->b_flags &= ~B_DONE;
reassignbuf(tbp, tbp->b_vp); /* put on clean list */
++tbp->b_vp->v_numoutput;
splx(s);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
}
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
++m->busy;
++m->object->paging_in_progress;
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages - 1] != m)) {
bp->b_pages[bp->b_npages] = m;
bp->b_npages++;
}
}
bp->b_bcount += size;
bp->b_bufsize += size;
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *) bp->b_pages, bp->b_npages);
totalwritten += bp->b_bufsize;
bawrite(bp);
tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
tbp->b_flags |= B_ASYNC;
s = splbio();
reassignbuf(tbp, tbp->b_vp); /* put on clean list */
++tbp->b_vp->v_numoutput;
splx(s);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
len -= i;
}
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *) bp->b_pages, bp->b_npages);
bawrite(bp);
len -= i;
goto redo;
return totalwritten;
}
#if 0
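The scattered u_quad_t casts in cluster_read() and cluster_write() guard the same overflow: lbn and size are 32-bit, so (lbn + 1) * size silently wraps once a file crosses 4GB and corrupts the comparison against the 64-bit filesize. A worked example with 8K blocks:

    daddr_t  lbn  = 524288;                     /* the block at the 4GB mark */
    long     size = 8192;
    u_quad_t ok   = (u_quad_t)(lbn + 1) * size; /* 4GB + 8K, correct */
    long     bad  = (lbn + 1) * size;           /* wraps to 8192 in 32 bits */

The "avoid a division" loop in cluster_rbuild() applies the same widening while trimming run, so a cluster never extends past filesize.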

View File: vfs_subr.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
* $Id: vfs_subr.c,v 1.46 1995/12/06 13:27:39 phk Exp $
* $Id: vfs_subr.c,v 1.47 1995/12/07 12:47:04 davidg Exp $
*/
/*
@ -1332,7 +1332,6 @@ sysctl_vnode SYSCTL_HANDLER_ARGS
SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
0, 0, sysctl_vnode, "S,vnode", "");
/*
* Check to see if a filesystem is mounted on a block device.
*/
@ -1521,14 +1520,13 @@ vfs_export_lookup(mp, nep, nam)
*/
void
vfs_msync(struct mount *mp, int flags) {
struct vnode *vp;
struct vnode *vp, *nvp;
loop:
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
vp = vp->v_mntvnodes.le_next) {
for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
if (vp->v_mount != mp)
goto loop;
nvp = vp->v_mntvnodes.le_next;
if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
continue;
if (vp->v_object &&

View File: vfs_syscalls.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
* $Id: vfs_syscalls.c,v 1.41 1995/11/18 11:35:05 bde Exp $
* $Id: vfs_syscalls.c,v 1.42 1995/12/07 12:47:06 davidg Exp $
*/
#include <sys/param.h>
@ -682,6 +682,7 @@ open(p, uap, retval)
}
p->p_dupfd = 0;
vp = nd.ni_vp;
fp->f_flag = flags & FMASK;
fp->f_type = DTYPE_VNODE;
fp->f_ops = &vnops;

View File: vfs_vnops.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
* $Id: vfs_vnops.c,v 1.19 1995/10/22 09:32:29 davidg Exp $
* $Id: vfs_vnops.c,v 1.20 1995/12/07 12:47:07 davidg Exp $
*/
#include <sys/param.h>
@ -156,26 +156,8 @@ vn_open(ndp, fmode, cmode)
* this is here for VMIO support
*/
if (vp->v_type == VREG) {
retry:
if ((vp->v_flag & VVMIO) == 0) {
error = VOP_GETATTR(vp, vap, cred, p);
if (error)
goto bad;
(void) vnode_pager_alloc(vp, vap->va_size, 0, 0);
vp->v_flag |= VVMIO;
} else {
vm_object_t object;
if ((object = vp->v_object) &&
(object->flags & OBJ_DEAD)) {
VOP_UNLOCK(vp);
tsleep(object, PVM, "vodead", 0);
VOP_LOCK(vp);
goto retry;
}
if (!object)
panic("vn_open: VMIO object missing");
vm_object_reference(object);
}
if ((error = vn_vmio_open(vp, p, cred)) != 0)
goto bad;
}
if (fmode & FWRITE)
vp->v_writecount++;
@ -220,17 +202,7 @@ vn_close(vp, flags, cred, p)
if (flags & FWRITE)
vp->v_writecount--;
error = VOP_CLOSE(vp, flags, cred, p);
/*
* this code is here for VMIO support, will eventually
* be in vfs code.
*/
if (vp->v_flag & VVMIO) {
vrele(vp);
if (vp->v_object == NULL)
panic("vn_close: VMIO object missing");
vm_object_deallocate(vp->v_object);
} else
vrele(vp);
vn_vmio_close(vp);
return (error);
}
@ -481,3 +453,55 @@ vn_closefile(fp, p)
return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
fp->f_cred, p));
}
int
vn_vmio_open(vp, p, cred)
struct vnode *vp;
struct proc *p;
struct ucred *cred;
{
struct vattr vat;
int error;
/*
* this is here for VMIO support
*/
if (vp->v_type == VREG || vp->v_type == VBLK) {
retry:
if ((vp->v_flag & VVMIO) == 0) {
if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
return error;
(void) vnode_pager_alloc(vp, vat.va_size, 0, 0);
vp->v_flag |= VVMIO;
} else {
vm_object_t object;
if ((object = vp->v_object) &&
(object->flags & OBJ_DEAD)) {
VOP_UNLOCK(vp);
tsleep(object, PVM, "vodead", 0);
VOP_LOCK(vp);
goto retry;
}
if (!object)
panic("vn_open: VMIO object missing");
vm_object_reference(object);
}
}
return 0;
}
void
vn_vmio_close(vp)
struct vnode *vp;
{
/*
* this code is here for VMIO support, will eventually
* be in vfs code.
*/
if (vp->v_flag & VVMIO) {
vrele(vp);
if (vp->v_object == NULL)
panic("vn_close: VMIO object missing");
vm_object_deallocate(vp->v_object);
} else
vrele(vp);
}
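The VMIO setup that vn_open() used to do inline is factored out into vn_vmio_open()/vn_vmio_close(), and the open path now also accepts VBLK vnodes so block devices can carry VM objects; ffs_unmount() becomes a caller in the ffs_vfsops.c diff below. A hypothetical caller sketch, assuming the caller already holds a vnode reference it wants dropped at close:

    int
    example_use(struct vnode *vp, struct proc *p, struct ucred *cred)
    {
            int error;

            if ((error = vn_vmio_open(vp, p, cred)) != 0)
                    return (error);
            /* ... do I/O through the vnode ... */
            vn_vmio_close(vp);      /* drops the vnode ref (and object ref if VVMIO) */
            return (0);
    }

Since vn_vmio_close() performs the vrele() itself, callers must not vrele() the vnode again.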

View File: vnode_if.src

@ -31,7 +31,7 @@
# SUCH DAMAGE.
#
# @(#)vnode_if.src 8.3 (Berkeley) 2/3/94
# $Id: vnode_if.src,v 1.7 1995/09/04 00:20:19 dyson Exp $
# $Id: vnode_if.src,v 1.8 1995/10/23 02:55:55 dyson Exp $
#
vop_lookup {
IN struct vnode *dvp;
@ -300,7 +300,7 @@ vop_getpages {
IN vm_page_t *m;
IN int count;
IN int reqpage;
IN vm_offset_t offset;
IN vm_ooffset_t offset;
};
vop_putpages {
@ -309,7 +309,7 @@ vop_putpages {
IN int count;
IN int sync;
IN int *rtvals;
IN vm_offset_t offset;
IN vm_ooffset_t offset;
};
# Needs work: no vp?

View File: specdev.h

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)specdev.h 8.2 (Berkeley) 2/2/94
* $Id: specdev.h,v 1.5 1995/11/09 08:16:12 bde Exp $
* $Id: specdev.h,v 1.6 1995/11/21 12:54:02 bde Exp $
*/
/*
@ -89,7 +89,10 @@ int spec_lookup __P((struct vop_lookup_args *));
int spec_open __P((struct vop_open_args *));
int spec_close __P((struct vop_close_args *));
#define spec_access ((int (*) __P((struct vop_access_args *)))spec_ebadf)
/*
#define spec_getattr ((int (*) __P((struct vop_getattr_args *)))spec_ebadf)
*/
int spec_getattr __P((struct vop_getattr_args *));
#define spec_setattr ((int (*) __P((struct vop_setattr_args *)))spec_ebadf)
int spec_read __P((struct vop_read_args *));
int spec_write __P((struct vop_write_args *));

View File: sd.c

@ -14,7 +14,7 @@
*
* Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
*
* $Id: sd.c,v 1.78 1995/12/09 20:42:35 phk Exp $
* $Id: sd.c,v 1.79 1995/12/10 01:47:33 bde Exp $
*/
#define SPLSD splbio
@ -445,7 +445,12 @@ sd_strategy(struct buf *bp, struct scsi_link *sc_link)
/*
* Place it in the queue of disk activities for this disk
*/
#define SDDISKSORT
#ifdef SDDISKSORT
tqdisksort(&sd->buf_queue, bp);
#else
TAILQ_INSERT_TAIL(&sd->buf_queue, bp, b_act);
#endif
/*
* Tell the device to get going on the transfer if it's

View File: buf.h

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.7 (Berkeley) 1/21/94
* $Id: buf.h,v 1.23 1995/11/19 22:22:03 dyson Exp $
* $Id: buf.h,v 1.24 1995/11/20 12:35:16 phk Exp $
*/
#ifndef _SYS_BUF_H_
@ -69,8 +69,9 @@ struct buf {
struct buf *b_actf, **b_actb; /* Device driver queue when active. *depricated* XXX */
TAILQ_ENTRY(buf) b_act; /* Device driver queue when active. *new* */
struct proc *b_proc; /* Associated proc; NULL if kernel. */
volatile long b_flags; /* B_* flags. */
int b_qindex; /* buffer queue index */
long b_flags; /* B_* flags. */
unsigned short b_qindex; /* buffer queue index */
unsigned char b_usecount; /* buffer use count */
int b_error; /* Errno value. */
long b_bufsize; /* Allocated buffer size. */
long b_bcount; /* Valid bytes in buffer. */
@ -206,9 +207,10 @@ int bwrite __P((struct buf *));
void bdwrite __P((struct buf *));
void bawrite __P((struct buf *));
void brelse __P((struct buf *));
void vfs_bio_awrite __P((struct buf *));
int vfs_bio_awrite __P((struct buf *));
struct buf * getpbuf __P((void));
struct buf *incore __P((struct vnode *, daddr_t));
struct buf *gbincore __P((struct vnode *, daddr_t));
int inmem __P((struct vnode *, daddr_t));
struct buf *getblk __P((struct vnode *, daddr_t, int, int, int));
struct buf *geteblk __P((int));
@ -219,7 +221,7 @@ void biodone __P((struct buf *));
void cluster_callback __P((struct buf *));
int cluster_read __P((struct vnode *, u_quad_t, daddr_t, long,
struct ucred *, struct buf **));
void cluster_wbuild __P((struct vnode *, long, daddr_t, int));
int cluster_wbuild __P((struct vnode *, long, daddr_t, int));
void cluster_write __P((struct buf *, u_quad_t));
int physio __P((void (*)(struct buf *), struct buf *, dev_t,
int, u_int (*)(struct buf *), struct uio *));
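The new b_usecount field in struct buf drives a second-chance LRU, visible in the vfs_bio.c hunks: getblk() bumps the count (capped at BUF_MAXUSE) on each hit, getnewbuf() starts fresh buffers at 2, and the reclaim path decays the count instead of recycling the buffer. Roughly, condensed from the getnewbuf() hunk above:

    if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
            --bp->b_usecount;
            /* second chance: rotate to the back of the LRU queue */
            TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
            TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
            goto start;
    }

The count fits in the space freed by narrowing b_qindex to an unsigned short, so struct buf need not grow.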

View File: vnode.h

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
* $Id: vnode.h,v 1.23 1995/11/08 04:51:15 dyson Exp $
* $Id: vnode.h,v 1.24 1995/11/09 08:17:13 bde Exp $
*/
#ifndef _SYS_VNODE_H_
@ -421,6 +421,8 @@ int vn_read __P((struct file *fp, struct uio *uio, struct ucred *cred));
int vn_select __P((struct file *fp, int which, struct proc *p));
int vn_stat __P((struct vnode *vp, struct stat *sb, struct proc *p));
int vn_write __P((struct file *fp, struct uio *uio, struct ucred *cred));
int vn_vmio_open __P((struct vnode *vp, struct proc *p, struct ucred *cred));
void vn_vmio_close __P((struct vnode *vp));
struct vnode *
checkalias __P((struct vnode *vp, dev_t nvp_rdev, struct mount *mp));
void vprint __P((char *, struct vnode *));

View File: ffs_inode.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_inode.c 8.5 (Berkeley) 12/30/93
* $Id: ffs_inode.c,v 1.16 1995/11/05 21:01:15 dyson Exp $
* $Id: ffs_inode.c,v 1.17 1995/12/07 12:47:50 davidg Exp $
*/
#include <sys/param.h>
@ -143,6 +143,7 @@ ffs_update(ap)
if (ap->a_waitfor && (ap->a_vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
return (bwrite(bp));
else {
bp->b_flags |= B_CLUSTEROK;
bdwrite(bp);
return (0);
}
@ -228,7 +229,7 @@ ffs_truncate(ap)
bdwrite(bp);
else
bawrite(bp);
vnode_pager_setsize(ovp, (u_long)length);
vnode_pager_setsize(ovp, length);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (VOP_UPDATE(ovp, &tv, &tv, 1));
}
@ -287,7 +288,7 @@ ffs_truncate(ap)
for (i = NDADDR - 1; i > lastblock; i--)
oip->i_db[i] = 0;
oip->i_flag |= IN_CHANGE | IN_UPDATE;
error = VOP_UPDATE(ovp, &tv, &tv, 1);
error = VOP_UPDATE(ovp, &tv, &tv, 0);
if (error)
allerror = error;
/*
@ -391,7 +392,7 @@ ffs_truncate(ap)
if (oip->i_blocks < 0) /* sanity */
oip->i_blocks = 0;
oip->i_flag |= IN_CHANGE;
vnode_pager_setsize(ovp, (u_long)length);
vnode_pager_setsize(ovp, length);
#ifdef QUOTA
(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
@ -470,7 +471,12 @@ ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
(u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
if (last == -1)
bp->b_flags |= B_INVAL;
error = bwrite(bp);
if ((vp->v_mount->mnt_flag & MNT_ASYNC) == 0) {
error = bwrite(bp);
} else {
bawrite(bp);
error = 0;
}
if (error)
allerror = error;
bap = copy;

View File: ffs_vfsops.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_vfsops.c 8.8 (Berkeley) 4/18/94
* $Id: ffs_vfsops.c,v 1.29 1995/11/20 12:25:37 phk Exp $
* $Id: ffs_vfsops.c,v 1.30 1995/12/07 12:47:51 davidg Exp $
*/
#include <sys/param.h>
@ -69,7 +69,6 @@
int ffs_sbupdate __P((struct ufsmount *, int));
int ffs_reload __P((struct mount *,struct ucred *,struct proc *));
int ffs_oldfscompat __P((struct fs *));
void ffs_vmlimits __P((struct fs *));
struct vfsops ufs_vfsops = {
ffs_mount,
@ -392,7 +391,6 @@ ffs_reload(mp, cred, p)
bp->b_flags |= B_INVAL;
brelse(bp);
ffs_oldfscompat(fs);
ffs_vmlimits(fs);
/*
* Step 3: re-read summary information from disk.
*/
@ -562,7 +560,6 @@ ffs_mountfs(devvp, mp, p)
ump->um_quotas[i] = NULLVP;
devvp->v_specflags |= SI_MOUNTEDON;
ffs_oldfscompat(fs);
ffs_vmlimits(fs);
/*
* Set FS local "last mounted on" information (NULL pad)
@ -614,32 +611,19 @@ ffs_oldfscompat(fs)
#if 0
int i; /* XXX */
quad_t sizepb = fs->fs_bsize; /* XXX */
/* XXX */
fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
for (i = 0; i < NIADDR; i++) { /* XXX */
sizepb *= NINDIR(fs); /* XXX */
fs->fs_maxfilesize += sizepb; /* XXX */
} /* XXX */
#endif
fs->fs_maxfilesize = (u_quad_t) 1 << 39;
fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
} /* XXX */
return (0);
}
/*
* Sanity check for VM file size limits -- temporary until
* VM system can support > 32bit offsets
*/
void
ffs_vmlimits(fs)
struct fs *fs;
{
if( fs->fs_maxfilesize > (((u_quad_t) 1 << 31) - 1))
fs->fs_maxfilesize = ((u_quad_t) 1 << 31) - 1;
}
/*
* unmount system call
*/
@ -670,7 +654,10 @@ ffs_unmount(mp, mntflags, p)
ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
error = VOP_CLOSE(ump->um_devvp, ronly ? FREAD : FREAD|FWRITE,
NOCRED, p);
/*
vrele(ump->um_devvp);
*/
vn_vmio_close(ump->um_devvp);
free(fs->fs_csp[0], M_UFSMNT);
free(fs, M_UFSMNT);
free(ump, M_UFSMNT);
@ -764,7 +751,7 @@ ffs_sync(mp, waitfor, cred, p)
struct ucred *cred;
struct proc *p;
{
register struct vnode *vp;
register struct vnode *vp, *nvp;
register struct inode *ip;
register struct ufsmount *ump = VFSTOUFS(mp);
register struct fs *fs;
@ -790,15 +777,14 @@ ffs_sync(mp, waitfor, cred, p)
* Write back each (modified) inode.
*/
loop:
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
vp = vp->v_mntvnodes.le_next) {
for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
nvp = vp->v_mntvnodes.le_next;
if (VOP_ISLOCKED(vp))
continue;
ip = VTOI(vp);
@ -815,7 +801,8 @@ ffs_sync(mp, waitfor, cred, p)
vput(vp);
} else {
tv = time;
VOP_UPDATE(vp, &tv, &tv, waitfor == MNT_WAIT);
/* VOP_UPDATE(vp, &tv, &tv, waitfor == MNT_WAIT); */
VOP_UPDATE(vp, &tv, &tv, 0);
}
}
/*
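With the VM layer no longer confined to 32-bit offsets, ffs_vmlimits(), which clamped fs_maxfilesize to 2^31 - 1, is deleted outright, and the compat path sets fs_maxfilesize to 1LL << 39. The bounds, worked out:

    2^31 - 1 =     2,147,483,647 bytes    (old clamp: just under 2GB)
    2^39     =   549,755,813,888 bytes    (new FFS ceiling: 512GB)

So the 1TB in the commit message describes what the pager interfaces can now address (see the page-index sketch at the top); the FFS compat code itself advertises 512GB here.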

View File: ufs_readwrite.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.7 (Berkeley) 1/21/94
* $Id: ufs_readwrite.c,v 1.15 1995/11/05 21:01:10 dyson Exp $
* $Id: ufs_readwrite.c,v 1.16 1995/11/20 12:25:23 phk Exp $
*/
#ifdef LFS_READWRITE
@ -241,7 +241,7 @@ WRITE(ap)
xfersize = uio->uio_resid;
if (uio->uio_offset + xfersize > ip->i_size)
vnode_pager_setsize(vp, (u_long)uio->uio_offset + xfersize);
vnode_pager_setsize(vp, uio->uio_offset + xfersize);
#ifdef LFS_READWRITE
(void)lfs_check(vp, lbn);
@ -327,7 +327,7 @@ int
ffs_getpages(ap)
struct vop_getpages_args *ap;
{
vm_offset_t foff, physoffset;
off_t foff, physoffset;
int i, size, bsize;
struct vnode *dp;
int bbackwards, bforwards;
@ -361,7 +361,7 @@ ffs_getpages(ap)
* reqlblkno is the logical block that contains the page
* poff is the index of the page into the logical block
*/
foff = ap->a_m[ap->a_reqpage]->offset + ap->a_offset;
foff = IDX_TO_OFF(ap->a_m[ap->a_reqpage]->pindex) + ap->a_offset;
reqlblkno = foff / bsize;
poff = (foff % bsize) / PAGE_SIZE;
@ -422,11 +422,11 @@ ffs_getpages(ap)
*/
size = pcount * PAGE_SIZE;
if ((ap->a_m[firstpage]->offset + size) >
if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) >
((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size)
size = ((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size - ap->a_m[firstpage]->offset;
size = ((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size - IDX_TO_OFF(ap->a_m[firstpage]->pindex);
physoffset -= ap->a_m[ap->a_reqpage]->offset;
physoffset -= IDX_TO_OFF(ap->a_m[ap->a_reqpage]->pindex);
rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size,
(ap->a_reqpage - firstpage), physoffset);
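ffs_getpages() above now derives the 64-bit file offset from the page index, then splits it into a logical filesystem block and a page within that block. A worked sketch, assuming 4K pages and an 8K block size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096

int
main(void)
{
	int64_t foff = 41 * 1024;	/* byte offset of the requested page */
	int bsize = 8192;		/* fs block size (assumed) */
	int64_t reqlblkno = foff / bsize;		/* logical fs block */
	int poff = (int)((foff % bsize) / PAGE_SIZE);	/* page within block */

	printf("lblkno %lld poff %d\n", (long long)reqlblkno, poff);	/* 5 0 */
	return (0);
}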

View File: sys/ufs/ufs/ufs_vnops.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_vnops.c 8.10 (Berkeley) 4/1/94
* $Id: ufs_vnops.c,v 1.33 1995/11/09 08:14:37 bde Exp $
* $Id: ufs_vnops.c,v 1.34 1995/11/19 19:46:23 dyson Exp $
*/
#include <sys/param.h>
@ -468,10 +468,6 @@ ufs_chmod(vp, mode, cred, p)
ip->i_mode &= ~ALLPERMS;
ip->i_mode |= (mode & ALLPERMS);
ip->i_flag |= IN_CHANGE;
/*
if ((vp->v_flag & VTEXT) && (ip->i_mode & S_ISTXT) == 0)
(void) vnode_pager_uncache(vp);
*/
return (0);
}
@ -1720,9 +1716,9 @@ ufs_unlock(ap)
} */ *ap;
{
register struct inode *ip = VTOI(ap->a_vp);
struct proc *p = curproc;
#ifdef DIAGNOSTIC
struct proc *p = curproc;
if ((ip->i_flag & IN_LOCKED) == 0) {
vprint("ufs_unlock: unlocked inode", ap->a_vp);

View File: sys/vm/default_pager.c

@ -28,7 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: default_pager.c,v 1.2 1995/07/13 10:29:34 davidg Exp $
* $Id: default_pager.c,v 1.3 1995/12/07 12:48:00 davidg Exp $
*/
#include <sys/param.h>
@ -67,7 +67,7 @@ default_pager_alloc(handle, size, prot, offset)
void *handle;
register vm_size_t size;
vm_prot_t prot;
vm_offset_t offset;
vm_ooffset_t offset;
{
if (handle != NULL)
panic("default_pager_alloc: handle specified");
@ -127,9 +127,9 @@ default_pager_putpages(object, m, c, sync, rtvals)
}
boolean_t
default_pager_haspage(object, offset, before, after)
default_pager_haspage(object, pindex, before, after)
vm_object_t object;
vm_offset_t offset;
vm_pindex_t pindex;
int *before;
int *after;
{

View File: sys/vm/default_pager.h

@ -28,16 +28,16 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
* $Id: default_pager.h,v 1.1 1995/07/13 10:15:03 davidg Exp $
*/
#ifndef _DEFAULT_PAGER_H_
#define _DEFAULT_PAGER_H_ 1
vm_object_t default_pager_alloc __P((void *, vm_size_t, vm_prot_t, vm_offset_t));
vm_object_t default_pager_alloc __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t));
void default_pager_dealloc __P((vm_object_t));
int default_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
int default_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
boolean_t default_pager_haspage __P((vm_object_t, vm_offset_t, int *, int *));
boolean_t default_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
#endif /* _DEFAULT_PAGER_H_ */

View File: sys/vm/device_pager.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.15 1995/12/03 18:59:55 bde Exp $
* $Id: device_pager.c,v 1.16 1995/12/07 12:48:01 davidg Exp $
*/
#include <sys/param.h>
@ -86,7 +86,7 @@ dev_pager_alloc(handle, size, prot, foff)
void *handle;
vm_size_t size;
vm_prot_t prot;
vm_offset_t foff;
vm_ooffset_t foff;
{
dev_t dev;
d_mmap_t *mapfunc;
@ -138,7 +138,8 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Allocate object and associate it with the pager.
*/
object = vm_object_allocate(OBJT_DEVICE, foff + size);
object = vm_object_allocate(OBJT_DEVICE,
OFF_TO_IDX(foff + size));
object->handle = handle;
TAILQ_INIT(&object->un_pager.devp.devp_pglist);
TAILQ_INSERT_TAIL(&dev_pager_object_list, object, pager_object_list);
@ -147,8 +148,8 @@ dev_pager_alloc(handle, size, prot, foff)
* Gain a reference to the object.
*/
vm_object_reference(object);
if (foff + size > object->size)
object->size = foff + size;
if (OFF_TO_IDX(foff + size) > object->size)
object->size = OFF_TO_IDX(foff + size);
}
dev_pager_alloc_lock = 0;
@ -181,7 +182,8 @@ dev_pager_getpages(object, m, count, reqpage)
int count;
int reqpage;
{
vm_offset_t offset, paddr;
vm_offset_t offset;
vm_offset_t paddr;
vm_page_t page;
dev_t dev;
int i, s;
@ -189,14 +191,14 @@ dev_pager_getpages(object, m, count, reqpage)
int prot;
dev = (dev_t) (u_long) object->handle;
offset = m[reqpage]->offset + object->paging_offset;
offset = m[reqpage]->pindex + OFF_TO_IDX(object->paging_offset);
prot = PROT_READ; /* XXX should pass in? */
mapfunc = cdevsw[major(dev)].d_mmap;
if (mapfunc == NULL || mapfunc == (d_mmap_t *)nullop)
panic("dev_pager_getpage: no map function");
paddr = pmap_phys_address((*mapfunc) ((dev_t) dev, (int) offset, prot));
paddr = pmap_phys_address((*mapfunc) ((dev_t) dev, (int) offset << PAGE_SHIFT, prot));
#ifdef DIAGNOSTIC
if (paddr == -1)
panic("dev_pager_getpage: map function returns error");
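One caveat in the getpages path above: the cast binds before the shift, so (int) offset << PAGE_SHIFT rebuilds the byte offset for d_mmap in a plain int, which truncates for devices 2 GB and larger. A sketch of the narrowing, assuming 4K pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int
main(void)
{
	uint32_t pindex = 0x90000;	/* page index ~2.25 GB into a device */
	long long wide = (long long) pindex << PAGE_SHIFT;	/* widen first */
	int narrow = (int) wide;	/* what a 32-bit int keeps of it */

	printf("%lld truncates to %d\n", wide, narrow);
	return (0);
}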
@ -230,9 +232,9 @@ dev_pager_putpages(object, m, count, sync, rtvals)
}
boolean_t
dev_pager_haspage(object, offset, before, after)
dev_pager_haspage(object, pindex, before, after)
vm_object_t object;
vm_offset_t offset;
vm_pindex_t pindex;
int *before;
int *after;
{

View File: sys/vm/device_pager.h

@ -36,17 +36,17 @@
* SUCH DAMAGE.
*
* @(#)device_pager.h 8.3 (Berkeley) 12/13/93
* $Id: device_pager.h,v 1.3 1995/01/09 16:05:30 davidg Exp $
* $Id: device_pager.h,v 1.4 1995/07/13 08:48:11 davidg Exp $
*/
#ifndef _DEVICE_PAGER_
#define _DEVICE_PAGER_ 1
void dev_pager_init __P((void));
vm_object_t dev_pager_alloc __P((void *, vm_size_t, vm_prot_t, vm_offset_t));
vm_object_t dev_pager_alloc __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t));
void dev_pager_dealloc __P((vm_object_t));
int dev_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
int dev_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
boolean_t dev_pager_haspage __P((vm_object_t, vm_offset_t, int *, int *));
boolean_t dev_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
#endif /* _DEVICE_PAGER_ */

View File: sys/vm/swap_pager.c

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.52 1995/12/03 12:18:33 bde Exp $
* $Id: swap_pager.c,v 1.53 1995/12/07 12:48:05 davidg Exp $
*/
/*
@ -83,7 +83,7 @@ static int no_swap_space = 1;
struct rlist *swaplist;
int nswaplist;
#define MAX_PAGEOUT_CLUSTER 8
#define MAX_PAGEOUT_CLUSTER 16
TAILQ_HEAD(swpclean, swpagerclean);
@ -124,10 +124,10 @@ static struct pagerlst *swp_qs[] = {
*/
static vm_object_t
swap_pager_alloc __P((void *handle, vm_size_t size,
vm_prot_t prot, vm_offset_t offset));
vm_prot_t prot, vm_ooffset_t offset));
static void swap_pager_dealloc __P((vm_object_t object));
static boolean_t
swap_pager_haspage __P((vm_object_t object, vm_offset_t offset,
swap_pager_haspage __P((vm_object_t object, vm_pindex_t pindex,
int *before, int *after));
static void swap_pager_init __P((void));
struct pagerops swappagerops = {
@ -147,8 +147,8 @@ static __pure int
swap_pager_block_index __P((vm_offset_t offset)) __pure2;
static __pure int
swap_pager_block_offset __P((vm_offset_t offset)) __pure2;
static int *swap_pager_diskaddr __P((vm_object_t object,
vm_offset_t offset, int *valid));
static daddr_t *swap_pager_diskaddr __P((vm_object_t object,
vm_pindex_t pindex, int *valid));
static void swap_pager_finish __P((swp_clean_t spc));
static void swap_pager_freepage __P((vm_page_t m));
static void swap_pager_free_swap __P((vm_object_t object));
@ -157,7 +157,7 @@ static void swap_pager_freeswapspace __P((vm_object_t object,
unsigned int to));
static int swap_pager_getswapspace __P((vm_object_t object,
unsigned int amount,
unsigned int *rtval));
daddr_t *rtval));
static void swap_pager_iodone __P((struct buf *));
static void swap_pager_iodone1 __P((struct buf *bp));
static int swap_pager_ready __P((void));
@ -234,9 +234,7 @@ swap_pager_swp_alloc(object, wait)
int nblocks;
int i, j;
nblocks = (btodb(object->size) + btodb(SWB_NPAGES * PAGE_SIZE) - 1) /
btodb(SWB_NPAGES * PAGE_SIZE);
nblocks = (object->size + SWB_NPAGES - 1) / SWB_NPAGES;
swb = malloc(nblocks * sizeof(*swb), M_VMPGDATA, wait);
if (swb == NULL)
return 1;
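With object->size now in pages, sizing the swblock array above is plain ceiling division. A sketch, assuming SWB_NPAGES is 8 (its real value is a build-time choice):

#include <stdio.h>

#define SWB_NPAGES 8	/* pages tracked per swblock (assumed) */

int
main(void)
{
	unsigned size = 21;	/* object size, in pages */
	unsigned nblocks = (size + SWB_NPAGES - 1) / SWB_NPAGES;

	printf("%u swblocks cover %u pages\n", nblocks, size);	/* 3, 21 */
	return (0);
}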
@ -272,7 +270,7 @@ swap_pager_alloc(handle, size, prot, offset)
void *handle;
register vm_size_t size;
vm_prot_t prot;
vm_offset_t offset;
vm_ooffset_t offset;
{
vm_object_t object;
@ -292,12 +290,14 @@ swap_pager_alloc(handle, size, prot, offset)
* Probably quite rare, but is yet another reason to just
* rip support of "named anonymous regions" out altogether.
*/
object = vm_object_allocate(OBJT_SWAP, offset + size);
object = vm_object_allocate(OBJT_SWAP,
OFF_TO_IDX(offset + PAGE_SIZE - 1 + size));
object->handle = handle;
(void) swap_pager_swp_alloc(object, M_WAITOK);
}
} else {
object = vm_object_allocate(OBJT_SWAP, offset + size);
object = vm_object_allocate(OBJT_SWAP,
OFF_TO_IDX(offset + PAGE_SIZE - 1 + size));
(void) swap_pager_swp_alloc(object, M_WAITOK);
}
@ -310,10 +310,10 @@ swap_pager_alloc(handle, size, prot, offset)
* if the block has been written
*/
static inline int *
swap_pager_diskaddr(object, offset, valid)
inline static daddr_t *
swap_pager_diskaddr(object, pindex, valid)
vm_object_t object;
vm_offset_t offset;
vm_pindex_t pindex;
int *valid;
{
register sw_blk_t swb;
@ -321,13 +321,13 @@ swap_pager_diskaddr(object, offset, valid)
if (valid)
*valid = 0;
ix = offset / (SWB_NPAGES * PAGE_SIZE);
ix = pindex / SWB_NPAGES;
if ((ix >= object->un_pager.swp.swp_nblocks) ||
(offset >= object->size)) {
(pindex >= object->size)) {
return (FALSE);
}
swb = &object->un_pager.swp.swp_blocks[ix];
ix = (offset % (SWB_NPAGES * PAGE_SIZE)) / PAGE_SIZE;
ix = pindex % SWB_NPAGES;
if (valid)
*valid = swb->swb_valid & (1 << ix);
return &swb->swb_block[ix];
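swap_pager_diskaddr() above replaces the old byte arithmetic with two-level indexing: pindex / SWB_NPAGES picks the swblock, pindex % SWB_NPAGES the slot inside it. A minimal sketch of the mapping, with SWB_NPAGES again assumed to be 8:

#include <stdio.h>

#define SWB_NPAGES 8	/* assumed, as above */

int
main(void)
{
	unsigned pindex;

	for (pindex = 0; pindex < 20; pindex += 7)
		printf("pindex %2u -> swblock %u, slot %u\n",
		    pindex, pindex / SWB_NPAGES, pindex % SWB_NPAGES);
	return (0);
}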
@ -346,12 +346,12 @@ swap_pager_setvalid(object, offset, valid)
register sw_blk_t swb;
int ix;
ix = offset / (SWB_NPAGES * PAGE_SIZE);
ix = offset / SWB_NPAGES;
if (ix >= object->un_pager.swp.swp_nblocks)
return;
swb = &object->un_pager.swp.swp_blocks[ix];
ix = (offset % (SWB_NPAGES * PAGE_SIZE)) / PAGE_SIZE;
ix = offset % SWB_NPAGES;
if (valid)
swb->swb_valid |= (1 << ix);
else
@ -367,15 +367,17 @@ static int
swap_pager_getswapspace(object, amount, rtval)
vm_object_t object;
unsigned int amount;
unsigned int *rtval;
daddr_t *rtval;
{
unsigned location;
vm_swap_size -= amount;
if (!rlist_alloc(&swaplist, amount, rtval)) {
if (!rlist_alloc(&swaplist, amount, &location)) {
vm_swap_size += amount;
return 0;
} else {
swapsizecheck();
object->un_pager.swp.swp_allocsize += amount;
*rtval = location;
return 1;
}
}
@ -401,16 +403,16 @@ swap_pager_freeswapspace(object, from, to)
void
swap_pager_freespace(object, start, size)
vm_object_t object;
vm_offset_t start;
vm_offset_t size;
vm_pindex_t start;
vm_size_t size;
{
vm_offset_t i;
vm_pindex_t i;
int s;
s = splbio();
for (i = start; i < round_page(start + size); i += PAGE_SIZE) {
for (i = start; i < start + size; i += 1) {
int valid;
int *addr = swap_pager_diskaddr(object, i, &valid);
daddr_t *addr = swap_pager_diskaddr(object, i, &valid);
if (addr && *addr != SWB_EMPTY) {
swap_pager_freeswapspace(object, *addr, *addr + btodb(PAGE_SIZE) - 1);
@ -522,19 +524,21 @@ swap_pager_reclaim()
* see if any blocks associated with a pager has been
* allocated but not used (written)
*/
for (i = 0; i < object->un_pager.swp.swp_nblocks; i++) {
sw_blk_t swb = &object->un_pager.swp.swp_blocks[i];
if (object->paging_in_progress == 0) {
for (i = 0; i < object->un_pager.swp.swp_nblocks; i++) {
sw_blk_t swb = &object->un_pager.swp.swp_blocks[i];
if (swb->swb_locked)
continue;
for (j = 0; j < SWB_NPAGES; j++) {
if (swb->swb_block[j] != SWB_EMPTY &&
(swb->swb_valid & (1 << j)) == 0) {
reclaims[reclaimcount].address = swb->swb_block[j];
reclaims[reclaimcount++].object = object;
swb->swb_block[j] = SWB_EMPTY;
if (reclaimcount >= MAXRECLAIM)
goto rfinished;
if (swb->swb_locked)
continue;
for (j = 0; j < SWB_NPAGES; j++) {
if (swb->swb_block[j] != SWB_EMPTY &&
(swb->swb_valid & (1 << j)) == 0) {
reclaims[reclaimcount].address = swb->swb_block[j];
reclaims[reclaimcount++].object = object;
swb->swb_block[j] = SWB_EMPTY;
if (reclaimcount >= MAXRECLAIM)
goto rfinished;
}
}
}
}
@ -565,12 +569,12 @@ swap_pager_reclaim()
void
swap_pager_copy(srcobject, srcoffset, dstobject, dstoffset, offset)
vm_object_t srcobject;
vm_offset_t srcoffset;
vm_pindex_t srcoffset;
vm_object_t dstobject;
vm_offset_t dstoffset;
vm_offset_t offset;
vm_pindex_t dstoffset;
vm_pindex_t offset;
{
vm_offset_t i;
vm_pindex_t i;
int origsize;
int s;
@ -603,11 +607,11 @@ swap_pager_copy(srcobject, srcoffset, dstobject, dstoffset, offset)
/*
* transfer source to destination
*/
for (i = 0; i < dstobject->size; i += PAGE_SIZE) {
for (i = 0; i < dstobject->size; i += 1) {
int srcvalid, dstvalid;
int *srcaddrp = swap_pager_diskaddr(srcobject, i + offset + srcoffset,
daddr_t *srcaddrp = swap_pager_diskaddr(srcobject, i + offset + srcoffset,
&srcvalid);
int *dstaddrp;
daddr_t *dstaddrp;
/*
* see if the source has space allocated
@ -715,17 +719,17 @@ swap_pager_dealloc(object)
}
static inline __pure int
swap_pager_block_index(offset)
vm_offset_t offset;
swap_pager_block_index(pindex)
vm_pindex_t pindex;
{
return (offset / (SWB_NPAGES * PAGE_SIZE));
return (pindex / SWB_NPAGES);
}
static inline __pure int
swap_pager_block_offset(offset)
vm_offset_t offset;
swap_pager_block_offset(pindex)
vm_pindex_t pindex;
{
return ((offset % (PAGE_SIZE * SWB_NPAGES)) / PAGE_SIZE);
return (pindex % SWB_NPAGES);
}
/*
@ -733,27 +737,25 @@ swap_pager_block_offset(offset)
* been written out.
*/
static boolean_t
swap_pager_haspage(object, offset, before, after)
swap_pager_haspage(object, pindex, before, after)
vm_object_t object;
vm_offset_t offset;
vm_pindex_t pindex;
int *before;
int *after;
{
register sw_blk_t swb;
int ix;
int gix;
if (before != NULL)
*before = 0;
if (after != NULL)
*after = 0;
ix = offset / (SWB_NPAGES * PAGE_SIZE);
ix = pindex / SWB_NPAGES;
if (ix >= object->un_pager.swp.swp_nblocks) {
return (FALSE);
}
swb = &object->un_pager.swp.swp_blocks[ix];
gix = offset / PAGE_SIZE;
ix = gix % SWB_NPAGES;
ix = pindex % SWB_NPAGES;
if (swb->swb_block[ix] != SWB_EMPTY) {
@ -844,7 +846,7 @@ swap_pager_getpages(object, m, count, reqpage)
boolean_t rv;
vm_offset_t kva, off[count];
swp_clean_t spc;
vm_offset_t paging_offset;
vm_pindex_t paging_offset;
int reqaddr[count];
int sequential;
@ -853,12 +855,12 @@ swap_pager_getpages(object, m, count, reqpage)
int reqdskregion;
object = m[reqpage]->object;
paging_offset = object->paging_offset;
sequential = (m[reqpage]->offset == (object->last_read + PAGE_SIZE));
paging_offset = OFF_TO_IDX(object->paging_offset);
sequential = (m[reqpage]->pindex == (object->last_read + 1));
for (i = 0; i < count; i++) {
vm_offset_t foff = m[i]->offset + paging_offset;
int ix = swap_pager_block_index(foff);
vm_pindex_t fidx = m[i]->pindex + paging_offset;
int ix = swap_pager_block_index(fidx);
if (ix >= object->un_pager.swp.swp_nblocks) {
int j;
@ -874,7 +876,7 @@ swap_pager_getpages(object, m, count, reqpage)
break;
}
swb[i] = &object->un_pager.swp.swp_blocks[ix];
off[i] = swap_pager_block_offset(foff);
off[i] = swap_pager_block_offset(fidx);
reqaddr[i] = swb[i]->swb_block[off[i]];
}
@ -950,32 +952,8 @@ swap_pager_getpages(object, m, count, reqpage)
swap_pager_ridpages(m, count, reqpage);
m[0] = m[reqpage];
reqaddr[0] = reqaddr[reqpage];
count = 1;
reqpage = 0;
/*
* get a swap pager clean data structure, block until we get
* it
*/
if (swap_pager_free.tqh_first == NULL) {
s = splbio();
if (curproc == pageproc)
swap_pager_sync();
else
pagedaemon_wakeup();
while (swap_pager_free.tqh_first == NULL) {
swap_pager_needflags |= SWAP_FREE_NEEDED;
if (curproc == pageproc)
swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT;
tsleep(&swap_pager_free,
PVM, "swpfre", 0);
if (curproc == pageproc)
swap_pager_sync();
else
pagedaemon_wakeup();
}
splx(s);
}
spc = swap_pager_free.tqh_first;
TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
kva = spc->spc_kva;
@ -1047,7 +1025,7 @@ swap_pager_getpages(object, m, count, reqpage)
pmap_qremove(kva, count);
if (spc) {
m[reqpage]->object->last_read = m[reqpage]->offset;
m[reqpage]->object->last_read = m[reqpage]->pindex;
if (bp->b_flags & B_WANTED)
wakeup(bp);
/*
@ -1099,7 +1077,7 @@ swap_pager_getpages(object, m, count, reqpage)
}
}
m[reqpage]->object->last_read = m[count-1]->offset;
m[reqpage]->object->last_read = m[count-1]->pindex;
/*
* If we're out of swap space, then attempt to free
@ -1111,7 +1089,7 @@ swap_pager_getpages(object, m, count, reqpage)
for (i = 0; i < count; i++) {
m[i]->dirty = VM_PAGE_BITS_ALL;
}
swap_pager_freespace(object, m[0]->offset + paging_offset, count * PAGE_SIZE);
swap_pager_freespace(object, m[0]->pindex + paging_offset, count);
}
} else {
swap_pager_ridpages(m, count, reqpage);
@ -1138,9 +1116,9 @@ swap_pager_putpages(object, m, count, sync, rtvals)
register int s;
int i, j, ix;
boolean_t rv;
vm_offset_t kva, off, foff;
vm_offset_t kva, off, fidx;
swp_clean_t spc;
vm_offset_t paging_offset;
vm_pindex_t paging_pindex;
int reqaddr[count];
int failed;
@ -1154,12 +1132,12 @@ swap_pager_putpages(object, m, count, sync, rtvals)
spc = NULL;
object = m[0]->object;
paging_offset = object->paging_offset;
paging_pindex = OFF_TO_IDX(object->paging_offset);
failed = 0;
for (j = 0; j < count; j++) {
foff = m[j]->offset + paging_offset;
ix = swap_pager_block_index(foff);
fidx = m[j]->pindex + paging_pindex;
ix = swap_pager_block_index(fidx);
swb[j] = 0;
if (ix >= object->un_pager.swp.swp_nblocks) {
rtvals[j] = VM_PAGER_FAIL;
@ -1174,10 +1152,10 @@ swap_pager_putpages(object, m, count, sync, rtvals)
rtvals[j] = VM_PAGER_FAIL;
continue;
}
off = swap_pager_block_offset(foff);
off = swap_pager_block_offset(fidx);
reqaddr[j] = swb[j]->swb_block[off];
if (reqaddr[j] == SWB_EMPTY) {
int blk;
daddr_t blk;
int tries;
int ntoget;
@ -1199,13 +1177,13 @@ swap_pager_putpages(object, m, count, sync, rtvals)
* intent of this code is to allocate small chunks for
* small objects)
*/
if ((foff == 0) &&
((ntoget * PAGE_SIZE) > object->size)) {
ntoget = (object->size + (PAGE_SIZE - 1)) / PAGE_SIZE;
if ((off == 0) && ((fidx + ntoget) > object->size)) {
ntoget = object->size - fidx;
}
retrygetspace:
if (!swap_pager_full && ntoget > 1 &&
swap_pager_getswapspace(object, ntoget * btodb(PAGE_SIZE), &blk)) {
swap_pager_getswapspace(object, ntoget * btodb(PAGE_SIZE),
&blk)) {
for (i = 0; i < ntoget; i++) {
swb[j]->swb_block[i] = blk + btodb(PAGE_SIZE) * i;
@ -1239,8 +1217,9 @@ swap_pager_putpages(object, m, count, sync, rtvals)
*/
failed = 0;
for (i = 0; i < count; i++) {
if (failed || (reqaddr[i] != reqaddr[0] + i * btodb(PAGE_SIZE)) ||
(reqaddr[i] / dmmax) != (reqaddr[0] / dmmax) ||
if (failed ||
(reqaddr[i] != reqaddr[0] + i * btodb(PAGE_SIZE)) ||
((reqaddr[i] / dmmax) != (reqaddr[0] / dmmax)) ||
(rtvals[i] != VM_PAGER_OK)) {
failed = 1;
if (rtvals[i] == VM_PAGER_OK)
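The rewritten test above decides how much of the pageout run can go in one I/O: block addresses must be consecutive on disk and must not straddle a swap-interleave (dmmax) boundary. A sketch with assumed geometry:

#include <stdio.h>

#define PAGE_SIZE 4096
#define BTODB(x) ((x) >> 9)	/* bytes to 512-byte disk blocks */

int
main(void)
{
	int dmmax = 512;	/* swap interleave, in disk blocks (assumed) */
	int reqaddr[3] = { 1024, 1032, 2048 };
	int i;

	for (i = 1; i < 3; i++) {
		int contig =
		    (reqaddr[i] == reqaddr[0] + i * BTODB(PAGE_SIZE)) &&
		    ((reqaddr[i] / dmmax) == (reqaddr[0] / dmmax));
		printf("page %d %s the cluster\n",
		    i, contig ? "extends" : "breaks");
	}
	return (0);
}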
@ -1264,8 +1243,10 @@ swap_pager_putpages(object, m, count, sync, rtvals)
}
count = i;
for (i = 0; i < count; i++) {
if (reqaddr[i] == SWB_EMPTY)
printf("I/O to empty block????\n");
if (reqaddr[i] == SWB_EMPTY) {
printf("I/O to empty block???? -- pindex: %lu, i: %d\n",
(u_long)m[i]->pindex, i);
}
}
/*
@ -1285,7 +1266,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
s = splbio();
if (curproc == pageproc) {
swap_pager_sync();
#if 0
#if 1
splx(s);
return VM_PAGER_AGAIN;
#endif
@ -1323,8 +1304,8 @@ swap_pager_putpages(object, m, count, sync, rtvals)
* get the base I/O offset into the swap file
*/
for (i = 0; i < count; i++) {
foff = m[i]->offset + paging_offset;
off = swap_pager_block_offset(foff);
fidx = m[i]->pindex + paging_pindex;
off = swap_pager_block_offset(fidx);
/*
* set the valid bit
*/

View File: sys/vm/swap_pager.h

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)swap_pager.h 7.1 (Berkeley) 12/5/90
* $Id: swap_pager.h,v 1.10 1995/10/07 19:02:52 davidg Exp $
* $Id: swap_pager.h,v 1.11 1995/11/16 09:51:22 bde Exp $
*/
/*
@ -59,7 +59,7 @@
struct swblock {
unsigned short swb_valid; /* bitmask for valid pages */
unsigned short swb_locked; /* block locked */
int swb_block[SWB_NPAGES]; /* unfortunately int instead of daddr_t */
daddr_t swb_block[SWB_NPAGES]; /* disk block numbers (now a proper daddr_t) */
};
typedef struct swblock *sw_blk_t;
@ -71,8 +71,9 @@ int swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
int swap_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
void swap_pager_sync __P((void));
int swap_pager_swp_alloc __P((vm_object_t, int));
void swap_pager_copy __P((vm_object_t, vm_offset_t, vm_object_t, vm_offset_t, vm_offset_t));
void swap_pager_freespace __P((vm_object_t, vm_offset_t, vm_offset_t));
void swap_pager_copy __P((vm_object_t, vm_pindex_t, vm_object_t,
vm_pindex_t, vm_pindex_t));
void swap_pager_freespace __P((vm_object_t, vm_pindex_t, vm_size_t));
void swap_pager_swap_init __P((void));
#endif

View File: sys/vm/vm_extern.h

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
* $Id: vm_extern.h,v 1.19 1995/11/14 09:29:29 phk Exp $
* $Id: vm_extern.h,v 1.20 1995/12/07 12:48:08 davidg Exp $
*/
#ifndef _VM_EXTERN_H_
@ -81,7 +81,7 @@ vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, boolean_t));
void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
int swaponvp __P((struct proc *, struct vnode *, dev_t , u_long ));
int swaponvp __P((struct proc *, struct vnode *, dev_t , u_long));
void swapout __P((struct proc *));
void swapout_procs __P((void));
void swstrategy __P((struct buf *));
@ -92,19 +92,20 @@ void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fork __P((struct proc *, struct proc *, int));
void vm_map_print __P((/* db_expr_t */ int, boolean_t, /* db_expr_t */ int, char *));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
void vm_object_print __P((/* db_expr_t */ int, boolean_t, /* db_expr_t */ int, char *));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, caddr_t, vm_ooffset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_free __P((struct vmspace *));
void vnode_pager_setsize __P((struct vnode *, u_long));
void vnode_pager_setsize __P((struct vnode *, vm_ooffset_t));
void vnode_pager_umount __P((struct mount *));
void vnode_pager_uncache __P((struct vnode *));
void vslock __P((caddr_t, u_int));
void vsunlock __P((caddr_t, u_int, int));
void vm_object_print __P((/* db_expr_t */ int, boolean_t, /* db_expr_t */ int,
char *));
#endif /* KERNEL */

View File: sys/vm/vm_fault.c

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.37 1995/11/20 12:19:53 phk Exp $
* $Id: vm_fault.c,v 1.38 1995/12/07 12:48:10 davidg Exp $
*/
/*
@ -129,10 +129,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
boolean_t change_wiring;
{
vm_object_t first_object;
vm_offset_t first_offset;
vm_pindex_t first_pindex;
vm_map_entry_t entry;
register vm_object_t object;
register vm_offset_t offset;
register vm_pindex_t pindex;
vm_page_t m;
vm_page_t first_m;
vm_prot_t prot;
@ -192,7 +192,7 @@ RetryFault:;
if ((result = vm_map_lookup(&map, vaddr,
fault_type, &entry, &first_object,
&first_offset, &prot, &wired, &su)) != KERN_SUCCESS) {
&first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
return (result);
}
@ -248,14 +248,14 @@ RetryFault:;
*/
object = first_object;
offset = first_offset;
pindex = first_pindex;
/*
* See whether this page is resident
*/
while (TRUE) {
m = vm_page_lookup(object, offset);
m = vm_page_lookup(object, pindex);
if (m != NULL) {
/*
* If the page is being brought in, wait for it and
@ -297,7 +297,7 @@ RetryFault:;
if (((object->type != OBJT_DEFAULT) && (!change_wiring || wired))
|| (object == first_object)) {
if (offset >= object->size) {
if (pindex >= object->size) {
UNLOCK_AND_DEALLOCATE;
return (KERN_PROTECTION_FAILURE);
}
@ -305,7 +305,7 @@ RetryFault:;
/*
* Allocate a new page for this object/offset pair.
*/
m = vm_page_alloc(object, offset,
m = vm_page_alloc(object, pindex,
vp?VM_ALLOC_NORMAL:(VM_ALLOC_NORMAL|VM_ALLOC_ZERO));
if (m == NULL) {
@ -357,7 +357,7 @@ RetryFault:;
* is responsible for disposition of old page
* if moved.
*/
m = vm_page_lookup(object, offset);
m = vm_page_lookup(object, pindex);
if( !m) {
UNLOCK_AND_DEALLOCATE;
goto RetryFault;
@ -416,7 +416,7 @@ RetryFault:;
* unlocking the current one.
*/
offset += object->backing_object_offset;
pindex += OFF_TO_IDX(object->backing_object_offset);
next_object = object->backing_object;
if (next_object == NULL) {
/*
@ -427,7 +427,7 @@ RetryFault:;
vm_object_pip_wakeup(object);
object = first_object;
offset = first_offset;
pindex = first_pindex;
m = first_m;
}
first_m = NULL;
@ -521,7 +521,7 @@ RetryFault:;
cnt.v_cow_faults++;
m = first_m;
object = first_object;
offset = first_offset;
pindex = first_pindex;
/*
* Now that we've gotten the copy out of the way,
@ -545,7 +545,7 @@ RetryFault:;
if (!lookup_still_valid) {
vm_object_t retry_object;
vm_offset_t retry_offset;
vm_pindex_t retry_pindex;
vm_prot_t retry_prot;
/*
@ -562,7 +562,7 @@ RetryFault:;
* and will merely take another fault.
*/
result = vm_map_lookup(&map, vaddr, fault_type & ~VM_PROT_WRITE,
&entry, &retry_object, &retry_offset, &retry_prot, &wired, &su);
&entry, &retry_object, &retry_pindex, &retry_prot, &wired, &su);
/*
* If we don't need the page any longer, put it on the active
@ -578,7 +578,7 @@ RetryFault:;
lookup_still_valid = TRUE;
if ((retry_object != first_object) ||
(retry_offset != first_offset)) {
(retry_pindex != first_pindex)) {
RELEASE_PAGE(m);
UNLOCK_AND_DEALLOCATE;
goto RetryFault;
@ -655,7 +655,7 @@ RetryFault:;
}
if ((m->flags & PG_BUSY) == 0)
printf("page not busy: %d\n", m->offset);
printf("page not busy: %lu\n", (u_long)m->pindex);
/*
* Unlock everything, and return
*/
@ -773,8 +773,8 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
{
vm_object_t dst_object;
vm_object_t src_object;
vm_offset_t dst_offset;
vm_offset_t src_offset;
vm_ooffset_t dst_offset;
vm_ooffset_t src_offset;
vm_prot_t prot;
vm_offset_t vaddr;
vm_page_t dst_m;
@ -792,7 +792,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* actually shadow anything - we copy the pages directly.)
*/
dst_object = vm_object_allocate(OBJT_DEFAULT,
(vm_size_t) (dst_entry->end - dst_entry->start));
(vm_size_t) OFF_TO_IDX(dst_entry->end - dst_entry->start));
dst_entry->object.vm_object = dst_object;
dst_entry->offset = 0;
@ -812,7 +812,8 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* Allocate a page in the destination object
*/
do {
dst_m = vm_page_alloc(dst_object, dst_offset, VM_ALLOC_NORMAL);
dst_m = vm_page_alloc(dst_object,
OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
if (dst_m == NULL) {
VM_WAIT;
}
@ -823,7 +824,8 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* (Because the source is wired down, the page will be in
* memory.)
*/
src_m = vm_page_lookup(src_object, dst_offset + src_offset);
src_m = vm_page_lookup(src_object,
OFF_TO_IDX(dst_offset + src_offset));
if (src_m == NULL)
panic("vm_fault_copy_wired: page missing");
@ -871,20 +873,21 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
{
int i;
vm_object_t object;
vm_offset_t offset, startoffset, endoffset, toffset, size;
vm_pindex_t pindex, startpindex, endpindex, tpindex;
vm_offset_t size;
vm_page_t rtm;
int treqpage;
int cbehind, cahead;
object = m->object;
offset = m->offset;
pindex = m->pindex;
/*
* if the requested page is not available, then give up now
*/
if (!vm_pager_has_page(object,
object->paging_offset + offset, &cbehind, &cahead))
OFF_TO_IDX(object->paging_offset) + pindex, &cbehind, &cahead))
return 0;
if ((cbehind == 0) && (cahead == 0)) {
@ -916,45 +919,45 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
* scan backward for the read behind pages -- in memory or on disk not
* in same object
*/
toffset = offset - PAGE_SIZE;
if (toffset < offset) {
if (rbehind * PAGE_SIZE > offset)
rbehind = offset / PAGE_SIZE;
startoffset = offset - rbehind * PAGE_SIZE;
while (toffset >= startoffset) {
if (vm_page_lookup( object, toffset)) {
startoffset = toffset + PAGE_SIZE;
tpindex = pindex - 1;
if (tpindex < pindex) {
if (rbehind > pindex)
rbehind = pindex;
startpindex = pindex - rbehind;
while (tpindex >= startpindex) {
if (vm_page_lookup( object, tpindex)) {
startpindex = tpindex + 1;
break;
}
if (toffset == 0)
if (tpindex == 0)
break;
toffset -= PAGE_SIZE;
tpindex -= 1;
}
} else {
startoffset = offset;
startpindex = pindex;
}
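The backward-scan setup above relies on unsigned wraparound: at pindex 0, tpindex = pindex - 1 wraps to the maximum vm_pindex_t, the tpindex < pindex test fails, and read-behind is skipped rather than underflowing. A sketch of the guard:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t vm_pindex_t;	/* width assumed for illustration */

int
main(void)
{
	vm_pindex_t pindex = 0;
	vm_pindex_t tpindex = pindex - 1;	/* wraps to 0xffffffff */

	if (tpindex < pindex)
		printf("scan backward from %u\n", tpindex);
	else
		printf("at page 0: no read-behind\n");
	return (0);
}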
/*
* scan forward for the read ahead pages -- in memory or on disk not
* in same object
*/
toffset = offset + PAGE_SIZE;
endoffset = offset + (rahead + 1) * PAGE_SIZE;
if (endoffset > object->size)
endoffset = object->size;
while (toffset < endoffset) {
if ( vm_page_lookup(object, toffset)) {
tpindex = pindex + 1;
endpindex = pindex + (rahead + 1);
if (endpindex > object->size)
endpindex = object->size;
while (tpindex < endpindex) {
if ( vm_page_lookup(object, tpindex)) {
break;
}
toffset += PAGE_SIZE;
tpindex += 1;
}
endoffset = toffset;
endpindex = tpindex;
/* calculate the number of pages in the run */
size = (endoffset - startoffset) / PAGE_SIZE;
size = endpindex - startpindex;
/* calculate the index of the required page within the run */
treqpage = (offset - startoffset) / PAGE_SIZE;
treqpage = pindex - startpindex;
/* see if we have space (again) */
if ((cnt.v_free_count + cnt.v_cache_count) >
@ -965,7 +968,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
for (i = 0; i < size; i++) {
if (i != treqpage) {
rtm = vm_page_alloc(object,
startoffset + i * PAGE_SIZE,
startpindex + i,
VM_ALLOC_NORMAL);
if (rtm == NULL) {
if (i < treqpage) {

View File: sys/vm/vm_init.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_init.c,v 1.10 1995/12/02 17:11:20 bde Exp $
* $Id: vm_init.c,v 1.11 1995/12/07 12:48:12 davidg Exp $
*/
/*
@ -113,7 +113,7 @@ vm_mem_init(dummy)
/*
* Initialize other VM packages
*/
vm_object_init(virtual_end - VM_MIN_KERNEL_ADDRESS);
vm_object_init();
vm_map_startup();
kmem_init(virtual_avail, virtual_end);
pmap_init(avail_start, avail_end);

View File: sys/vm/vm_kern.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.18 1995/12/07 12:48:13 davidg Exp $
* $Id: vm_kern.c,v 1.19 1995/12/10 14:52:09 bde Exp $
*/
/*
@ -181,7 +181,9 @@ kmem_alloc(map, size)
for (i = 0; i < size; i += PAGE_SIZE) {
vm_page_t mem;
while ((mem = vm_page_alloc(kernel_object, offset + i, (VM_ALLOC_NORMAL|VM_ALLOC_ZERO))) == NULL) {
while ((mem = vm_page_alloc(kernel_object,
OFF_TO_IDX(offset + i),
(VM_ALLOC_NORMAL|VM_ALLOC_ZERO))) == NULL) {
VM_WAIT;
}
if ((mem->flags & PG_ZERO) == 0)
@ -332,7 +334,7 @@ kmem_malloc(map, size, waitflag)
* pulling it off the active queue to prevent pageout.
*/
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_alloc(kmem_object, offset + i,
m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
(waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);
/*
@ -343,7 +345,8 @@ kmem_malloc(map, size, waitflag)
if (m == NULL) {
while (i != 0) {
i -= PAGE_SIZE;
m = vm_page_lookup(kmem_object, offset + i);
m = vm_page_lookup(kmem_object,
OFF_TO_IDX(offset + i));
vm_page_free(m);
}
vm_map_delete(map, addr, addr + size);
@ -372,7 +375,7 @@ kmem_malloc(map, size, waitflag)
* splimp...)
*/
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kmem_object, offset + i);
m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
}
vm_map_unlock(map);
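The kmem_alloc()/kmem_malloc() loops above still step through the range in bytes and convert to a page index at each vm_page_alloc() or vm_page_lookup() call. The shape of that conversion, assuming 4K pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096
#define PAGE_SHIFT 12
#define OFF_TO_IDX(off) ((uint32_t)((uint64_t)(off) >> PAGE_SHIFT))

int
main(void)
{
	uint64_t offset = 0x3000;	/* object offset of the mapping */
	uint64_t size = 3 * PAGE_SIZE;
	uint64_t i;

	for (i = 0; i < size; i += PAGE_SIZE)
		printf("allocate at pindex %u\n", OFF_TO_IDX(offset + i));
	return (0);
}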

View File: sys/vm/vm_map.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.27 1995/11/20 12:19:49 phk Exp $
* $Id: vm_map.c,v 1.28 1995/12/07 12:48:15 davidg Exp $
*/
/*
@ -329,7 +329,7 @@ vm_map_entry_create(map)
vm_page_t m;
m = vm_page_alloc(kmem_object,
mapvm - vm_map_min(kmem_map),
OFF_TO_IDX(mapvm - vm_map_min(kmem_map)),
(map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);
if (m) {
int newentries;
@ -502,7 +502,7 @@ int
vm_map_insert(map, object, offset, start, end)
vm_map_t map;
vm_object_t object;
vm_offset_t offset;
vm_ooffset_t offset;
vm_offset_t start;
vm_offset_t end;
{
@ -553,9 +553,7 @@ vm_map_insert(map, object, offset, start, end)
(prev_entry->wired_count == 0)) {
if (vm_object_coalesce(prev_entry->object.vm_object,
NULL,
prev_entry->offset,
(vm_offset_t) 0,
OFF_TO_IDX(prev_entry->offset),
(vm_size_t) (prev_entry->end
- prev_entry->start),
(vm_size_t) (end - prev_entry->end))) {
@ -765,7 +763,7 @@ int
vm_map_find(map, object, offset, addr, length, find_space)
vm_map_t map;
vm_object_t object;
vm_offset_t offset;
vm_ooffset_t offset;
vm_offset_t *addr; /* IN/OUT */
vm_size_t length;
boolean_t find_space;
@ -1322,13 +1320,13 @@ vm_map_pageable(map, start, end, new_pageable)
vm_object_shadow(&entry->object.vm_object,
&entry->offset,
(vm_size_t) (entry->end
OFF_TO_IDX(entry->end
- entry->start));
entry->needs_copy = FALSE;
} else if (entry->object.vm_object == NULL) {
entry->object.vm_object =
vm_object_allocate(OBJT_DEFAULT, (vm_size_t) (entry->end
- entry->start));
vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(entry->end - entry->start));
entry->offset = (vm_offset_t) 0;
}
}
@ -1443,7 +1441,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
vm_map_entry_t entry;
vm_size_t size;
vm_object_t object;
vm_offset_t offset;
vm_ooffset_t offset;
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
@ -1501,9 +1499,15 @@ vm_map_clean(map, start, end, syncio, invalidate)
* idea.
*/
if (current->protection & VM_PROT_WRITE)
vm_object_page_clean(object, offset, offset + size, syncio, TRUE);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size),
syncio, TRUE);
if (invalidate)
vm_object_page_remove(object, offset, offset + size, FALSE);
vm_object_page_remove(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size),
FALSE);
}
start += size;
}
@ -1627,12 +1631,12 @@ vm_map_delete(map, start, end)
*/
if (object == kernel_object || object == kmem_object)
vm_object_page_remove(object, entry->offset,
entry->offset + (e - s), FALSE);
vm_object_page_remove(object, OFF_TO_IDX(entry->offset),
OFF_TO_IDX(entry->offset + (e - s)), FALSE);
else if (!map->is_main_map)
vm_object_pmap_remove(object,
entry->offset,
entry->offset + (e - s));
OFF_TO_IDX(entry->offset),
OFF_TO_IDX(entry->offset + (e - s)));
else
pmap_remove(map->pmap, s, e);
@ -1736,6 +1740,8 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
vm_map_t src_map, dst_map;
register vm_map_entry_t src_entry, dst_entry;
{
vm_pindex_t temp_pindex;
if (src_entry->is_sub_map || dst_entry->is_sub_map)
return;
@ -1759,9 +1765,9 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
else
vm_object_pmap_remove(dst_entry->object.vm_object,
dst_entry->offset,
dst_entry->offset +
(dst_entry->end - dst_entry->start));
OFF_TO_IDX(dst_entry->offset),
OFF_TO_IDX(dst_entry->offset +
(dst_entry->end - dst_entry->start)));
if (src_entry->wired_count == 0) {
@ -1789,21 +1795,21 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
src_entry->protection & ~VM_PROT_WRITE);
} else {
vm_object_pmap_copy(src_entry->object.vm_object,
src_entry->offset,
src_entry->offset + (src_entry->end
- src_entry->start));
OFF_TO_IDX(src_entry->offset),
OFF_TO_IDX(src_entry->offset + (src_entry->end
- src_entry->start)));
}
}
/*
* Make a copy of the object.
*/
temp_pindex = OFF_TO_IDX(dst_entry->offset);
vm_object_copy(src_entry->object.vm_object,
src_entry->offset,
(vm_size_t) (src_entry->end -
src_entry->start),
OFF_TO_IDX(src_entry->offset),
&dst_entry->object.vm_object,
&dst_entry->offset,
&temp_pindex,
&src_needs_copy);
dst_entry->offset = IDX_TO_OFF(temp_pindex);
/*
* If we didn't get a copy-object now, mark the source map
* entry so that a shadow will be created to hold its changed
@ -1950,14 +1956,14 @@ vmspace_fork(vm1)
*/
int
vm_map_lookup(var_map, vaddr, fault_type, out_entry,
object, offset, out_prot, wired, single_use)
object, pindex, out_prot, wired, single_use)
vm_map_t *var_map; /* IN/OUT */
register vm_offset_t vaddr;
register vm_prot_t fault_type;
vm_map_entry_t *out_entry; /* OUT */
vm_object_t *object; /* OUT */
vm_offset_t *offset; /* OUT */
vm_pindex_t *pindex; /* OUT */
vm_prot_t *out_prot; /* OUT */
boolean_t *wired; /* OUT */
boolean_t *single_use; /* OUT */
@ -2095,7 +2101,7 @@ RetryLookup:;
vm_object_shadow(
&entry->object.vm_object,
&entry->offset,
(vm_size_t) (entry->end - entry->start));
OFF_TO_IDX(entry->end - entry->start));
entry->needs_copy = FALSE;
@ -2120,7 +2126,7 @@ RetryLookup:;
goto RetryLookup;
}
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
(vm_size_t) (entry->end - entry->start));
OFF_TO_IDX(entry->end - entry->start));
entry->offset = 0;
lock_write_to_read(&share_map->lock);
}
@ -2129,7 +2135,7 @@ RetryLookup:;
* copy-on-write or empty, it has been fixed up.
*/
*offset = (share_offset - entry->start) + entry->offset;
*pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
*object = entry->object.vm_object;
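vm_map_lookup() now returns a page index computed from the faulting address, the entry start, and the entry's 64-bit object offset. A worked example with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t vm_ooffset_t;
#define OFF_TO_IDX(off) ((uint32_t)((vm_ooffset_t)(off) >> PAGE_SHIFT))

int
main(void)
{
	uint32_t vaddr = 0x08052345;		/* faulting address */
	uint32_t entry_start = 0x08048000;	/* map entry start */
	vm_ooffset_t entry_offset = 0x2000;	/* entry offset into object */

	printf("pindex %u\n",
	    OFF_TO_IDX((vaddr - entry_start) + entry_offset));	/* 12 */
	return (0);
}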
/*

View File: sys/vm/vm_map.h

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.h,v 1.7 1995/08/26 23:18:37 bde Exp $
* $Id: vm_map.h,v 1.8 1995/12/07 12:48:17 davidg Exp $
*/
/*
@ -103,7 +103,7 @@ struct vm_map_entry {
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
vm_ooffset_t offset; /* offset into object */
boolean_t is_a_map:1, /* Is "object" a map? */
is_sub_map:1, /* Is "object" a submap? */
/* Only in sharing maps: */
@ -217,13 +217,13 @@ vm_map_entry_t vm_map_entry_create __P((vm_map_t));
void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_offset_t, vm_offset_t *, vm_size_t, boolean_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_offset_t, vm_offset_t, vm_offset_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
vm_offset_t *, vm_prot_t *, boolean_t *, boolean_t *));
vm_pindex_t *, vm_prot_t *, boolean_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));

View File: sys/vm/vm_mmap.c

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.30 1995/12/03 12:18:35 bde Exp $
* $Id: vm_mmap.c,v 1.31 1995/12/07 12:48:19 davidg Exp $
*/
/*
@ -244,7 +244,7 @@ mmap(p, uap, retval)
}
}
error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
flags, handle, (vm_offset_t) uap->pos);
flags, handle, uap->pos);
if (error == 0)
*retval = (int) addr;
return (error);
@ -605,7 +605,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
vm_prot_t prot, maxprot;
register int flags;
caddr_t handle; /* XXX should be vp */
vm_offset_t foff;
vm_ooffset_t foff;
{
boolean_t fitit;
vm_object_t object;
@ -708,7 +708,8 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
* "Pre-fault" resident pages.
*/
if ((type == OBJT_VNODE) && (map->pmap != NULL)) {
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
pmap_object_init_pt(map->pmap, *addr,
object, (vm_pindex_t) OFF_TO_IDX(foff), size);
}
/*

View File: sys/vm/vm_object.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.57 1995/12/03 12:18:37 bde Exp $
* $Id: vm_object.c,v 1.58 1995/12/07 12:48:21 davidg Exp $
*/
/*
@ -162,9 +162,9 @@ _vm_object_allocate(type, size, object)
object->paging_in_progress = 0;
object->resident_page_count = 0;
object->handle = NULL;
object->paging_offset = 0;
object->paging_offset = (vm_ooffset_t) 0;
object->backing_object = NULL;
object->backing_object_offset = (vm_offset_t) 0;
object->backing_object_offset = (vm_ooffset_t) 0;
object->last_read = 0;
@ -178,7 +178,7 @@ _vm_object_allocate(type, size, object)
* Initialize the VM objects module.
*/
void
vm_object_init(vm_offset_t nothing)
vm_object_init()
{
TAILQ_INIT(&vm_object_cached_list);
TAILQ_INIT(&vm_object_list);
@ -189,11 +189,11 @@ vm_object_init(vm_offset_t nothing)
vm_object_cache_max += (cnt.v_page_count - 1000) / 4;
kernel_object = &kernel_object_store;
_vm_object_allocate(OBJT_DEFAULT, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
kmem_object = &kmem_object_store;
_vm_object_allocate(OBJT_DEFAULT, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
}
@ -434,8 +434,8 @@ vm_object_terminate(object)
void
vm_object_page_clean(object, start, end, syncio, lockflag)
vm_object_t object;
vm_offset_t start;
vm_offset_t end;
vm_pindex_t start;
vm_pindex_t end;
boolean_t syncio;
boolean_t lockflag;
{
@ -456,23 +456,18 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
VOP_LOCK(vp);
object->flags |= OBJ_CLEANING;
if (start != end) {
start = trunc_page(start);
end = round_page(end);
}
tstart = start;
if (end == 0) {
tend = object->size;
} else {
tend = end;
}
if (tstart == 0 && tend == object->size) {
if ((tstart == 0) && (tend == object->size)) {
object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}
runlen = 0;
for(;tstart < tend; tstart += PAGE_SIZE) {
for(;tstart < tend; tstart += 1) {
relookup:
p = vm_page_lookup(object, tstart);
if (!p) {
@ -482,7 +477,7 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
}
continue;
}
if (p->valid == 0 || (p->flags & PG_CACHE)) {
if ((p->valid == 0) || (p->flags & PG_CACHE)) {
if (runlen > 0) {
vm_pageout_flush(ma, runlen, syncio);
runlen = 0;
@ -585,8 +580,8 @@ vm_object_cache_trim()
void
vm_object_pmap_copy(object, start, end)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
register vm_pindex_t start;
register vm_pindex_t end;
{
register vm_page_t p;
@ -611,11 +606,10 @@ vm_object_pmap_copy(object, start, end)
void
vm_object_pmap_remove(object, start, end)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
register vm_pindex_t start;
register vm_pindex_t end;
{
register vm_page_t p;
if (object == NULL)
return;
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
@ -635,13 +629,12 @@ vm_object_pmap_remove(object, start, end)
* up by a non-default pager.
*/
void
vm_object_copy(src_object, src_offset, size,
vm_object_copy(src_object, src_offset,
dst_object, dst_offset, src_needs_copy)
register vm_object_t src_object;
vm_offset_t src_offset;
vm_size_t size;
vm_pindex_t src_offset;
vm_object_t *dst_object;/* OUT */
vm_offset_t *dst_offset;/* OUT */
vm_pindex_t *dst_offset;/* OUT */
boolean_t *src_needs_copy; /* OUT */
{
if (src_object == NULL) {
@ -692,7 +685,7 @@ vm_object_copy(src_object, src_offset, size,
void
vm_object_shadow(object, offset, length)
vm_object_t *object; /* IN/OUT */
vm_offset_t *offset; /* IN/OUT */
vm_ooffset_t *offset; /* IN/OUT */
vm_size_t length;
{
register vm_object_t source;
@ -743,7 +736,9 @@ vm_object_qcollapse(object)
register vm_object_t object;
{
register vm_object_t backing_object;
register vm_offset_t backing_offset, new_offset;
register vm_pindex_t backing_offset_index, paging_offset_index;
vm_pindex_t backing_object_paging_offset_index;
vm_pindex_t new_pindex;
register vm_page_t p, pp;
register vm_size_t size;
@ -753,7 +748,9 @@ vm_object_qcollapse(object)
backing_object->ref_count += 2;
backing_offset = object->backing_object_offset;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
paging_offset_index = OFF_TO_IDX(object->paging_offset);
size = object->size;
p = backing_object->memq.tqh_first;
while (p) {
@ -766,26 +763,27 @@ vm_object_qcollapse(object)
continue;
}
vm_page_protect(p, VM_PROT_NONE);
new_offset = (p->offset - backing_offset);
if (p->offset < backing_offset ||
new_offset >= size) {
new_pindex = p->pindex - backing_offset_index;
if (p->pindex < backing_offset_index ||
new_pindex >= size) {
if (backing_object->type == OBJT_SWAP)
swap_pager_freespace(backing_object,
backing_object->paging_offset + p->offset, PAGE_SIZE);
backing_object_paging_offset_index+p->pindex,
1);
vm_page_free(p);
} else {
pp = vm_page_lookup(object, new_offset);
pp = vm_page_lookup(object, new_pindex);
if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
object->paging_offset + new_offset, NULL, NULL))) {
paging_offset_index + new_pindex, NULL, NULL))) {
if (backing_object->type == OBJT_SWAP)
swap_pager_freespace(backing_object,
backing_object->paging_offset + p->offset, PAGE_SIZE);
backing_object_paging_offset_index + p->pindex, 1);
vm_page_free(p);
} else {
if (backing_object->type == OBJT_SWAP)
swap_pager_freespace(backing_object,
backing_object->paging_offset + p->offset, PAGE_SIZE);
vm_page_rename(p, object, new_offset);
backing_object_paging_offset_index + p->pindex, 1);
vm_page_rename(p, object, new_pindex);
p->dirty = VM_PAGE_BITS_ALL;
}
}
@ -807,9 +805,9 @@ vm_object_collapse(object)
{
vm_object_t backing_object;
vm_offset_t backing_offset;
vm_ooffset_t backing_offset;
vm_size_t size;
vm_offset_t new_offset;
vm_pindex_t new_pindex, backing_offset_index;
vm_page_t p, pp;
while (TRUE) {
@ -856,6 +854,7 @@ vm_object_collapse(object)
*/
backing_offset = object->backing_object_offset;
backing_offset_index = OFF_TO_IDX(backing_offset);
size = object->size;
/*
@ -877,7 +876,7 @@ vm_object_collapse(object)
while ((p = backing_object->memq.tqh_first) != 0) {
new_offset = (p->offset - backing_offset);
new_pindex = p->pindex - backing_offset_index;
/*
* If the parent has a page here, or if this
@ -887,20 +886,20 @@ vm_object_collapse(object)
* Otherwise, move it as planned.
*/
if (p->offset < backing_offset ||
new_offset >= size) {
if (p->pindex < backing_offset_index ||
new_pindex >= size) {
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
vm_page_free(p);
} else {
pp = vm_page_lookup(object, new_offset);
pp = vm_page_lookup(object, new_pindex);
if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
object->paging_offset + new_offset, NULL, NULL))) {
OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
vm_page_free(p);
} else {
vm_page_rename(p, object, new_offset);
vm_page_rename(p, object, new_pindex);
}
}
}
@ -919,9 +918,11 @@ vm_object_collapse(object)
* shadow object.
*/
swap_pager_copy(
backing_object, backing_object->paging_offset,
object, object->paging_offset,
object->backing_object_offset);
backing_object,
OFF_TO_IDX(backing_object->paging_offset),
object,
OFF_TO_IDX(object->paging_offset),
OFF_TO_IDX(object->backing_object_offset));
vm_object_pip_wakeup(object);
} else {
object->paging_in_progress++;
@ -951,7 +952,8 @@ vm_object_collapse(object)
/*
* free unnecessary blocks
*/
swap_pager_freespace(object, 0, object->paging_offset);
swap_pager_freespace(object, 0,
OFF_TO_IDX(object->paging_offset));
vm_object_pip_wakeup(object);
}
@ -1009,7 +1011,7 @@ vm_object_collapse(object)
*/
for (p = backing_object->memq.tqh_first; p; p = p->listq.tqe_next) {
new_offset = (p->offset - backing_offset);
new_pindex = p->pindex - backing_offset_index;
/*
* If the parent has a page here, or if this
@ -1019,13 +1021,13 @@ vm_object_collapse(object)
* the chain.
*/
if (p->offset >= backing_offset && new_offset <= size) {
if (p->pindex >= backing_offset_index &&
new_pindex <= size) {
pp = vm_page_lookup(object, new_offset);
pp = vm_page_lookup(object, new_pindex);
if ((pp == NULL || pp->valid == 0) &&
!vm_pager_has_page(object, object->paging_offset + new_offset, NULL, NULL)) {
!vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
/*
* Page still needed. Can't go any
* further.
@ -1079,26 +1081,24 @@ vm_object_collapse(object)
void
vm_object_page_remove(object, start, end, clean_only)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
register vm_pindex_t start;
register vm_pindex_t end;
boolean_t clean_only;
{
register vm_page_t p, next;
vm_offset_t size;
unsigned int size;
int s;
if (object == NULL)
return;
object->paging_in_progress++;
start = trunc_page(start);
end = round_page(end);
again:
size = end - start;
if (size > 4 * PAGE_SIZE || size >= object->size / 4) {
if (size > 4 || size >= object->size / 4) {
for (p = object->memq.tqh_first; p != NULL; p = next) {
next = p->listq.tqe_next;
if ((start <= p->offset) && (p->offset < end)) {
if ((start <= p->pindex) && (p->pindex < end)) {
s = splhigh();
if (p->bmapped) {
splx(s);
@ -1145,8 +1145,8 @@ vm_object_page_remove(object, start, end, clean_only)
PAGE_WAKEUP(p);
vm_page_free(p);
}
start += PAGE_SIZE;
size -= PAGE_SIZE;
start += 1;
size -= 1;
}
}
vm_object_pip_wakeup(object);
@ -1175,19 +1175,13 @@ vm_object_page_remove(object, start, end, clean_only)
* The object must *not* be locked.
*/
boolean_t
vm_object_coalesce(prev_object, next_object,
prev_offset, next_offset,
prev_size, next_size)
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
register vm_object_t prev_object;
vm_object_t next_object;
vm_offset_t prev_offset, next_offset;
vm_pindex_t prev_pindex;
vm_size_t prev_size, next_size;
{
vm_size_t newsize;
if (next_object != NULL) {
return (FALSE);
}
if (prev_object == NULL) {
return (TRUE);
}
@ -1208,45 +1202,28 @@ vm_object_coalesce(prev_object, next_object,
prev_object->backing_object != NULL) {
return (FALSE);
}
prev_size >>= PAGE_SHIFT;
next_size >>= PAGE_SHIFT;
/*
* Remove any pages that may still be in the object from a previous
* deallocation.
*/
vm_object_page_remove(prev_object,
prev_offset + prev_size,
prev_offset + prev_size + next_size, FALSE);
prev_pindex + prev_size,
prev_pindex + prev_size + next_size, FALSE);
/*
* Extend the object if necessary.
*/
newsize = prev_offset + prev_size + next_size;
newsize = prev_pindex + prev_size + next_size;
if (newsize > prev_object->size)
prev_object->size = newsize;
return (TRUE);
}
/*
* returns page after looking up in shadow chain
*/
static vm_page_t
vm_object_page_lookup(object, offset)
vm_object_t object;
vm_offset_t offset;
{
vm_page_t m;
if (!(m = vm_page_lookup(object, offset))) {
if (!object->backing_object)
return 0;
else
return vm_object_page_lookup(object->backing_object, offset + object->backing_object_offset);
}
return m;
}
#ifdef DDB
static int
@ -1329,8 +1306,6 @@ vm_object_in_map( object)
void
vm_object_check() {
int i;
int maxhash = 0;
vm_object_t object;
/*
@ -1400,7 +1375,7 @@ vm_object_print(iobject, full, dummy3, dummy4)
count++;
printf("(off=0x%lx,page=0x%lx)",
(u_long) p->offset, (u_long) VM_PAGE_TO_PHYS(p));
(u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
}
if (count != 0)
printf("\n");

View File: sys/vm/vm_object.h

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.23 1995/11/05 20:46:01 dyson Exp $
* $Id: vm_object.h,v 1.24 1995/12/07 12:48:22 davidg Exp $
*/
/*
@ -96,15 +96,15 @@ struct vm_object {
u_short flags; /* see below */
u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
int resident_page_count; /* number of resident pages */
vm_offset_t paging_offset; /* Offset into paging space */
vm_ooffset_t paging_offset; /* Offset into paging space */
struct vm_object *backing_object; /* object that I'm a shadow of */
vm_offset_t backing_object_offset;/* Offset in backing object */
vm_ooffset_t backing_object_offset;/* Offset in backing object */
vm_offset_t last_read; /* last read in object -- detect seq behavior */
TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
void *handle;
union {
struct {
vm_size_t vnp_size; /* Current size of file */
off_t vnp_size; /* Current size of file */
} vnp;
struct {
TAILQ_HEAD(, vm_page) devp_pglist; /* list of pages allocated */
@ -129,6 +129,8 @@ struct vm_object {
#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */
#define OBJ_CLEANING 0x0200
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
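These two macros carry the whole commit: IDX_TO_OFF widens a page index into a 64-bit byte offset, and OFF_TO_IDX truncates downward, which is why callers that must cover a partial page (swap_pager_alloc earlier in this diff) add PAGE_SIZE - 1 first. A standalone check of the semantics:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t vm_ooffset_t;
typedef uint32_t vm_pindex_t;
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))

int
main(void)
{
	vm_ooffset_t off = 0x123456789ULL;	/* ~4.6 GB: needs 64 bits */

	printf("floor idx %u\n", OFF_TO_IDX(off));
	printf("ceil  idx %u\n", OFF_TO_IDX(off + (1 << PAGE_SHIFT) - 1));
	printf("back to off 0x%llx (page-aligned)\n",
	    (unsigned long long)IDX_TO_OFF(OFF_TO_IDX(off)));
	return (0);
}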
#ifdef KERNEL
extern int vm_object_cache_max;
@ -162,18 +164,18 @@ vm_object_pip_wakeup(vm_object_t object)
vm_object_t vm_object_allocate __P((objtype_t, vm_size_t));
void vm_object_cache_clear __P((void));
void vm_object_cache_trim __P((void));
boolean_t vm_object_coalesce __P((vm_object_t, vm_object_t, vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t));
boolean_t vm_object_coalesce __P((vm_object_t, vm_pindex_t, vm_size_t, vm_size_t));
void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_offset_t, vm_size_t, vm_object_t *, vm_offset_t *, boolean_t *));
void vm_object_copy __P((vm_object_t, vm_pindex_t, vm_object_t *, vm_pindex_t *, boolean_t *));
void vm_object_deactivate_pages __P((vm_object_t));
void vm_object_deallocate __P((vm_object_t));
void vm_object_init __P((vm_size_t));
void vm_object_page_clean __P((vm_object_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
void vm_object_page_remove __P((vm_object_t, vm_offset_t, vm_offset_t, boolean_t));
void vm_object_pmap_copy __P((vm_object_t, vm_offset_t, vm_offset_t));
void vm_object_pmap_remove __P((vm_object_t, vm_offset_t, vm_offset_t));
void vm_object_init __P((void));
void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t, boolean_t));
void vm_object_page_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
void vm_object_pmap_copy __P((vm_object_t, vm_pindex_t, vm_pindex_t));
void vm_object_pmap_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t));
void vm_object_reference __P((vm_object_t));
void vm_object_shadow __P((vm_object_t *, vm_offset_t *, vm_size_t));
void vm_object_shadow __P((vm_object_t *, vm_ooffset_t *, vm_size_t));
void vm_object_terminate __P((vm_object_t));
#endif /* KERNEL */

View File: sys/vm/vm_page.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.39 1995/12/03 12:18:39 bde Exp $
* $Id: vm_page.c,v 1.40 1995/12/07 12:48:23 davidg Exp $
*/
/*
@ -115,6 +115,7 @@ vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
vm_size_t page_mask;
int page_shift;
int vm_page_zero_count;
/*
* map of contiguous valid DEV_BSIZE chunks in a page
@ -354,9 +355,11 @@ vm_page_startup(starta, enda, vaddr)
* NOTE: This macro depends on vm_page_bucket_count being a power of 2.
*/
static inline __pure int
vm_page_hash(vm_object_t object, vm_offset_t offset)
vm_page_hash(object, pindex)
vm_object_t object;
vm_pindex_t pindex;
{
return ((unsigned) object + (offset >> PAGE_SHIFT)) & vm_page_hash_mask;
return ((unsigned) object + pindex) & vm_page_hash_mask;
}
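vm_page_hash() above mixes the object pointer with the raw page index and masks with vm_page_hash_mask; the NOTE about vm_page_bucket_count being a power of two is what lets & stand in for a modulo. A sketch:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	unsigned bucket_count = 1024;		/* must be a power of two */
	unsigned hash_mask = bucket_count - 1;	/* so & replaces % */
	uintptr_t object = 0xc0df1230;		/* hypothetical object address */
	unsigned pindex = 37;
	unsigned bucket = ((unsigned)object + pindex) & hash_mask;

	printf("bucket %u\n", bucket);
	return (0);
}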
/*
@ -369,10 +372,10 @@ vm_page_hash(vm_object_t object, vm_offset_t offset)
*/
inline void
vm_page_insert(mem, object, offset)
vm_page_insert(mem, object, pindex)
register vm_page_t mem;
register vm_object_t object;
register vm_offset_t offset;
register vm_pindex_t pindex;
{
register struct pglist *bucket;
@ -384,13 +387,13 @@ vm_page_insert(mem, object, offset)
*/
mem->object = object;
mem->offset = offset;
mem->pindex = pindex;
/*
* Insert it into the object_object/offset hash table
*/
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
TAILQ_INSERT_TAIL(bucket, mem, hashq);
/*
@ -430,7 +433,7 @@ vm_page_remove(mem)
* Remove from the object_object/offset hash table
*/
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->pindex)];
TAILQ_REMOVE(bucket, mem, hashq);
/*
@ -458,9 +461,9 @@ vm_page_remove(mem)
*/
vm_page_t
vm_page_lookup(object, offset)
vm_page_lookup(object, pindex)
register vm_object_t object;
register vm_offset_t offset;
register vm_pindex_t pindex;
{
register vm_page_t mem;
register struct pglist *bucket;
@ -470,11 +473,11 @@ vm_page_lookup(object, offset)
* Search the hash table for this object/offset pair
*/
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
s = splhigh();
for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
if ((mem->object == object) && (mem->offset == offset)) {
if ((mem->object == object) && (mem->pindex == pindex)) {
splx(s);
return (mem);
}
@ -493,19 +496,16 @@ vm_page_lookup(object, offset)
* The object must be locked.
*/
void
vm_page_rename(mem, new_object, new_offset)
vm_page_rename(mem, new_object, new_pindex)
register vm_page_t mem;
register vm_object_t new_object;
vm_offset_t new_offset;
vm_pindex_t new_pindex;
{
int s;
if (mem->object == new_object)
return;
s = splhigh();
vm_page_remove(mem);
vm_page_insert(mem, new_object, new_offset);
vm_page_insert(mem, new_object, new_pindex);
splx(s);
}
@ -556,24 +556,19 @@ vm_page_unqueue(vm_page_t mem)
* Object must be locked.
*/
vm_page_t
vm_page_alloc(object, offset, page_req)
vm_page_alloc(object, pindex, page_req)
vm_object_t object;
vm_offset_t offset;
vm_pindex_t pindex;
int page_req;
{
register vm_page_t mem;
int s;
#ifdef DIAGNOSTIC
if (offset != trunc_page(offset))
panic("vm_page_alloc: offset not page aligned");
#if 0
mem = vm_page_lookup(object, offset);
/* #ifdef DIAGNOSTIC */
mem = vm_page_lookup(object, pindex);
if (mem)
panic("vm_page_alloc: page already allocated");
#endif
#endif
/* #endif */
if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
page_req = VM_ALLOC_SYSTEM;
@ -587,6 +582,7 @@ vm_page_alloc(object, offset, page_req)
if (page_req & VM_ALLOC_ZERO) {
mem = vm_page_queue_zero.tqh_first;
if (mem) {
--vm_page_zero_count;
TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
mem->flags = PG_BUSY|PG_ZERO;
} else {
@ -600,6 +596,7 @@ vm_page_alloc(object, offset, page_req)
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
mem->flags = PG_BUSY;
} else {
--vm_page_zero_count;
mem = vm_page_queue_zero.tqh_first;
TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
mem->flags = PG_BUSY|PG_ZERO;
@ -628,6 +625,7 @@ vm_page_alloc(object, offset, page_req)
if (page_req & VM_ALLOC_ZERO) {
mem = vm_page_queue_zero.tqh_first;
if (mem) {
--vm_page_zero_count;
TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
mem->flags = PG_BUSY|PG_ZERO;
} else {
@ -641,6 +639,7 @@ vm_page_alloc(object, offset, page_req)
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
mem->flags = PG_BUSY;
} else {
--vm_page_zero_count;
mem = vm_page_queue_zero.tqh_first;
TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
mem->flags = PG_BUSY|PG_ZERO;
@ -669,6 +668,7 @@ vm_page_alloc(object, offset, page_req)
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
mem->flags = PG_BUSY;
} else {
--vm_page_zero_count;
mem = vm_page_queue_zero.tqh_first;
TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
mem->flags = PG_BUSY|PG_ZERO;
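
Each branch above that pulls a page off vm_page_queue_zero now decrements vm_page_zero_count first, so the count of pre-zeroed pages stays in step with the queue. A minimal sketch of that counted-queue idiom using the same <sys/queue.h> macros; the structure and function names are illustrative:

#include <sys/queue.h>
#include <stdio.h>

struct page {
	TAILQ_ENTRY(page) pageq;
};

static TAILQ_HEAD(, page) zero_queue = TAILQ_HEAD_INITIALIZER(zero_queue);
static int zero_count;		/* mirrors vm_page_zero_count */

/* take a pre-zeroed page, keeping the count in step with the list */
static struct page *
zero_queue_get(void)
{
	struct page *m = zero_queue.tqh_first;

	if (m != NULL) {
		--zero_count;
		TAILQ_REMOVE(&zero_queue, m, pageq);
	}
	return (m);
}

/* return a page to the zero queue */
static void
zero_queue_put(struct page *m)
{
	TAILQ_INSERT_TAIL(&zero_queue, m, pageq);
	++zero_count;
}

int main(void)
{
	struct page p;

	zero_queue_put(&p);
	(void)zero_queue_get();
	printf("zero_count = %d\n", zero_count);	/* back to 0 */
	return 0;
}
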
@ -694,7 +694,7 @@ vm_page_alloc(object, offset, page_req)
mem->bmapped = 0;
/* XXX before splx until vm_page_insert is safe */
vm_page_insert(mem, object, offset);
vm_page_insert(mem, object, pindex);
splx(s);
@ -777,7 +777,8 @@ vm_page_alloc_contig(size, low, high, alignment)
m->act_count = 0;
m->bmapped = 0;
m->busy = 0;
vm_page_insert(m, kernel_object, tmp_addr - VM_MIN_KERNEL_ADDRESS);
vm_page_insert(m, kernel_object,
OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
vm_page_wire(m);
pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
tmp_addr += PAGE_SIZE;
@ -810,8 +811,8 @@ vm_page_free(mem)
if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
if (flags & PG_FREE)
panic("vm_page_free: freeing free page");
printf("vm_page_free: offset(%ld), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
printf("vm_page_free: pindex(%ld), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
mem->pindex, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
panic("vm_page_free: freeing busy page");
}


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_page.h,v 1.21 1995/10/23 04:29:39 dyson Exp $
* $Id: vm_page.h,v 1.22 1995/11/20 12:19:32 phk Exp $
*/
/*
@ -105,7 +105,7 @@ struct vm_page {
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */
vm_object_t object; /* which object am I in (O,P) */
vm_offset_t offset; /* offset into object (O,P) */
vm_pindex_t pindex; /* offset into object (O,P) */
vm_offset_t phys_addr; /* physical address of page */
u_short wire_count; /* wired down maps refs (P) */
@ -180,6 +180,8 @@ extern struct pglist vm_page_queue_active; /* active memory queue */
extern struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern struct pglist vm_page_queue_cache; /* cache memory queue */
extern int vm_page_zero_count;
extern vm_page_t vm_page_array; /* First resident page in table */
extern long first_page; /* first physical page number */
@ -230,15 +232,15 @@ extern vm_offset_t last_phys_addr; /* physical address for last_page */
#define VM_ALLOC_ZERO 0x80
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t, int));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((register vm_page_t));
void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));


@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.60 1995/11/20 12:19:26 phk Exp $
* $Id: vm_pageout.c,v 1.61 1995/12/07 12:48:24 davidg Exp $
*/
/*
@ -140,7 +140,7 @@ extern int vfs_update_wakeup;
#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)
#define VM_PAGEOUT_PAGE_COUNT 8
#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
int vm_page_max_wired; /* XXX max # of wired pages system-wide */
@ -173,7 +173,7 @@ vm_pageout_clean(m, sync)
vm_page_t mc[2*VM_PAGEOUT_PAGE_COUNT];
int pageout_count;
int i, forward_okay, backward_okay, page_base;
vm_offset_t offset = m->offset;
vm_pindex_t pindex = m->pindex;
object = m->object;
@ -203,7 +203,7 @@ vm_pageout_clean(m, sync)
pageout_count = 1;
page_base = VM_PAGEOUT_PAGE_COUNT;
forward_okay = TRUE;
if (offset != 0)
if (pindex != 0)
backward_okay = TRUE;
else
backward_okay = FALSE;
@ -228,11 +228,11 @@ vm_pageout_clean(m, sync)
/*
* Stop forward scan at end of object.
*/
if ((offset + i * PAGE_SIZE) > object->size) {
if ((pindex + i) > object->size) {
forward_okay = FALSE;
goto do_backward;
}
p = vm_page_lookup(object, offset + i * PAGE_SIZE);
p = vm_page_lookup(object, pindex + i);
if (p) {
if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
forward_okay = FALSE;
@ -263,10 +263,10 @@ vm_pageout_clean(m, sync)
/*
* Stop backward scan at beginning of object.
*/
if ((offset - i * PAGE_SIZE) == 0) {
if ((pindex - i) == 0) {
backward_okay = FALSE;
}
p = vm_page_lookup(object, offset - i * PAGE_SIZE);
p = vm_page_lookup(object, pindex - i);
if (p) {
if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
backward_okay = FALSE;
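
With pages named by index, the clustering scan above walks pindex + i and pindex - i directly instead of scaling by PAGE_SIZE on every probe. A simplified sketch of the run-building idea; the kernel interleaves the two directions and tests busy/cache/dirty state, so the eligibility check here is only a stand-in:

#include <stdio.h>
#include <stdbool.h>

#define CLUSTER_MAX 8	/* cf. vm_pageout_page_count */

/* stand-in for "page exists, is dirty, and is not busy" */
static bool
eligible(unsigned pindex, unsigned object_size)
{
	return (pindex < object_size && pindex % 5 != 0);
}

/* collect a contiguous run of page indices around a target page */
static int
build_cluster(unsigned pindex, unsigned object_size, unsigned out[])
{
	int fwd = 0, back = 0, i, n;

	while (fwd < CLUSTER_MAX - 1 && eligible(pindex + 1 + fwd, object_size))
		fwd++;				/* stop at EOF or ineligible page */
	while (back < CLUSTER_MAX - 1 && pindex > (unsigned)back &&
	    eligible(pindex - 1 - back, object_size))
		back++;				/* stop at index 0 */

	n = back + 1 + fwd;
	for (i = 0; i < n; i++)
		out[i] = pindex - back + i;
	return (n);
}

int main(void)
{
	unsigned run[2 * CLUSTER_MAX];
	int i, n = build_cluster(12, 64, run);

	for (i = 0; i < n; i++)
		printf("%u ", run[i]);		/* 11 12 13 14 */
	printf("\n");
	return 0;
}
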
@ -672,6 +672,7 @@ vm_pageout_scan()
* scanning again
*/
if ((next->flags & PG_INACTIVE) == 0) {
vm_pager_sync();
goto rescan1;
}
}
@ -710,11 +711,13 @@ vm_pageout_scan()
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m = next;
/* printf("busy: s: %d, f: 0x%x, h: %d\n",
m->busy, m->flags, m->hold_count); */
continue;
}
if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
if (m->object->ref_count &&
((m->flags & (PG_REFERENCED|PG_WANTED)) ||
pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->flags &= ~PG_REFERENCED;
if (m->act_count < ACT_MAX) {


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.18 1995/11/20 12:19:19 phk Exp $
* $Id: vm_pager.c,v 1.19 1995/12/07 12:48:26 davidg Exp $
*/
/*
@ -162,7 +162,7 @@ vm_pager_allocate(type, handle, size, prot, off)
void *handle;
vm_size_t size;
vm_prot_t prot;
vm_offset_t off;
vm_ooffset_t off;
{
struct pagerops *ops;
@ -204,7 +204,7 @@ vm_pager_put_pages(object, m, count, sync, rtvals)
boolean_t
vm_pager_has_page(object, offset, before, after)
vm_object_t object;
vm_offset_t offset;
vm_pindex_t offset;
int *before;
int *after;
{


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
* $Id: vm_pager.h,v 1.9 1995/07/29 11:44:30 bde Exp $
* $Id: vm_pager.h,v 1.10 1995/11/20 12:19:16 phk Exp $
*/
/*
@ -50,11 +50,11 @@ TAILQ_HEAD(pagerlst, vm_object);
struct pagerops {
void (*pgo_init) __P((void)); /* Initialize pager. */
vm_object_t (*pgo_alloc) __P((void *, vm_size_t, vm_prot_t, vm_offset_t)); /* Allocate pager. */
vm_object_t (*pgo_alloc) __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t)); /* Allocate pager. */
void (*pgo_dealloc) __P((vm_object_t)); /* Disassociate. */
int (*pgo_getpages) __P((vm_object_t, vm_page_t *, int, int)); /* Get (read) page. */
int (*pgo_putpages) __P((vm_object_t, vm_page_t *, int, boolean_t, int *)); /* Put (write) page. */
boolean_t (*pgo_haspage) __P((vm_object_t, vm_offset_t, int *, int *)); /* Does pager have page? */
boolean_t (*pgo_haspage) __P((vm_object_t, vm_pindex_t, int *, int *)); /* Does pager have page? */
void (*pgo_sync) __P((void));
};
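
struct pagerops is the dispatch table that each pager type (vnode, device, swap, default) fills in, which is why widening pgo_alloc and pgo_haspage here changes the contract for every backend at once. A minimal sketch of the function-pointer table pattern; all names below are illustrative:

#include <stdio.h>

struct store;				/* one backing-store instance */

struct storeops {			/* cf. struct pagerops */
	int  (*so_get)(struct store *, int);
	void (*so_put)(struct store *, int, int);
};

struct store {
	const struct storeops *so_ops;	/* filled in per backend */
};

static int
mem_get(struct store *sp, int idx)
{
	(void)sp;
	return (idx * idx);		/* toy backend */
}

static void
mem_put(struct store *sp, int idx, int val)
{
	(void)sp;
	printf("put %d at %d\n", val, idx);
}

static const struct storeops mem_ops = { mem_get, mem_put };

int main(void)
{
	struct store s = { &mem_ops };

	/* callers dispatch through the table, never into a backend directly */
	s.so_ops->so_put(&s, 3, s.so_ops->so_get(&s, 3));
	return 0;
}
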
@ -78,11 +78,11 @@ struct pagerops {
extern vm_map_t pager_map;
extern int pager_map_size;
vm_object_t vm_pager_allocate __P((objtype_t, void *, vm_size_t, vm_prot_t, vm_offset_t));
vm_object_t vm_pager_allocate __P((objtype_t, void *, vm_size_t, vm_prot_t, vm_ooffset_t));
void vm_pager_bufferinit __P((void));
void vm_pager_deallocate __P((vm_object_t));
int vm_pager_get_pages __P((vm_object_t, vm_page_t *, int, int));
boolean_t vm_pager_has_page __P((vm_object_t, vm_offset_t, int *, int *));
boolean_t vm_pager_has_page __P((vm_object_t, vm_pindex_t, int *, int *));
void vm_pager_init __P((void));
vm_object_t vm_pager_object_lookup __P((struct pagerlst *, void *));
vm_offset_t vm_pager_map_pages __P((vm_page_t *, int, boolean_t));


@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.53 1995/11/20 12:19:11 phk Exp $
* $Id: vnode_pager.c,v 1.54 1995/12/07 12:48:31 davidg Exp $
*/
/*
@ -71,7 +71,7 @@
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
extern vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_offset_t address,
extern vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address,
int *run));
extern void vnode_pager_iodone __P((struct buf *bp));
extern int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m));
@ -102,7 +102,7 @@ vnode_pager_alloc(handle, size, prot, offset)
void *handle;
vm_size_t size;
vm_prot_t prot;
vm_offset_t offset;
vm_ooffset_t offset;
{
vm_object_t object;
struct vnode *vp;
@ -137,14 +137,14 @@ vnode_pager_alloc(handle, size, prot, offset)
/*
* And an object of the appropriate size
*/
object = vm_object_allocate(OBJT_VNODE, round_page(size));
object = vm_object_allocate(OBJT_VNODE, size);
object->flags = OBJ_CANPERSIST;
/*
* Hold a reference to the vnode and initialize object data.
*/
VREF(vp);
object->un_pager.vnp.vnp_size = size;
object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE;
object->handle = handle;
vp->v_object = object;
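
Note the cast in the new vnp_size computation: size is now a page count, and a product of two 32-bit operands would wrap at 4GB before it ever reached the 64-bit field. A small demonstration of the pitfall, assuming 32-bit int and 4K pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096

int main(void)
{
	uint32_t npages = 1u << 20;	/* 4GB worth of 4K pages */

	int64_t wrong = npages * PAGE_SIZE;		/* 32-bit multiply wraps to 0 */
	int64_t right = (int64_t)npages * PAGE_SIZE;	/* widened first: 2^32 */

	printf("wrong = %lld, right = %lld\n",
	    (long long)wrong, (long long)right);
	return 0;
}
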
@ -194,9 +194,9 @@ vnode_pager_dealloc(object)
}
boolean_t
vnode_pager_haspage(object, offset, before, after)
vnode_pager_haspage(object, pindex, before, after)
vm_object_t object;
vm_offset_t offset;
vm_pindex_t pindex;
int *before;
int *after;
{
@ -212,19 +212,20 @@ vnode_pager_haspage(object, offset, before, after)
* If filesystem no longer mounted or offset beyond end of file we do
* not have the page.
*/
if ((vp->v_mount == NULL) || (offset >= object->un_pager.vnp.vnp_size))
if ((vp->v_mount == NULL) ||
(IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
return FALSE;
bsize = vp->v_mount->mnt_stat.f_iosize;
pagesperblock = bsize / PAGE_SIZE;
reqblock = offset / bsize;
reqblock = pindex / pagesperblock;
err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
after, before);
if (err)
return TRUE;
if ( bn == -1)
return FALSE;
poff = (offset - (reqblock * bsize)) / PAGE_SIZE;
poff = pindex - (reqblock * pagesperblock);
if (before) {
*before *= pagesperblock;
*before += poff;
@ -233,8 +234,8 @@ vnode_pager_haspage(object, offset, before, after)
int numafter;
*after *= pagesperblock;
numafter = pagesperblock - (poff + 1);
if (offset + numafter * PAGE_SIZE > object->un_pager.vnp.vnp_size) {
numafter = (object->un_pager.vnp.vnp_size - offset)/PAGE_SIZE;
if (IDX_TO_OFF(pindex + numafter) > object->un_pager.vnp.vnp_size) {
numafter = OFF_TO_IDX((object->un_pager.vnp.vnp_size - IDX_TO_OFF(pindex)));
}
*after += numafter;
}
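
The arithmetic above maps a page index onto filesystem blocks: reqblock is the block holding the page, poff its page offset within that block, and the before/after counts from VOP_BMAP are rescaled from blocks to pages and clamped at end of file. Worked through with illustrative numbers (8K blocks, 4K pages, a 50000-byte file):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096
#define PAGE_SHIFT 12
#define IDX_TO_OFF(idx) (((int64_t)(idx)) << PAGE_SHIFT)

int main(void)
{
	int64_t vnp_size = 50000;	/* file size in bytes (12+ pages) */
	int bsize = 8192;		/* f_iosize: filesystem block size */
	int pagesperblock = bsize / PAGE_SIZE;		/* 2 */
	uint32_t pindex = 12;		/* page being asked about */

	uint32_t reqblock = pindex / pagesperblock;	/* fs block 6 */
	int poff = pindex - reqblock * pagesperblock;	/* page 0 in block */

	/* pages readable after this one within the block, clamped at EOF */
	int numafter = pagesperblock - (poff + 1);	/* 1 before clamping */
	if (IDX_TO_OFF(pindex + numafter) > vnp_size)
		numafter = (int)((vnp_size - IDX_TO_OFF(pindex)) >> PAGE_SHIFT);

	printf("block %u, poff %d, numafter %d\n", reqblock, poff, numafter);
	return 0;	/* block 6, poff 0, numafter 0: page 13 is past EOF */
}
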
@ -252,7 +253,7 @@ vnode_pager_haspage(object, offset, before, after)
void
vnode_pager_setsize(vp, nsize)
struct vnode *vp;
u_long nsize;
vm_ooffset_t nsize;
{
vm_object_t object = vp->v_object;
@ -269,9 +270,13 @@ vnode_pager_setsize(vp, nsize)
* File has shrunk. Toss any cached pages beyond the new EOF.
*/
if (nsize < object->un_pager.vnp.vnp_size) {
if (round_page((vm_offset_t) nsize) < object->un_pager.vnp.vnp_size) {
vm_ooffset_t nsizerounded;
nsizerounded = IDX_TO_OFF(OFF_TO_IDX(nsize + PAGE_SIZE - 1));
if (nsizerounded < object->un_pager.vnp.vnp_size) {
vm_object_page_remove(object,
round_page((vm_offset_t) nsize), object->un_pager.vnp.vnp_size, FALSE);
OFF_TO_IDX(nsize + PAGE_SIZE - 1),
OFF_TO_IDX(object->un_pager.vnp.vnp_size),
FALSE);
}
/*
* this gets rid of garbage at the end of a page that is now
@ -281,17 +286,17 @@ vnode_pager_setsize(vp, nsize)
vm_offset_t kva;
vm_page_t m;
m = vm_page_lookup(object, trunc_page((vm_offset_t) nsize));
m = vm_page_lookup(object, OFF_TO_IDX(nsize));
if (m) {
kva = vm_pager_map_page(m);
bzero((caddr_t) kva + (nsize & PAGE_MASK),
round_page(nsize) - nsize);
(int) (round_page(nsize) - nsize));
vm_pager_unmap_page(kva);
}
}
}
object->un_pager.vnp.vnp_size = (vm_offset_t) nsize;
object->size = round_page(nsize);
object->un_pager.vnp.vnp_size = nsize;
object->size = OFF_TO_IDX(nsize + PAGE_SIZE - 1);
}
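
OFF_TO_IDX(nsize + PAGE_SIZE - 1) is the page-index analogue of round_page(): a ceiling division giving the number of pages needed to cover nsize bytes. A quick check of the edge cases, with the macro restated locally for the sketch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096
#define PAGE_SHIFT 12
#define OFF_TO_IDX(off) ((uint32_t)(((int64_t)(off)) >> PAGE_SHIFT))

int main(void)
{
	int64_t sizes[] = { 0, 1, 4096, 4097 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%lld bytes -> %u pages\n", (long long)sizes[i],
		    OFF_TO_IDX(sizes[i] + PAGE_SIZE - 1));
	return 0;	/* 0, 1, 1, 2 pages respectively */
}
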
void
@ -368,15 +373,16 @@ vnode_pager_freepage(m)
vm_offset_t
vnode_pager_addr(vp, address, run)
struct vnode *vp;
vm_offset_t address;
vm_ooffset_t address;
int *run;
{
int rtaddress;
int bsize;
vm_offset_t block;
daddr_t block;
struct vnode *rtvp;
int err;
int vblock, voffset;
daddr_t vblock;
int voffset;
if ((int) address < 0)
return -1;
@ -445,10 +451,11 @@ vnode_pager_input_smlfs(object, m)
for (i = 0; i < PAGE_SIZE / bsize; i++) {
if ((vm_page_bits(m->offset + i * bsize, bsize) & m->valid))
if ((vm_page_bits(IDX_TO_OFF(m->pindex) + i * bsize, bsize) & m->valid))
continue;
fileaddr = vnode_pager_addr(vp, m->offset + i * bsize, (int *)0);
fileaddr = vnode_pager_addr(vp,
IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
if (fileaddr != -1) {
bp = getpbuf();
@ -523,12 +530,12 @@ vnode_pager_input_old(object, m)
/*
* Return failure if beyond current EOF
*/
if (m->offset >= object->un_pager.vnp.vnp_size) {
if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
return VM_PAGER_BAD;
} else {
size = PAGE_SIZE;
if (m->offset + size > object->un_pager.vnp.vnp_size)
size = object->un_pager.vnp.vnp_size - m->offset;
if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
/*
* Allocate a kernel virtual address and initialize so that
@ -540,7 +547,7 @@ vnode_pager_input_old(object, m)
aiov.iov_len = size;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = m->offset;
auio.uio_offset = IDX_TO_OFF(m->pindex);
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_rw = UIO_READ;
auio.uio_resid = size;
@ -591,7 +598,8 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
int count;
int reqpage;
{
vm_offset_t kva, foff;
vm_offset_t kva;
off_t foff;
int i, size, bsize, first, firstaddr;
struct vnode *dp, *vp;
int runpg;
@ -612,7 +620,7 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
* originally, we did not check for an error return value -- assuming
* an fs always has a bmap entry point -- that assumption is wrong!!!
*/
foff = m[reqpage]->offset;
foff = IDX_TO_OFF(m[reqpage]->pindex);
/*
* if we can't bmap, use old VOP code
@ -666,7 +674,8 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
* calculate the run that includes the required page
*/
for(first = 0, i = 0; i < count; i = runend) {
firstaddr = vnode_pager_addr(vp, m[i]->offset, &runpg);
firstaddr = vnode_pager_addr(vp,
IDX_TO_OFF(m[i]->pindex), &runpg);
if (firstaddr == -1) {
if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
panic("vnode_pager_putpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d",
@ -709,7 +718,7 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
/*
* calculate the file virtual address for the transfer
*/
foff = m[0]->offset;
foff = IDX_TO_OFF(m[0]->pindex);
/*
* calculate the size of the transfer
@ -840,6 +849,7 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
struct vnode *vp;
int maxsize, ncount;
vm_ooffset_t poffset;
struct uio auio;
struct iovec aiov;
int error;
@ -848,8 +858,8 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
for (i = 0; i < count; i++)
rtvals[i] = VM_PAGER_AGAIN;
if ((int) m[0]->offset < 0) {
printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->offset, m[0]->dirty);
if ((int) m[0]->pindex < 0) {
printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->pindex, m[0]->dirty);
rtvals[0] = VM_PAGER_BAD;
return VM_PAGER_BAD;
}
@ -857,9 +867,10 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
maxsize = count * PAGE_SIZE;
ncount = count;
if (maxsize + m[0]->offset > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > m[0]->offset)
maxsize = object->un_pager.vnp.vnp_size - m[0]->offset;
poffset = IDX_TO_OFF(m[0]->pindex);
if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > poffset)
maxsize = object->un_pager.vnp.vnp_size - poffset;
else
maxsize = 0;
ncount = (maxsize + PAGE_SIZE - 1) / PAGE_SIZE;
@ -867,12 +878,14 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
for (i = ncount; i < count; i++) {
rtvals[i] = VM_PAGER_BAD;
}
#ifdef BOGUS
if (ncount == 0) {
printf("vnode_pager_putpages: write past end of file: %ld, %ld\n",
m[0]->offset,
object->un_pager.vnp.vnp_size);
printf("vnode_pager_putpages: write past end of file: %d, %lu\n",
poffset,
(unsigned long) object->un_pager.vnp.vnp_size);
return rtvals[0];
}
#endif
}
}
@ -885,12 +898,12 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
aiov.iov_len = maxsize;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = m[0]->offset;
auio.uio_offset = poffset;
auio.uio_segflg = UIO_NOCOPY;
auio.uio_rw = UIO_WRITE;
auio.uio_resid = maxsize;
auio.uio_procp = (struct proc *) 0;
error = VOP_WRITE(vp, &auio, IO_VMIO, curproc->p_ucred);
error = VOP_WRITE(vp, &auio, IO_VMIO|(sync?IO_SYNC:0), curproc->p_ucred);
cnt.v_vnodeout++;
cnt.v_vnodepgsout += ncount;
@ -898,8 +911,8 @@ vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
printf("vnode_pager_putpages: I/O error %d\n", error);
}
if (auio.uio_resid) {
printf("vnode_pager_putpages: residual I/O %d at %ld\n",
auio.uio_resid, m[0]->offset);
printf("vnode_pager_putpages: residual I/O %d at %d\n",
auio.uio_resid, m[0]->pindex);
}
for (i = 0; i < count; i++) {
m[i]->busy--;


@ -36,18 +36,18 @@
* SUCH DAMAGE.
*
* @(#)vnode_pager.h 8.1 (Berkeley) 6/11/93
* $Id: vnode_pager.h,v 1.5 1995/07/13 08:48:48 davidg Exp $
* $Id: vnode_pager.h,v 1.6 1995/09/06 05:37:43 dyson Exp $
*/
#ifndef _VNODE_PAGER_
#define _VNODE_PAGER_ 1
#ifdef KERNEL
vm_object_t vnode_pager_alloc __P((void *, vm_size_t, vm_prot_t, vm_offset_t));
vm_object_t vnode_pager_alloc __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t));
void vnode_pager_dealloc __P((vm_object_t));
int vnode_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
int vnode_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
boolean_t vnode_pager_haspage __P((vm_object_t, vm_offset_t, int *, int *));
boolean_t vnode_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
struct vnode *vnode_pager_lock __P((vm_object_t));
void vnode_pager_freepage __P((vm_page_t m));
#endif