Some VM improvements, including elimination of a lot of Sig-11 problems. Tor Egge and others have helped with various VM bugs lately, but don't blame him -- blame me!!!

pmap.c:
1) Create an object for kernel page table allocations. This fixes the bogus allocation method previously used for such pages, which grabbed pages from the kernel object using bogus pindexes. (This was a code cleanup, and perhaps a minor system stability issue.)
2) Pre-set the modify and accessed bits when prudent. This will decrease bus traffic under certain circumstances.

vfs_bio.c, vfs_cluster.c:
3) Rather than calculating the beginning virtual byte offset multiple times, stick the offset into the buffer header, so that the calculated offset can be reused. (Long long multiplies are often expensive, so this is a probably unmeasurable performance improvement as well as a code cleanup.)

vfs_bio.c:
4) Handle write recursion more intelligently (but not perfectly), so that it is less likely to cause a system panic and is also much more robust.
5) getblk incorrectly wrote out blocks of the wrong size. The problem is fixed, and blocks are now written out ONLY when B_DELWRI is true.
6) Check that already-constituted buffers have fully valid pages. If not, make sure that the B_CACHE bit is not set. (This was a major source of Sig-11 type problems.)
7) Fix a potential system deadlock due to an incorrectly specified sleep priority while waiting for a buffer write operation. The change that I made opens the system up to serious problems, and we need to examine the issue of process sleep priorities.

vfs_cluster.c, vfs_bio.c:
8) Make clustered reads work more correctly (and more completely) when buffers are already constituted but not fully valid. (This was another system reliability issue.)

vfs_subr.c, ffs_inode.c:
9) Create a vtruncbuf function, which is used by filesystems that can truncate files. The old vinvalbuf approach forced a file sync type operation, while vtruncbuf only invalidates the buffers past the new end of file, and also invalidates the appropriate pages. (This was a system reliability and performance issue.)
10) Modify FFS to use vtruncbuf.

vm_object.c:
11) Make the object rundown mechanism for OBJT_VNODE type objects work more correctly. As part of that fix, create pager entries for the OBJT_DEAD pager type, so that paging requests that might slip in during race conditions are properly handled. (This was a system reliability issue.)

vm_page.c:
12) Make some of the page validation routines a little less picky about the arguments passed to them. Also, have page invalidation bump the object generation count, so that generation counts are handled a little more robustly.

vm_pageout.c:
13) Further reduce pageout daemon activity when the system doesn't need help from it. There should be no additional performance decrease even when the pageout daemon is running. (This was a significant performance issue.)

vnode_pager.c:
14) Teach the vnode pager to handle race conditions during vnode deallocations.
parent 7db79065bd
commit bef608bd7e
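For item 9 above, the new interface is the one declared in the sys/vnode.h hunk further down. The following is only a minimal sketch (not part of the commit) of how a truncating filesystem is expected to call it, modeled on the ffs_truncate() change in this diff; the helper name truncate_example and its trivial error handling are illustrative assumptions.

/*
 * Sketch only: vtruncbuf() as declared in the vnode.h hunk below.  Unlike
 * vinvalbuf(), it invalidates only the buffers and pages past the new end
 * of file instead of forcing a full sync of the vnode.
 */
int vtruncbuf __P((struct vnode *vp, struct ucred *cred, struct proc *p,
	off_t length, int blksize));

/*
 * Hypothetical caller, modeled on the ffs_truncate() change below:
 * drop everything past `length', using the filesystem block size.
 */
static int
truncate_example(struct vnode *vp, struct ucred *cred, struct proc *p,
    off_t length, int blksize)
{
	/* Replaces the old "(void) vinvalbuf(vp, 0, cred, p, 0, 0);" call. */
	return (vtruncbuf(vp, cred, p, length, blksize));
}
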
pmap.c:
@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.188 1998/03/07 21:34:44 dyson Exp $
* $Id: pmap.c,v 1.189 1998/03/09 22:09:13 eivind Exp $
*/

/*
@@ -157,6 +157,8 @@ static int pgeflag; /* PG_G or-in */
static int pseflag; /* PG_PS or-in */
static int pv_npg;

static vm_object_t kptobj;

static int nkpt;
vm_offset_t kernel_vm_end;

@@ -527,6 +529,10 @@ pmap_init(phys_start, phys_end)
pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
initial_pvs * sizeof (struct pv_entry));
zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg);
/*
* object for kernel page table pages
*/
kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);

/*
* Now it is safe to enable pv_table recording.
@@ -1156,7 +1162,7 @@ pmap_pinit(pmap)

/* install self-referential address mapping entry */
*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW;
VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;

pmap->pm_flags = 0;
pmap->pm_count = 1;
@@ -1256,7 +1262,7 @@ _pmap_allocpte(pmap, ptepindex)

ptepa = VM_PAGE_TO_PHYS(m);
pmap->pm_pdir[ptepindex] =
(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A);
(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);

/*
* Set the page table hint
@@ -1390,13 +1396,12 @@ pmap_growkernel(vm_offset_t addr)
struct proc *p;
struct pmap *pmap;
int s;
vm_offset_t ptpkva, ptppaddr;
vm_offset_t ptppaddr;
vm_page_t nkpg;
#ifdef SMP
int i;
#endif
pd_entry_t newpdir;
vm_pindex_t ptpidx;

s = splhigh();
if (kernel_vm_end == 0) {
@@ -1413,23 +1418,22 @@ pmap_growkernel(vm_offset_t addr)
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
continue;
}
nkpt++;
ptpkva = (vm_offset_t) vtopte(addr);
ptpidx = (ptpkva >> PAGE_SHIFT);

/*
* This index is bogus, but out of the way
*/
nkpg = vm_page_alloc(kernel_object, ptpidx, VM_ALLOC_SYSTEM);
nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_SYSTEM);
#if !defined(MAX_PERF)
if (!nkpg)
panic("pmap_growkernel: no memory to grow kernel");
#endif

nkpt++;

vm_page_wire(nkpg);
vm_page_remove(nkpg);
ptppaddr = VM_PAGE_TO_PHYS(nkpg);
pmap_zero_page(ptppaddr);
newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW);
newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
pdir_pde(PTD, kernel_vm_end) = newpdir;

#ifdef SMP
@@ -2163,7 +2167,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
* to update the pte.
*/
if ((origpte & ~(PG_M|PG_A)) != newpte) {
*pte = newpte;
*pte = newpte | PG_A;
if (origpte)
invltlb_1pg(va);
}
@@ -3065,25 +3069,32 @@ pmap_ts_referenced(vm_offset_t pa)
pv;
pv = TAILQ_NEXT(pv, pv_list)) {

TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
/*
* if the bit being tested is the modified bit, then
* mark clean_map and ptes as never
* modified.
*/
if (!pmap_track_modified(pv->pv_va))
if (!pmap_track_modified(pv->pv_va)) {
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
continue;
}

pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
continue;
}

if (*pte & PG_A) {
rtval++;
*pte &= ~PG_A;
if (rtval > 16)
if (rtval > 4) {
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
break;
}
}
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
}

splx(s);

vfs_bio.c:
@@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.154 1998/03/07 21:35:24 dyson Exp $
* $Id: vfs_bio.c,v 1.155 1998/03/08 09:57:04 julian Exp $
*/

/*
@@ -644,26 +644,18 @@ brelse(struct buf * bp)
vm_pindex_t poff;
vm_object_t obj;
struct vnode *vp;
int blksize;

vp = bp->b_vp;

if (vp->v_type == VBLK)
blksize = DEV_BSIZE;
else
blksize = vp->v_mount->mnt_stat.f_iosize;

resid = bp->b_bufsize;
foff = -1LL;
foff = bp->b_offset;

for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {

obj = (vm_object_t) vp->v_object;

foff = (off_t) bp->b_lblkno * blksize;
poff = OFF_TO_IDX(foff);
poff = OFF_TO_IDX(bp->b_offset);

for (j = i; j < bp->b_npages; j++) {
m = bp->b_pages[j];
@@ -684,13 +676,10 @@ brelse(struct buf * bp)
break;
}
if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
if ((blksize & PAGE_MASK) == 0) {
vm_page_set_invalid(m, 0, resid);
} else {
if (foff == -1LL)
foff = (off_t) bp->b_lblkno * blksize;
vm_page_set_invalid(m, (vm_offset_t) foff, resid);
}
int poffset = foff & PAGE_MASK;
int presid = resid > (PAGE_SIZE - poffset) ?
(PAGE_SIZE - poffset) : resid;
vm_page_set_invalid(m, poffset, presid);
}
resid -= PAGE_SIZE;
}
@@ -1014,7 +1003,7 @@ getnewbuf(struct vnode *vp, daddr_t blkno,
/* wait for a free buffer of any kind */
needsbuffer |= VFS_BIO_NEED_ANY;
do
tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
slptimeo);
while (needsbuffer & VFS_BIO_NEED_ANY);
return (0);
@@ -1076,22 +1065,35 @@ getnewbuf(struct vnode *vp, daddr_t blkno,
* deadlocking.
*/
if (writerecursion > 0) {
bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
while (bp) {
if ((bp->b_flags & B_DELWRI) == 0)
break;
bp = TAILQ_NEXT(bp, b_freelist);
}
if (bp == NULL) {
bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
if (writerecursion > 5) {
bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
while (bp) {
if ((bp->b_flags & B_DELWRI) == 0)
break;
bp = TAILQ_NEXT(bp, b_freelist);
}
if (bp == NULL) {
bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
while (bp) {
if ((bp->b_flags & B_DELWRI) == 0)
break;
bp = TAILQ_NEXT(bp, b_freelist);
}
}
if (bp == NULL)
panic("getnewbuf: cannot get buffer, infinite recursion failure");
} else {
bremfree(bp);
bp->b_flags |= B_BUSY | B_AGE | B_ASYNC;
nbyteswritten += bp->b_bufsize;
++writerecursion;
VOP_BWRITE(bp);
--writerecursion;
if (!slpflag && !slptimeo) {
return (0);
}
goto start;
}
if (bp == NULL)
panic("getnewbuf: cannot get buffer, infinite recursion failure");
} else {
++writerecursion;
nbyteswritten += vfs_bio_awrite(bp);
@@ -1143,6 +1145,7 @@ getnewbuf(struct vnode *vp, daddr_t blkno,
bp->b_dev = NODEV;
bp->b_vp = NULL;
bp->b_blkno = bp->b_lblkno = 0;
bp->b_offset = 0;
bp->b_iodone = 0;
bp->b_error = 0;
bp->b_resid = 0;
@@ -1230,7 +1233,7 @@ waitfreebuffers(int slpflag, int slptimeo) {
if (numfreebuffers < hifreebuffers)
break;
needsbuffer |= VFS_BIO_NEED_FREE;
if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
break;
}
}
@@ -1248,7 +1251,7 @@ flushdirtybuffers(int slpflag, int slptimeo) {
return;
}
while (flushing) {
if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
if (tsleep(&flushing, (PRIBIO + 4)|slpflag, "biofls", slptimeo)) {
splx(s);
return;
}
@@ -1400,6 +1403,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
struct bufhashhdr *bh;
int maxsize;
int generation;
int checksize;

if (vp->v_mount) {
maxsize = vp->v_mount->mnt_stat.f_iosize;
@@ -1424,21 +1428,23 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
}

if ((bp = gbincore(vp, blkno))) {
loop1:
generation = bp->b_generation;
loop1:
if (bp->b_flags & B_BUSY) {

bp->b_flags |= B_WANTED;
if (bp->b_usecount < BUF_MAXUSE)
++bp->b_usecount;

if (!tsleep(bp,
(PRIBIO + 1) | slpflag, "getblk", slptimeo)) {
(PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
if (bp->b_generation != generation)
goto loop;
goto loop1;
} else {
splx(s);
return (struct buf *) NULL;
}

splx(s);
return (struct buf *) NULL;
}
bp->b_flags |= B_BUSY | B_CACHE;
bremfree(bp);
@@ -1447,7 +1453,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
* check for size inconsistancies (note that they shouldn't
* happen but do when filesystems don't handle the size changes
* correctly.) We are conservative on metadata and don't just
* extend the buffer but write and re-constitute it.
* extend the buffer but write (if needed) and re-constitute it.
*/

if (bp->b_bcount != size) {
@@ -1456,11 +1462,33 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
allocbuf(bp, size);
} else {
bp->b_flags |= B_NOCACHE;
VOP_BWRITE(bp);
if (bp->b_flags & B_DELWRI) {
VOP_BWRITE(bp);
} else {
brelse(bp);
}
goto loop;
}
}

/*
* Check that the constituted buffer really deserves for the
* B_CACHE bit to be set.
*/
checksize = bp->b_bufsize;
for (i = 0; i < bp->b_npages; i++) {
int resid;
int poffset;
poffset = bp->b_offset & PAGE_MASK;
resid = (checksize > (PAGE_SIZE - poffset)) ?
(PAGE_SIZE - poffset) : checksize;
if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
bp->b_flags &= ~(B_CACHE | B_DONE);
break;
}
checksize -= resid;
}

if (bp->b_usecount < BUF_MAXUSE)
++bp->b_usecount;
splx(s);
@@ -1494,6 +1522,11 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
* be found by incore.
*/
bp->b_blkno = bp->b_lblkno = blkno;
if (vp->v_type != VBLK)
bp->b_offset = (off_t) blkno * maxsize;
else
bp->b_offset = (off_t) blkno * DEV_BSIZE;

bgetvp(vp, bp);
LIST_REMOVE(bp, b_hash);
bh = BUFHASH(vp, blkno);
@@ -1710,7 +1743,8 @@ allocbuf(struct buf * bp, int size)
tinc = PAGE_SIZE;
if (tinc > bsize)
tinc = bsize;
off = (vm_ooffset_t) bp->b_lblkno * bsize;

off = bp->b_offset;
curbpnpages = bp->b_npages;
doretry:
bp->b_validoff = orig_validoff;
@@ -1814,7 +1848,7 @@ biowait(register struct buf * bp)
if (bp->b_flags & B_READ)
tsleep(bp, PRIBIO, "biord", 0);
else
tsleep(bp, curproc->p_usrpri, "biowr", 0);
tsleep(bp, PRIBIO, "biowr", 0);
#endif
splx(s);
if (bp->b_flags & B_EINTR) {
@@ -1896,10 +1930,8 @@ biodone(register struct buf * bp)
}
#endif

if (vp->v_type == VBLK)
foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
else
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
foff = bp->b_offset;

#if !defined(MAX_PERF)
if (!obj) {
panic("biodone: no object");
@@ -1936,6 +1968,7 @@ biodone(register struct buf * bp)
resid = IDX_TO_OFF(m->pindex + 1) - foff;
if (resid > iosize)
resid = iosize;

/*
* In the write case, the valid and clean bits are
* already changed correctly, so we only need to do this
@@ -2060,15 +2093,12 @@ vfs_unbusy_pages(struct buf * bp)
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
vm_object_t obj = vp->v_object;
vm_ooffset_t foff;

foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];

if (m == bogus_page) {
m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
#if !defined(MAX_PERF)
if (!m) {
panic("vfs_unbusy_pages: page missing\n");
@@ -2146,11 +2176,11 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)

soff = off;
eoff = off + min(PAGE_SIZE, bp->b_bufsize);
vm_page_set_invalid(m,
(vm_offset_t) (soff & PAGE_MASK),
(vm_offset_t) (eoff - soff));
if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
vm_ooffset_t sv, ev;
vm_page_set_invalid(m,
(vm_offset_t) (soff & PAGE_MASK),
(vm_offset_t) (eoff - soff));
off = off - pageno * PAGE_SIZE;
sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
ev = off + ((bp->b_validend + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
@@ -2159,8 +2189,8 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
}
if (eoff > soff)
vm_page_set_validclean(m,
(vm_offset_t) (soff & PAGE_MASK),
(vm_offset_t) (eoff - soff));
(vm_offset_t) (soff & PAGE_MASK),
(vm_offset_t) (eoff - soff));
}

/*
@@ -2181,10 +2211,7 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
vm_object_t obj = vp->v_object;
vm_ooffset_t foff;

if (vp->v_type == VBLK)
foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
else
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
foff = bp->b_offset;

vfs_setdirty(bp);

@@ -2229,14 +2256,10 @@ vfs_clean_pages(struct buf * bp)
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
vm_ooffset_t foff;
foff = bp->b_offset;

if (vp->v_type == VBLK)
foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
else
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
vm_page_t m = bp->b_pages[i];

vfs_page_set_valid(bp, foff, i, m);
}
}
@@ -2272,7 +2295,7 @@ vfs_bio_clrbuf(struct buf *bp) {
bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
}
}
/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
}
bp->b_resid = 0;
} else {

vfs_cluster.c:
@@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
* $Id: vfs_cluster.c,v 1.56 1998/03/07 21:35:28 dyson Exp $
* $Id: vfs_cluster.c,v 1.57 1998/03/08 09:57:09 julian Exp $
*/

#include "opt_debug_cluster.h"
@@ -165,8 +165,8 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
}
reqbp = bp = NULL;
} else {
u_quad_t firstread;
firstread = (u_quad_t) lblkno * size;
off_t firstread;
firstread = bp->b_offset;
if (firstread + totread > filesize)
totread = filesize - firstread;
if (totread > size) {
@@ -253,6 +253,7 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
curproc->p_stats->p_ru.ru_inblock++;
}
}

/*
* and if we have read-aheads, do them too
*/
@@ -346,6 +347,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
bp->b_iodone = cluster_callback;
bp->b_blkno = blkno;
bp->b_lblkno = lbn;
bp->b_offset = tbp->b_offset;
pbgetvp(vp, bp);

TAILQ_INIT(&bp->b_cluster.cluster_head);
@@ -363,8 +365,20 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
round_page(size) > vp->v_maxio)
break;

if (incore(vp, lbn + i))
break;
if (tbp = incore(vp, lbn + i)) {
if (tbp->b_flags & B_BUSY)
break;

for (j = 0; j < tbp->b_npages; j++)
if (tbp->b_pages[j]->valid)
break;

if (j != tbp->b_npages)
break;

if (tbp->b_bcount != size)
break;
}

tbp = getblk(vp, lbn + i, size, 0, 0);

@@ -374,18 +388,12 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
break;
}

for (j=0;j<tbp->b_npages;j++) {
if (tbp->b_pages[j]->valid) {
for (j = 0;j < tbp->b_npages; j++)
if (tbp->b_pages[j]->valid)
break;
}
}

if (j != tbp->b_npages) {
/*
* force buffer to be re-constituted later
*/
tbp->b_flags |= B_RELBUF;
brelse(tbp);
bqrelse(tbp);
break;
}

@@ -525,7 +533,7 @@ cluster_write(bp, filesize)
*/
cursize = vp->v_lastw - vp->v_cstart + 1;
#ifndef notyet_block_reallocation_enabled
if (((u_quad_t)(lbn + 1) * lblocksize) != filesize ||
if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
lbn != vp->v_lastw + 1 ||
vp->v_clen <= cursize) {
if (!async)
@@ -576,7 +584,7 @@ cluster_write(bp, filesize)
* existing cluster.
*/
if ((vp->v_type == VREG) &&
((u_quad_t) (lbn + 1) * lblocksize) != filesize &&
((u_quad_t) bp->b_offset + lblocksize) != filesize &&
(bp->b_blkno == bp->b_lblkno) &&
(VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
bp->b_blkno == -1)) {
@@ -682,6 +690,7 @@ cluster_wbuild(vp, size, start_lbn, len)

bp->b_blkno = tbp->b_blkno;
bp->b_lblkno = tbp->b_lblkno;
bp->b_offset = tbp->b_offset;
(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER |
(tbp->b_flags & (B_VMIO|B_NEEDCOMMIT));

vfs_subr.c:
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.139 1998/03/14 02:55:01 tegge Exp $
* $Id: vfs_subr.c,v 1.140 1998/03/14 19:50:36 dyson Exp $
*/

/*
@@ -113,8 +113,10 @@ SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

int vfs_ioopt = 0;
#ifdef REALLYBADBUG
#ifdef ENABLE_VFS_IOOPT
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif
#endif

struct mntlist mountlist; /* mounted filesystem list */
struct simplelock mountlist_slock;
@@ -631,6 +633,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
break;
}
bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
bp->b_flags &= ~B_ASYNC;
brelse(bp);
}
}
@@ -663,6 +666,90 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
return (0);
}

/*
* Truncate a file's buffer and pages to a specified length. This
* is in lieu of the old vinvalbuf mechanism, which performed unneeded
* sync activity.
*/
int
vtruncbuf(vp, cred, p, length, blksize)
register struct vnode *vp;
struct ucred *cred;
struct proc *p;
off_t length;
int blksize;
{
register struct buf *bp;
struct buf *nbp, *blist;
int s, error, anyfreed;
vm_object_t object;
int trunclbn;

/*
* Round up to the *next* lbn.
*/
trunclbn = ((length + blksize - 1) / blksize) * blksize;

s = splbio();
restart:
anyfreed = 1;
for (;anyfreed;) {
anyfreed = 0;
for ( bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {

nbp = LIST_NEXT(bp, b_vnbufs);

if (bp->b_lblkno >= trunclbn) {
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
tsleep((caddr_t) bp, PRIBIO, "vtrb1", 0);
nbp = bp;
} else {
bremfree(bp);
bp->b_flags |= (B_BUSY|B_INVAL|B_NOCACHE|B_RELBUF);
bp->b_flags &= ~B_ASYNC;
brelse(bp);
anyfreed = 1;
}
if (nbp &&
((LIST_NEXT(nbp, b_vnbufs) == NOLIST) || (nbp->b_vp != vp) ||
(nbp->b_flags & B_DELWRI))) {
goto restart;
}
}
}

for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {

nbp = LIST_NEXT(bp, b_vnbufs);

if (bp->b_lblkno >= trunclbn) {
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
tsleep((caddr_t) bp, PRIBIO, "vtrb2", 0);
nbp = bp;
} else {
bremfree(bp);
bp->b_flags |= (B_BUSY|B_INVAL|B_NOCACHE|B_RELBUF);
bp->b_flags &= ~B_ASYNC;
brelse(bp);
anyfreed = 1;
}
if (nbp &&
((LIST_NEXT(nbp, b_vnbufs) == NOLIST) || (nbp->b_vp != vp) ||
(nbp->b_flags & B_DELWRI) == 0)) {
goto restart;
}
}
}
}
splx(s);

vnode_pager_setsize(vp, length);

return (0);
}

/*
* Associate a buffer with a vnode.
*/

buf.h:
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.9 (Berkeley) 3/30/95
* $Id: buf.h,v 1.46 1998/03/07 21:36:20 dyson Exp $
* $Id: buf.h,v 1.47 1998/03/08 09:58:22 julian Exp $
*/

#ifndef _SYS_BUF_H_
@@ -98,6 +98,7 @@ struct buf {
int b_kvasize; /* size of kva for buffer */
daddr_t b_lblkno; /* Logical block number. */
daddr_t b_blkno; /* Underlying physical block number. */
off_t b_offset; /* Offset into file */
/* Function to call upon completion. */
void (*b_iodone) __P((struct buf *));
/* For nested b_iodone's. */

vnode.h:
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
* $Id: vnode.h,v 1.67 1998/03/07 21:36:27 dyson Exp $
* $Id: vnode.h,v 1.68 1998/03/08 09:58:35 julian Exp $
*/

#ifndef _SYS_VNODE_H_
@@ -489,7 +489,10 @@ int vget __P((struct vnode *vp, int lockflag, struct proc *p));
void vgone __P((struct vnode *vp));
void vhold __P((struct vnode *));
int vinvalbuf __P((struct vnode *vp, int save, struct ucred *cred,
struct proc *p, int slpflag, int slptimeo));
int vtruncbuf __P((struct vnode *vp, struct ucred *cred, struct proc *p,
off_t length, int blksize));
void vprint __P((char *label, struct vnode *vp));
int vrecycle __P((struct vnode *vp, struct simplelock *inter_lkp,
struct proc *p));

ffs_inode.c:
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95
* $Id: ffs_inode.c,v 1.35 1998/03/07 21:36:33 dyson Exp $
* $Id: ffs_inode.c,v 1.36 1998/03/08 09:58:55 julian Exp $
*/

#include "opt_quota.h"
@@ -224,7 +224,7 @@ ffs_truncate(vp, length, flags, cred, p)
(void) chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
softdep_setup_freeblocks(oip, length);
(void) vinvalbuf(ovp, 0, cred, p, 0, 0);
(void) vtruncbuf(ovp, cred, p, length, fs->fs_bsize);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (ffs_update(ovp, &tv, &tv, 0));
}
@@ -237,10 +237,6 @@ ffs_truncate(vp, length, flags, cred, p)
*/
if (osize < length) {
vnode_pager_setsize(ovp, length);
#if 0
offset = blkoff(fs, length - 1);
lbn = lblkno(fs, length - 1);
#endif
aflags = B_CLRBUF;
if (flags & IO_SYNC)
aflags |= B_SYNC;
@@ -277,9 +273,6 @@ ffs_truncate(vp, length, flags, cred, p)
aflags |= B_SYNC;
error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
if (error) {
#if 0 /* kirk's version had this */
vnode_pager_setsize(ovp, (u_long)osize);
#endif
return (error);
}
oip->i_size = length;
@@ -333,9 +326,7 @@ ffs_truncate(vp, length, flags, cred, p)
bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
oip->i_size = osize;
vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
allerror = vinvalbuf(ovp, vflags, cred, p, 0, 0);
vnode_pager_setsize(ovp, length);
allerror = vtruncbuf(ovp, cred, p, length, fs->fs_bsize);

/*
* Indirect blocks first.
@@ -426,7 +417,6 @@ ffs_truncate(vp, length, flags, cred, p)
if (oip->i_blocks < 0) /* sanity */
oip->i_blocks = 0;
oip->i_flag |= IN_CHANGE;
vnode_pager_setsize(ovp, length);
#ifdef QUOTA
(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif

vm_object.c:
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.117 1998/03/08 06:25:59 dyson Exp $
* $Id: vm_object.c,v 1.118 1998/03/08 18:05:59 dyson Exp $
*/

/*
@@ -436,7 +436,14 @@ vm_object_terminate(object)
vp = (struct vnode *) object->handle;
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);

} else if (object->type != OBJT_DEAD) {
/*
* Let the pager know object is dead.
*/
vm_pager_deallocate(object);

}

if ((object->type != OBJT_VNODE) && (object->ref_count == 0)) {

/*
* Now free the pages. For internal objects, this also removes them
@@ -451,20 +458,15 @@ vm_object_terminate(object)
vm_page_free(p);
cnt.v_pfree++;
}

}

if (object->type != OBJT_DEAD) {
/*
* Let the pager know object is dead.
*/
vm_pager_deallocate(object);

}

if (object->ref_count == 0) {
if ((object->type != OBJT_DEAD) || (object->resident_page_count == 0))
vm_object_dispose(object);
}
if ((object->ref_count == 0) && (object->resident_page_count == 0))
vm_object_dispose(object);
}

/*

vm_page.c:
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.95 1998/03/07 21:37:13 dyson Exp $
* $Id: vm_page.c,v 1.96 1998/03/08 06:27:30 dyson Exp $
*/

/*
@@ -1427,7 +1427,12 @@ vm_page_bits(int base, int size)

if ((base == 0) && (size >= PAGE_SIZE))
return VM_PAGE_BITS_ALL;

size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
if (size > PAGE_SIZE - base) {
size = PAGE_SIZE - base;
}

base = (base % PAGE_SIZE) / DEV_BSIZE;
chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
return (chunk << base) & VM_PAGE_BITS_ALL;
@@ -1463,6 +1468,7 @@ vm_page_set_invalid(m, base, size)
m->valid &= ~(bits = vm_page_bits(base, size));
if (m->valid == 0)
m->dirty &= ~bits;
m->object->generation++;
}

/*

vm_pageout.c:
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.118 1998/03/07 21:37:19 dyson Exp $
* $Id: vm_pageout.c,v 1.119 1998/03/08 18:19:17 dyson Exp $
*/

/*
@@ -849,24 +849,9 @@ vm_pageout_scan()
*/
page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
(cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
page_shortage += addl_page_shortage;
if (page_shortage <= 0) {
if (pages_freed == 0) {
page_shortage = cnt.v_free_min - (cnt.v_free_count + cnt.v_cache_count);
} else {
page_shortage = 1;
}
}

/*
* If the "inactive" loop finds that there is a shortage over and
* above the page statistics variables, then we need to accomodate
* that. This avoids potential deadlocks due to pages being temporarily
* busy for I/O or other types of temporary wiring.
*/
if (addl_page_shortage) {
if (page_shortage < 0)
page_shortage = 0;
page_shortage += addl_page_shortage;
page_shortage = 0;
}

pcount = cnt.v_active_count;
@@ -1070,6 +1055,12 @@ vm_pageout_page_stats()
vm_page_t m,next;
int pcount,tpcount; /* Number of pages to check */
static int fullintervalcount = 0;
int page_shortage;

page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
(cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
if (page_shortage <= 0)
return;

pcount = cnt.v_active_count;
fullintervalcount += vm_pageout_stats_interval;
@@ -1211,7 +1202,7 @@ vm_pageout()
* Set interval in seconds for stats scan.
*/
if (vm_pageout_stats_interval == 0)
vm_pageout_stats_interval = 4;
vm_pageout_stats_interval = 5;
if (vm_pageout_full_stats_interval == 0)
vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

@@ -1220,7 +1211,7 @@ vm_pageout()
* Set maximum free per pass
*/
if (vm_pageout_stats_free_max == 0)
vm_pageout_stats_free_max = 25;
vm_pageout_stats_free_max = 5;

max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

vm_pager.c:
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.35 1998/02/23 08:22:40 dyson Exp $
* $Id: vm_pager.c,v 1.36 1998/03/07 21:37:21 dyson Exp $
*/

/*
@@ -90,11 +90,85 @@ extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

static int dead_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static vm_object_t dead_pager_alloc __P((void *, vm_size_t, vm_prot_t,
vm_ooffset_t));
static int dead_pager_putpages __P((vm_object_t, vm_page_t *, int, int, int *));
static boolean_t dead_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
static void dead_pager_dealloc __P((vm_object_t));

int
dead_pager_getpages(obj, ma, count, req)
vm_object_t obj;
vm_page_t *ma;
int count;
int req;
{
return VM_PAGER_FAIL;
}

vm_object_t
dead_pager_alloc(handle, size, prot, off)
void *handle;
vm_size_t size;
vm_prot_t prot;
vm_ooffset_t off;
{
return NULL;
}

int
dead_pager_putpages(object, m, count, flags, rtvals)
vm_object_t object;
vm_page_t *m;
int count;
int flags;
int *rtvals;
{
int i;
for (i = 0; i < count; i++) {
rtvals[i] = VM_PAGER_AGAIN;
}
return VM_PAGER_AGAIN;
}

int
dead_pager_haspage(object, pindex, prev, next)
vm_object_t object;
vm_pindex_t pindex;
int *prev;
int *next;
{
if (prev)
*prev = 0;
if (next)
*next = 0;
return FALSE;
}

void
dead_pager_dealloc(object)
vm_object_t object;
{
return;
}

struct pagerops deadpagerops = {
NULL,
dead_pager_alloc,
dead_pager_dealloc,
dead_pager_getpages,
dead_pager_putpages,
dead_pager_haspage,
NULL
};

static strupagerops *pagertab[] = {
&defaultpagerops, /* OBJT_DEFAULT */
&swappagerops, /* OBJT_SWAP */
&vnodepagerops, /* OBJT_VNODE */
&devicepagerops, /* OBJT_DEVICE */
&deadpagerops /* OBJT_DEAD */
};
static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

vnode_pager.c:
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.89 1998/03/07 21:37:31 dyson Exp $
* $Id: vnode_pager.c,v 1.90 1998/03/09 08:58:53 msmith Exp $
*/

/*
@@ -926,6 +926,8 @@ vnode_pager_lock(object)

while (vget(object->handle,
LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
if ((object->flags & OBJ_DEAD) || (object->type != OBJT_VNODE))
return NULL;
printf("vnode_pager_lock: retrying\n");
}
return object->handle;