Provide support for the upcoming merged VM/buffer cache, and fix a few bugs
that have not appeared to manifest themselves (yet).

Submitted by:	John Dyson
commit a481f20029
parent c564966bf0
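
The pattern that repeats through this commit: transient kernel-only mappings stop going through the general pmap_enter()/pmap_extract() path and use the dedicated pmap_kenter()/pmap_kremove()/pmap_kextract() primitives instead, and several splimp() critical sections are raised to splhigh(). As a rough illustration, here is how the two vm_pager.c page-mapping helpers read after this change. This sketch is pieced together from the vm_pager.c hunks below; the function headers and return types are reconstructed and not part of the diff, so treat it as illustrative rather than the verbatim post-commit source.

	/* Illustrative sketch only -- assembled from the vm_pager.c hunks below. */
	vm_offset_t
	vm_pager_map_page(m)
		vm_page_t m;
	{
		vm_offset_t kva;

		kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
		/* kernel-only mapping; the pmap_enter() on pager_map's pmap is gone */
		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
		pmap_update();
		return(kva);
	}

	void
	vm_pager_unmap_page(kva)
		vm_offset_t kva;
	{
		/* tear down the kernel mapping before giving the kva back */
		pmap_kremove(kva);
		kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
	}

pmap_kenter() enters a wired kernel-only mapping without the bookkeeping of the general pmap_enter() path, which is presumably why these short-lived pager mappings switch over; the same substitution appears in vm_kern.c and pmap.c below.
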
pmap.c:

@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.28 1994/08/06 09:15:15 davidg Exp $
+ * $Id: pmap.c,v 1.29 1994/08/06 10:25:36 davidg Exp $
  */
 
 /*
@@ -695,6 +695,7 @@ pmap_alloc_pv_entry()
 	 * let the kernel see it
 	 */
 	pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
+	tlbflush();
 
 	entry = (pv_entry_t) pvva;
 	/*
kern_physio.c:

@@ -16,7 +16,7 @@
  * 4. Modifications may be freely made to this file if the above conditions
  * are met.
  *
- * $Id: kern_physio.c,v 1.3 1994/08/02 07:42:05 davidg Exp $
+ * $Id: kern_physio.c,v 1.4 1994/08/06 09:15:28 davidg Exp $
  */
 
 #include <sys/param.h>
@@ -27,6 +27,7 @@
 #include <vm/vm.h>
 
 static void physwakeup();
+u_int minphys(struct buf *bp);
 
 int
 physio(strategy, bp, dev, rw, minp, uio)
@@ -78,6 +79,9 @@ physio(strategy, bp, dev, rw, minp, uio)
 			caddr_t adr;
 
 			bp->b_bcount = uio->uio_iov[i].iov_len;
+			bp->b_bcount = minp( bp);
+			if( minp != minphys)
+				bp->b_bcount = minphys( bp);
 			bp->b_bufsize = bp->b_bcount;
 			bp->b_flags = B_BUSY | B_PHYS | B_CALL | bufflags;
 			bp->b_iodone = physwakeup;
swap_pager.c:

@@ -39,7 +39,7 @@
  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
  *
  * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.4 1994/08/02 07:55:13 davidg Exp $
+ * $Id: swap_pager.c,v 1.5 1994/08/06 09:15:36 davidg Exp $
  */
 
 /*
@@ -1366,17 +1366,6 @@ swap_pager_output(swp, m, count, flags, rtvals)
 	 * get a swap pager clean data structure, block until we get it
 	 */
 	if (swap_pager_free.tqh_first == NULL) {
-/*
-		if (flags & B_ASYNC) {
-			for(i=0;i<count;i++) {
-				rtvals[i] = VM_PAGER_AGAIN;
-				if( swb[i])
-					--swb[i]->swb_locked;
-			}
-			return VM_PAGER_AGAIN;
-		}
-*/
-
 		s = splbio();
 		if( curproc == pageproc)
 			(void) swap_pager_clean();
@@ -1442,9 +1431,11 @@ swap_pager_output(swp, m, count, flags, rtvals)
 	bp->b_flags = B_BUSY;
 	bp->b_proc = &proc0;	/* XXX (but without B_PHYS set this is ok) */
 	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
-	crhold(bp->b_rcred);
-	crhold(bp->b_wcred);
-	bp->b_un.b_addr = (caddr_t) kva;
+	if( bp->b_rcred != NOCRED)
+		crhold(bp->b_rcred);
+	if( bp->b_wcred != NOCRED)
+		crhold(bp->b_wcred);
+	bp->b_data = (caddr_t) kva;
 	bp->b_blkno = reqaddr[0];
 	bgetvp( swapdev_vp, bp);
 
vm_kern.c:

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_kern.c,v 1.3 1994/08/02 07:55:22 davidg Exp $
+ * $Id: vm_kern.c,v 1.4 1994/08/06 09:15:38 davidg Exp $
  */
 
 /*
@@ -371,10 +371,6 @@ kmem_malloc(map, size, canwait)
 		vm_object_lock(kmem_object);
 		m = vm_page_lookup(kmem_object, offset + i);
 		vm_object_unlock(kmem_object);
-/*
-		pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m),
-			VM_PROT_DEFAULT, TRUE);
-*/
 		pmap_kenter( addr + i, VM_PAGE_TO_PHYS(m));
 	}
 	pmap_update();
vm_object.c:

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id$
+ * $Id: vm_object.c,v 1.3 1994/08/02 07:55:29 davidg Exp $
  */
 
 /*
@@ -353,7 +353,7 @@ vm_object_terminate(object)
 		VM_PAGE_CHECK(p);
 
 		vm_page_lock_queues();
-		s = splimp();
+		s = splhigh();
 		if (p->flags & PG_ACTIVE) {
 			TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
 			p->flags &= ~PG_ACTIVE;
@@ -613,7 +613,6 @@ vm_object_page_clean(object, start, end, syncio, de_queue)
 				vm_page_deactivate(p);
 				vm_page_unlock_queues();
 			}
-			p->flags &= ~PG_BUSY;
 			PAGE_WAKEUP(p);
 			goto again;
 		}
@@ -713,6 +712,7 @@ vm_object_pmap_remove(object, start, end)
 	register vm_offset_t end;
 {
 	register vm_page_t p;
+	int s;
 
 	if (object == NULL)
 		return;
@@ -721,11 +721,14 @@ vm_object_pmap_remove(object, start, end)
 again:
 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
 		if ((start <= p->offset) && (p->offset < end)) {
+			s = splhigh();
 			if (p->flags & PG_BUSY) {
 				p->flags |= PG_WANTED;
 				tsleep((caddr_t) p, PVM, "vmopmr", 0);
+				splx(s);
 				goto again;
 			}
+			splx(s);
 			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
 			if ((p->flags & PG_CLEAN) == 0)
 				p->flags |= PG_LAUNDRY;
@@ -1456,11 +1459,14 @@ vm_object_page_remove(object, start, end)
 		for (p = object->memq.tqh_first; (p != NULL && size > 0); p = next) {
 			next = p->listq.tqe_next;
 			if ((start <= p->offset) && (p->offset < end)) {
+				s=splhigh();
 				if (p->flags & PG_BUSY) {
 					p->flags |= PG_WANTED;
 					tsleep((caddr_t) p, PVM, "vmopar", 0);
+					splx(s);
 					goto again;
 				}
+				splx(s);
 				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
 				vm_page_lock_queues();
 				vm_page_free(p);
@@ -1471,11 +1477,14 @@ vm_object_page_remove(object, start, end)
 	} else {
 		while (size > 0) {
 			while (p = vm_page_lookup(object, start)) {
+				s = splhigh();
 				if (p->flags & PG_BUSY) {
 					p->flags |= PG_WANTED;
 					tsleep((caddr_t) p, PVM, "vmopar", 0);
+					splx(s);
 					goto again;
 				}
+				splx(s);
 				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
 				vm_page_lock_queues();
 				vm_page_free(p);
vm_page.c:

@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.2 1994/05/25 09:20:05 rgrimes Exp $
+ * $Id: vm_page.c,v 1.3 1994/08/01 11:25:44 davidg Exp $
  */
 
 /*
@@ -387,7 +387,7 @@ void vm_page_insert(mem, object, offset)
 	 */
 
 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
-	s = splimp();
+	s = splhigh();
 	simple_lock(&bucket_lock);
 	TAILQ_INSERT_TAIL(bucket, mem, hashq);
 	simple_unlock(&bucket_lock);
@@ -434,7 +434,7 @@ void vm_page_remove(mem)
 	 */
 
 	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
-	s = splimp();
+	s = splhigh();
 	simple_lock(&bucket_lock);
 	TAILQ_REMOVE(bucket, mem, hashq);
 	simple_unlock(&bucket_lock);
@@ -479,7 +479,7 @@ vm_page_t vm_page_lookup(object, offset)
 
 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 
-	s = splimp();
+	s = splhigh();
 	simple_lock(&bucket_lock);
 	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
 		VM_PAGE_CHECK(mem);
@@ -534,7 +534,7 @@ vm_page_alloc(object, offset)
 	register vm_page_t mem;
 	int s;
 
-	s = splimp();
+	s = splhigh();
 	simple_lock(&vm_page_queue_free_lock);
 	if ( object != kernel_object &&
 		object != kmem_object &&
@@ -596,7 +596,7 @@ void vm_page_free(mem)
 	register vm_page_t mem;
 {
 	int s;
-	s = splimp();
+	s = splhigh();
 	vm_page_remove(mem);
 	if (mem->flags & PG_ACTIVE) {
 		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
@@ -667,7 +667,7 @@ void vm_page_wire(mem)
 	VM_PAGE_CHECK(mem);
 
 	if (mem->wire_count == 0) {
-		s = splimp();
+		s = splhigh();
 		if (mem->flags & PG_ACTIVE) {
 			TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
 			cnt.v_active_count--;
@@ -698,7 +698,7 @@ void vm_page_unwire(mem)
 	int s;
 	VM_PAGE_CHECK(mem);
 
-	s = splimp();
+	s = splhigh();
 
 	if( mem->wire_count)
 		mem->wire_count--;
@@ -738,7 +738,7 @@ vm_page_deactivate(m)
  * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
  */
 
-	spl = splimp();
+	spl = splhigh();
 	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
 		m->hold_count == 0) {
 
@@ -781,7 +781,7 @@ void vm_page_deactivate(m)
 	int s;
 	VM_PAGE_CHECK(m);
 
-	s = splimp();
+	s = splhigh();
 	/*
 	 * Only move active pages -- ignore locked or already
 	 * inactive ones.
@@ -824,7 +824,7 @@ void vm_page_activate(m)
 	int s;
 	VM_PAGE_CHECK(m);
 
-	s = splimp();
+	s = splhigh();
 	if (m->flags & PG_INACTIVE) {
 		TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
 		cnt.v_inactive_count--;
vm_pager.c:

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_pager.c,v 1.3 1994/08/02 07:55:35 davidg Exp $
+ * $Id: vm_pager.c,v 1.4 1994/08/06 09:15:40 davidg Exp $
  */
 
 /*
@@ -280,8 +280,8 @@ vm_pager_map_page(m)
 	vm_offset_t kva;
 
 	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
-	pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
-		VM_PROT_DEFAULT, TRUE);
+	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+	pmap_update();
 	return(kva);
 }
 
@@ -289,6 +289,7 @@ void
 vm_pager_unmap_page(kva)
 	vm_offset_t kva;
 {
+	pmap_kremove(kva);
 	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
 }
 
@@ -298,7 +299,7 @@ vm_pager_atop(kva)
 {
 	vm_offset_t pa;
 
-	pa = pmap_extract(vm_map_pmap(pager_map), kva);
+	pa = pmap_kextract( kva);
 	if (pa == 0)
 		panic("vm_pager_atop");
 	return (PHYS_TO_VM_PAGE(pa));
@@ -353,7 +354,6 @@ getpbuf() {
 
 	s = splbio();
 	/* get a bp from the swap buffer header pool */
-tryagain:
 	while ((bp = bswlist.tqh_first) == NULL) {
 		bswneeded = 1;
 		tsleep((caddr_t)&bswneeded, PVM, "wswbuf", 0);
vnode_pager.c:

@@ -37,7 +37,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.4 1994/08/06 09:15:42 davidg Exp $
+ * $Id: vnode_pager.c,v 1.5 1994/08/06 10:25:50 davidg Exp $
  */
 
 /*
@@ -520,7 +520,6 @@ void
 vnode_pager_iodone(bp)
 	struct buf *bp;
 {
-	int s = splbio();
 	bp->b_flags |= B_DONE;
 	wakeup((caddr_t) bp);
 	if( bp->b_flags & B_ASYNC) {
@@ -536,6 +535,10 @@ vnode_pager_iodone(bp)
 			bp->b_bufsize - bp->b_bcount);
 
 		npages = (bp->b_bufsize + PAGE_SIZE - 1) / PAGE_SIZE;
+/*
+		printf("bcount: %d, bufsize: %d, npages: %d\n",
+			bp->b_bcount, bp->b_bufsize, npages);
+*/
 		for( i = 0; i < npages; i++) {
 			m = PHYS_TO_VM_PAGE(pmap_kextract(paddr + i * PAGE_SIZE));
 			obj = m->object;
@@ -547,6 +550,7 @@ vnode_pager_iodone(bp)
 				panic("vnode_pager_iodone: page is gone!!!");
 			}
 		}
+		pmap_qremove( paddr, npages);
 		if( obj) {
 			--obj->paging_in_progress;
 			if( obj->paging_in_progress == 0)
@@ -555,11 +559,8 @@ vnode_pager_iodone(bp)
 			panic("vnode_pager_iodone: object is gone???");
 		}
 		HOLDRELE(bp->b_vp);
-		splx(s);
 		relpbuf(bp);
 		return;
 	}
-	splx(s);
 }
 
 /*
@@ -575,7 +576,6 @@ vnode_pager_input_smlfs(vnp, m)
 	vm_offset_t paging_offset;
 	struct vnode *dp, *vp;
 	struct buf *bp;
-	vm_offset_t mapsize;
 	vm_offset_t foff;
 	vm_offset_t kva;
 	int fileaddr;
@@ -768,7 +768,6 @@ vnode_pager_input(vnp, m, count, reqpage)
 	vm_object_t object;
 	vm_offset_t paging_offset;
 	struct vnode *dp, *vp;
-	vm_offset_t mapsize;
 	int bsize;
 
 	int first, last;
@@ -797,7 +796,6 @@ vnode_pager_input(vnp, m, count, reqpage)
 	 * originally, we did not check for an error return value -- assuming
 	 * an fs always has a bmap entry point -- that assumption is wrong!!!
 	 */
-	mapsize = 0;
 	foff = m[reqpage]->offset + paging_offset;
 
 	/*
@@ -890,7 +888,7 @@ vnode_pager_input(vnp, m, count, reqpage)
 	 * unmap the page and free the kva
 	 */
 	pmap_qremove( kva, 1);
-	kmem_free_wakeup(pager_map, kva, mapsize);
+	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
 
 	/*
 	 * release the buffer back to the block subsystem
@@ -1087,7 +1085,6 @@ vnode_pager_input(vnp, m, count, reqpage)
 finishup:
 	for (i = 0; i < count; i++) {
 		pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
 		m[i]->flags |= PG_CLEAN;
 		m[i]->flags &= ~PG_LAUNDRY;
 		if (i != reqpage) {
@@ -1189,7 +1186,6 @@ vnode_pager_output_smlfs(vnp, m)
 	vm_offset_t paging_offset;
 	struct vnode *dp, *vp;
 	struct buf *bp;
-	vm_offset_t mapsize;
 	vm_offset_t foff;
 	vm_offset_t kva;
 	int fileaddr;
@@ -1286,7 +1282,6 @@ vnode_pager_output(vnp, m, count, rtvals)
 	vm_offset_t paging_offset;
 	struct vnode *dp, *vp;
 	struct buf *bp;
-	vm_offset_t mapsize;
 	vm_offset_t reqaddr;
 	int bsize;
 	int s;