This commit is dual-purpose: it fixes more of the pageout daemon
queue corruption problems, and it applies Gary Palmer's code cleanups.
David Greenman also helped with these problems. There is still
a hang problem when using X on small-memory machines.
John Dyson 1996-05-31 00:38:04 +00:00
parent 114a8cff43
commit f35329ac0f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=16026
10 changed files with 215 additions and 146 deletions
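Most of the queue-corruption fixes below follow one pattern: every page
queue manipulation (TAILQ_REMOVE/TAILQ_INSERT_TAIL on the active,
inactive, or cache queues) is bracketed by splvm()/splx(), so that
interrupt-time VM code can never observe a half-updated queue. A minimal
sketch of the pattern, using the 4.4BSD-era types and macros that appear
in the diffs (the helper name is hypothetical, not part of the commit):

static void
vm_requeue_inactive(vm_page_t m)
{
	int s;

	s = splvm();	/* block VM interrupts around the queue update */
	TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
	TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
	splx(s);	/* restore the previous interrupt priority level */
}

The same bracketing shows up repeatedly in the vm_pageout_scan() and
vm_page_deactivate() hunks below.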

pmap.c

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.94 1996/05/22 17:07:14 peter Exp $
* $Id: pmap.c,v 1.95 1996/05/29 05:09:07 dyson Exp $
*/
/*
@ -187,8 +187,6 @@ static __inline int pmap_remove_entry __P((struct pmap *pmap, pv_entry_t *pv,
vm_offset_t va));
static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
vm_offset_t sva));
static vm_page_t
pmap_pte_vm_page __P((pmap_t pmap, vm_offset_t pt));
static boolean_t
pmap_testbit __P((vm_offset_t pa, int bit));
static __inline void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
@ -320,7 +318,6 @@ pmap_qenter(va, m, count)
int count;
{
int i;
int anyvalid = 0;
register unsigned *pte;
for (i = 0; i < count; i++) {
@ -408,6 +405,21 @@ pmap_is_managed(pa)
return 0;
}
static __inline int
pmap_unwire_pte_hold(vm_page_t m) {
vm_page_unhold(m);
if (m->hold_count == 0) {
--m->wire_count;
if (m->wire_count == 0) {
--cnt.v_wire_count;
m->dirty = 0;
vm_page_deactivate(m);
}
return 1;
}
return 0;
}
#if !defined(PMAP_DIAGNOSTIC)
__inline
#endif
@ -436,10 +448,6 @@ pmap_unuse_pt(pmap, va, mpte)
}
#endif
vm_page_unhold(mpte);
if ((mpte->hold_count == 0) &&
(mpte->wire_count == 0)) {
/*
* We don't free page-table-pages anymore because it can have a negative
* impact on perf at times. Now we just deactivate, and it'll get cleaned
@ -447,11 +455,7 @@ pmap_unuse_pt(pmap, va, mpte)
* brought back into the process address space by pmap_allocpte and be
* reactivated.
*/
mpte->dirty = 0;
vm_page_deactivate(mpte);
return 1;
}
return 0;
return pmap_unwire_pte_hold(mpte);
}
/*
@ -979,8 +983,6 @@ pmap_remove_pte(pmap, ptq, va)
{
unsigned oldpte;
pv_entry_t *ppv;
int i;
int s;
oldpte = *ptq;
*ptq = 0;
@ -1048,12 +1050,10 @@ pmap_remove(pmap, sva, eva)
register vm_offset_t eva;
{
register unsigned *ptbase;
vm_offset_t va;
vm_offset_t pdnxt;
vm_offset_t ptpaddr;
vm_offset_t sindex, eindex;
vm_page_t mpte;
int s;
int anyvalid;
if (pmap == NULL)
@ -1080,7 +1080,6 @@ pmap_remove(pmap, sva, eva)
sindex = i386_btop(sva);
eindex = i386_btop(eva);
for (; sindex < eindex; sindex = pdnxt) {
/*
@ -1096,16 +1095,18 @@ pmap_remove(pmap, sva, eva)
if (ptpaddr == 0)
continue;
if (sindex < i386_btop(UPT_MIN_ADDRESS)) {
/*
* get the vm_page_t for the page table page
*/
mpte = PHYS_TO_VM_PAGE(ptpaddr);
mpte = PHYS_TO_VM_PAGE(ptpaddr);
/*
* if the pte isn't wired or held, just skip it.
* if the pte isn't wired, just skip it.
*/
if ((mpte->hold_count == 0) && (mpte->wire_count == 0))
continue;
if (mpte->wire_count == 0)
continue;
}
/*
* Limit our scan to either the end of the va represented
@ -1140,9 +1141,8 @@ pmap_remove_pte_mapping(pa)
vm_offset_t pa;
{
register pv_entry_t pv, *ppv, npv;
register unsigned *pte, *ptbase;
register unsigned *pte;
vm_offset_t va;
int s;
int anyvalid = 0;
ppv = pa_to_pvh(pa);
@ -1214,6 +1214,8 @@ pmap_remove_all(pa)
pmap = pv->pv_pmap;
ptbase = get_ptbase(pmap);
va = pv->pv_va;
if (*pmap_pde(pmap, va) == 0)
continue;
pte = ptbase + i386_btop(va);
if (tpte = ((int) *pte)) {
pmap->pm_stats.resident_count--;
@ -1284,7 +1286,6 @@ pmap_protect(pmap, sva, eva, prot)
eindex = i386_btop(eva);
for (; sindex < eindex; sindex = pdnxt) {
int pbits;
pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));
@ -1296,10 +1297,15 @@ pmap_protect(pmap, sva, eva, prot)
if (ptpaddr == 0)
continue;
mpte = PHYS_TO_VM_PAGE(ptpaddr);
/*
* Don't look at kernel page table pages
*/
if (sindex < i386_btop(UPT_MIN_ADDRESS)) {
mpte = PHYS_TO_VM_PAGE(ptpaddr);
if ((mpte->hold_count == 0) && (mpte->wire_count == 0))
continue;
if (mpte->wire_count == 0)
continue;
}
if (pdnxt > eindex) {
pdnxt = eindex;
@ -1365,6 +1371,7 @@ _pmap_allocpte(pmap, va, ptepindex)
{
vm_offset_t pteva, ptepa;
vm_page_t m;
int s;
/*
* Find or fabricate a new pagetable page
@ -1388,18 +1395,20 @@ _pmap_allocpte(pmap, va, ptepindex)
*/
pmap->pm_pteobj->flags |= OBJ_WRITEABLE;
if (m->hold_count == 0) {
s = splvm();
vm_page_unqueue(m);
splx(s);
++m->wire_count;
++cnt.v_wire_count;
}
/*
* Increment the hold count for the page table page
* (denoting a new mapping.)
*/
++m->hold_count;
/*
* Activate the pagetable page, if it isn't already
*/
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
/*
* Map the pagetable page into the process address space, if
* it isn't already there.
@ -1407,7 +1416,6 @@ _pmap_allocpte(pmap, va, ptepindex)
pteva = ((vm_offset_t) vtopte(va)) & PG_FRAME;
ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
if (ptepa == 0) {
int s;
pv_entry_t pv, *ppv;
pmap->pm_stats.resident_count++;
@ -1469,9 +1477,14 @@ pmap_allocpte(pmap, va)
*/
if ((ptepa & (PG_RW|PG_U|PG_V)) == (PG_RW|PG_U|PG_V)) {
m = PHYS_TO_VM_PAGE(ptepa);
if (m->hold_count == 0) {
int s = splvm();
vm_page_unqueue(m);
splx(s);
++m->wire_count;
++cnt.v_wire_count;
}
++m->hold_count;
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
return m;
}
return _pmap_allocpte(pmap, va, ptepindex);
@ -1500,9 +1513,7 @@ pmap_enter(pmap, va, pa, prot, wired)
register unsigned *pte;
vm_offset_t opa;
vm_offset_t origpte, newpte;
vm_offset_t ptepa;
vm_page_t mpte;
int s;
if (pmap == NULL)
return;
@ -1779,7 +1790,6 @@ pmap_prefault(pmap, addra, entry, object)
vm_offset_t addr;
vm_pindex_t pindex;
vm_page_t m;
int pageorder_index;
if (entry->object.vm_object != object)
return;
@ -1893,7 +1903,6 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
unsigned src_frame, dst_frame;
pd_entry_t pde;
if (dst_addr != src_addr)
return;
@ -1943,8 +1952,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr, dstmpte,
(ptetemp & PG_FRAME));
} else {
--dstmpte->hold_count;
} else {
pmap_unwire_pte_hold(dstmpte);
}
if (dstmpte->hold_count >= srcmpte->hold_count)
break;
@ -2108,6 +2117,8 @@ pmap_testbit(pa, bit)
continue;
}
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
if (pte == NULL)
continue;
if ((int) *pte & bit) {
splx(s);
return TRUE;
@ -2127,7 +2138,7 @@ pmap_changebit(pa, bit, setem)
boolean_t setem;
{
register pv_entry_t pv, *ppv;
register unsigned *pte, npte;
register unsigned *pte;
vm_offset_t va;
int changed;
int s;
@ -2161,6 +2172,8 @@ pmap_changebit(pa, bit, setem)
}
pte = pmap_pte(pv->pv_pmap, va);
if (pte == NULL)
continue;
if (setem) {
*(int *)pte |= bit;
changed = 1;

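The pmap.c change centralizes the hold/wire bookkeeping for page table
pages in the new pmap_unwire_pte_hold() helper, so pmap_unuse_pt() and
pmap_copy() release references exactly the way pmap_allocpte() acquires
them. A hedged sketch of the intended pairing (the wrapper function is
illustrative, not part of the commit):

static void
pte_page_reference_example(pmap_t pmap, vm_offset_t va)
{
	vm_page_t mpte;

	/*
	 * Acquire: bumps hold_count, and wires and activates the page
	 * table page if this is its first hold.
	 */
	mpte = pmap_allocpte(pmap, va);

	/* ... install a pte in the page table page mpte maps ... */

	/*
	 * Release: drops the hold; when the last hold goes away the
	 * wire is dropped too, and at wire_count == 0 the page is
	 * deactivated rather than freed, so a later pmap_allocpte()
	 * can cheaply reactivate it.
	 */
	pmap_unwire_pte_hold(mpte);
}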
default_pager.h

@ -28,11 +28,12 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: default_pager.h,v 1.2 1995/12/11 04:57:58 dyson Exp $
* $Id: default_pager.h,v 1.3 1995/12/14 09:54:48 phk Exp $
*/
#ifndef _DEFAULT_PAGER_H_
#define _DEFAULT_PAGER_H_ 1
void default_pager_convert_to_swap __P((vm_object_t object));
#endif /* _DEFAULT_PAGER_H_ */

vm_fault.c

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.45 1996/05/19 07:36:45 dyson Exp $
* $Id: vm_fault.c,v 1.46 1996/05/26 05:30:33 dyson Exp $
*/
/*
@ -821,10 +821,12 @@ vm_fault_wire(map, start, end)
for (va = start; va < end; va += PAGE_SIZE) {
/*
while( curproc != pageproc &&
(cnt.v_free_count <= cnt.v_pageout_free_min)) {
VM_WAIT;
}
*/
rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
if (rv) {
@ -1005,6 +1007,15 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
object = m->object;
pindex = m->pindex;
/*
* we don't fault-ahead for device pager
*/
if (object->type == OBJT_DEVICE) {
*reqpage = 0;
marray[0] = m;
return 1;
}
/*
* if the requested page is not available, then give up now
*/

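The new OBJT_DEVICE early return in vm_fault_additional_pages() means
device-backed objects never get fault-ahead clustering: the caller always
receives exactly the faulting page. A hedged illustration of what a
caller observes (the buffer size is an assumption, not from this diff):

	vm_page_t marray[16];	/* assumed cluster buffer size */
	int reqpage, count;

	count = vm_fault_additional_pages(m, rbehind, rahead,
	    marray, &reqpage);
	/*
	 * For an OBJT_DEVICE object the hunk above guarantees
	 * count == 1, reqpage == 0, and marray[0] == m.
	 */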
vm_glue.c

@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.48 1996/05/02 09:34:51 phk Exp $
* $Id: vm_glue.c,v 1.49 1996/05/18 03:37:37 dyson Exp $
*/
#include "opt_ddb.h"
@ -196,7 +196,7 @@ vm_fork(p1, p2)
register struct proc *p1, *p2;
{
register struct user *up;
int error, i;
int i;
pmap_t pvp;
vm_object_t upobj;
@ -204,15 +204,6 @@ vm_fork(p1, p2)
VM_WAIT;
}
#if 0
/*
* avoid copying any of the parent's pagetables or other per-process
* objects that reside in the map by marking all of them
* non-inheritable
*/
(void) vm_map_inherit(&p1->p_vmspace->vm_map,
UPT_MIN_ADDRESS - UPAGES * PAGE_SIZE, VM_MAX_ADDRESS, VM_INHERIT_NONE);
#endif
p2->p_vmspace = vmspace_fork(p1->p_vmspace);
if (p1->p_vmspace->vm_shm)
@ -328,13 +319,11 @@ faultin(p)
struct proc *p;
{
vm_offset_t i;
vm_offset_t ptaddr;
int s;
if ((p->p_flag & P_INMEM) == 0) {
pmap_t pmap = &p->p_vmspace->vm_pmap;
vm_page_t stkm, m;
int error;
vm_page_t m;
vm_object_t upobj = p->p_vmspace->vm_upages_obj;
++p->p_lock;
@ -536,9 +525,7 @@ static void
swapout(p)
register struct proc *p;
{
vm_map_t map = &p->p_vmspace->vm_map;
pmap_t pmap = &p->p_vmspace->vm_pmap;
vm_offset_t ptaddr;
int i;
#if defined(SWAP_DEBUG)

vm_map.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.47 1996/05/23 00:45:54 dyson Exp $
* $Id: vm_map.c,v 1.48 1996/05/29 05:12:21 dyson Exp $
*/
/*
@ -89,6 +89,7 @@
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/default_pager.h>
/*
* Virtual memory maps provide for the mapping, protection,
@ -217,8 +218,6 @@ vmspace_alloc(min, max, pageable)
register struct vmspace *vm;
if (mapvmpgcnt == 0 && mapvm == 0) {
int s;
mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
mapvm_start = mapvm = kmem_alloc_pageable(kernel_map,
mapvmpgcnt * PAGE_SIZE);
@ -1909,15 +1908,11 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
vm_map_t src_map, dst_map;
register vm_map_entry_t src_entry, dst_entry;
{
vm_pindex_t temp_pindex;
if (src_entry->is_sub_map || dst_entry->is_sub_map)
return;
if (src_entry->wired_count == 0) {
boolean_t src_needs_copy;
/*
* If the source entry is marked needs_copy, it is already
* write-protected.
@ -2000,7 +1995,6 @@ vmspace_fork(vm1)
vm_map_entry_t new_entry;
pmap_t new_pmap;
vm_object_t object;
vm_page_t p;
vm_map_lock(old_map);

vm_mmap.c

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.42 1996/05/18 03:37:51 dyson Exp $
* $Id: vm_mmap.c,v 1.43 1996/05/19 07:36:49 dyson Exp $
*/
/*
@ -546,7 +546,7 @@ madvise(p, uap, retval)
{
vm_map_t map;
pmap_t pmap;
vm_offset_t start, end, addr, nextaddr;
vm_offset_t start, end;
/*
* Check for illegal addresses. Watch out for address wrap... Note
* that VM_*_ADDRESS are not constants due to casts (argh).

vm_object.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.73 1996/05/23 00:45:58 dyson Exp $
* $Id: vm_object.c,v 1.74 1996/05/24 05:17:21 dyson Exp $
*/
/*
@ -263,7 +263,6 @@ vm_object_deallocate(object)
vm_object_t object;
{
vm_object_t temp;
vm_page_t p;
while (object != NULL) {
@ -510,6 +509,7 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
}
splx(s);
s = splvm();
maxf = 0;
for(i=1;i<vm_pageout_page_count;i++) {
if (tp = vm_page_lookup(object, pi + i)) {
@ -576,6 +576,7 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
vm_page_protect(ma[index], VM_PROT_READ);
}
runlen = maxb + maxf + 1;
splx(s);
/*
printf("maxb: %d, maxf: %d, runlen: %d, offset: %d\n", maxb, maxf, runlen, ma[0]->pindex);
*/

vm_page.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.51 1996/05/18 03:37:57 dyson Exp $
* $Id: vm_page.c,v 1.52 1996/05/24 05:20:15 dyson Exp $
*/
/*
@ -588,6 +588,10 @@ vm_page_alloc(object, pindex, page_req)
m = TAILQ_FIRST(&vm_page_queue_cache);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
pagedaemon_wakeup();
return (NULL);
}
@ -606,6 +610,10 @@ vm_page_alloc(object, pindex, page_req)
m = TAILQ_FIRST(&vm_page_queue_cache);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
pagedaemon_wakeup();
return (NULL);
}
@ -625,6 +633,10 @@ vm_page_alloc(object, pindex, page_req)
m = TAILQ_FIRST(&vm_page_queue_cache);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
pagedaemon_wakeup();
return (NULL);
}
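All three allocation classes (NORMAL, ZERO, and SYSTEM) now carry the
same DIAGNOSTIC check: an empty cache queue with a nonzero v_cache_count
means the queue and its counter have diverged, which is precisely the
corruption this commit is chasing. A restatement of the invariant (the
assertion form below is illustrative, not in the commit):

	/*
	 * Invariant: cnt.v_cache_count tracks the cache queue length,
	 * so an empty queue with a positive count implies a page was
	 * moved or freed without spl protection somewhere.
	 */
	if (TAILQ_FIRST(&vm_page_queue_cache) == NULL &&
	    cnt.v_cache_count > 0)
		printf("cache queue/count mismatch: %d\n",
		    cnt.v_cache_count);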
@ -661,7 +673,6 @@ vm_page_alloc(object, pindex, page_req)
m->flags = PG_BUSY;
}
m->wire_count = 0;
m->act_count = 0;
m->hold_count = 0;
m->busy = 0;
m->valid = 0;
@ -708,8 +719,6 @@ vm_page_activate(m)
vm_page_unqueue(m);
if (m->wire_count == 0) {
if (m->act_count < 5)
m->act_count = 5;
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
cnt.v_active_count++;
@ -847,8 +856,6 @@ vm_page_unwire(m)
cnt.v_wire_count--;
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
if (m->act_count < 5)
m->act_count = 5;
cnt.v_active_count++;
}
splx(s);
@ -868,7 +875,7 @@ void
vm_page_deactivate(m)
register vm_page_t m;
{
int spl;
int s;
/*
* Only move active pages -- ignore locked or already inactive ones.
@ -880,7 +887,7 @@ vm_page_deactivate(m)
if (m->queue == PQ_INACTIVE)
return;
spl = splvm();
s = splvm();
if (m->wire_count == 0 && m->hold_count == 0) {
if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
@ -889,7 +896,7 @@ vm_page_deactivate(m)
m->queue = PQ_INACTIVE;
cnt.v_inactive_count++;
}
splx(spl);
splx(s);
}
/*

vm_pageout.c

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.74 1996/05/29 05:15:33 dyson Exp $
* $Id: vm_pageout.c,v 1.75 1996/05/29 06:33:30 dyson Exp $
*/
/*
@ -104,6 +104,7 @@
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;
static struct kproc_desc page_kp = {
@ -170,7 +171,7 @@ vm_pageout_clean(m, sync)
int sync;
{
register vm_object_t object;
vm_page_t mc[2*VM_PAGEOUT_PAGE_COUNT];
vm_page_t mc[2*vm_pageout_page_count];
int pageout_count;
int i, forward_okay, backward_okay, page_base;
vm_pindex_t pindex = m->pindex;
@ -182,7 +183,7 @@ vm_pageout_clean(m, sync)
* Try to avoid the deadlock.
*/
if ((sync != VM_PAGEOUT_FORCE) &&
(object->type != OBJT_SWAP) &&
(object->type == OBJT_DEFAULT) &&
((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
return 0;
@ -199,9 +200,9 @@ vm_pageout_clean(m, sync)
if (!sync && object->backing_object) {
vm_object_collapse(object);
}
mc[VM_PAGEOUT_PAGE_COUNT] = m;
mc[vm_pageout_page_count] = m;
pageout_count = 1;
page_base = VM_PAGEOUT_PAGE_COUNT;
page_base = vm_pageout_page_count;
forward_okay = TRUE;
if (pindex != 0)
backward_okay = TRUE;
@ -244,7 +245,7 @@ vm_pageout_clean(m, sync)
(sync == VM_PAGEOUT_FORCE)) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
mc[VM_PAGEOUT_PAGE_COUNT + i] = p;
mc[vm_pageout_page_count + i] = p;
pageout_count++;
if (pageout_count == vm_pageout_page_count)
break;
@ -278,7 +279,7 @@ vm_pageout_clean(m, sync)
(sync == VM_PAGEOUT_FORCE)) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
mc[VM_PAGEOUT_PAGE_COUNT - i] = p;
mc[vm_pageout_page_count - i] = p;
pageout_count++;
page_base--;
if (pageout_count == vm_pageout_page_count)
@ -524,8 +525,6 @@ vm_pageout_scan()
vm_object_t object;
int force_wakeup = 0;
int vnodes_skipped = 0;
int usagefloor;
int i;
int s;
@ -538,11 +537,11 @@ vm_pageout_scan()
*/
pages_freed = 0;
addl_page_shortage = 0;
maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
MAXLAUNDER : cnt.v_inactive_target;
rescan0:
addl_page_shortage = 0;
maxscan = cnt.v_inactive_count;
for( m = TAILQ_FIRST(&vm_page_queue_inactive);
@ -554,14 +553,17 @@ vm_pageout_scan()
cnt.v_pdpages++;
if (m->queue != PQ_INACTIVE)
if (m->queue != PQ_INACTIVE) {
goto rescan0;
}
next = TAILQ_NEXT(m, pageq);
if (m->hold_count) {
s = splvm();
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
splx(s);
addl_page_shortage++;
continue;
}
@ -608,8 +610,10 @@ vm_pageout_scan()
object = m->object;
if (object->flags & OBJ_DEAD) {
s = splvm();
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
splx(s);
continue;
}
@ -620,14 +624,20 @@ vm_pageout_scan()
(m->hold_count == 0) &&
(m->busy == 0) &&
(m->flags & PG_BUSY) == 0) {
s = splvm();
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
splx(s);
}
if (object->flags & OBJ_MIGHTBEDIRTY)
++vnodes_skipped;
continue;
}
/*
* The page might have been moved to another queue
* during potential blocking in vget() above.
*/
if (m->queue != PQ_INACTIVE) {
if (object->flags & OBJ_MIGHTBEDIRTY)
++vnodes_skipped;
@ -635,16 +645,24 @@ vm_pageout_scan()
continue;
}
/*
* The page may have been busied during the blocking in
* vput(); We don't move the page back onto the end of
* the queue so that statistics are more correct if we don't.
*/
if (m->busy || (m->flags & PG_BUSY)) {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
vput(vp);
continue;
}
/*
* If the page has become held, then skip it
*/
if (m->hold_count) {
s = splvm();
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
splx(s);
if (object->flags & OBJ_MIGHTBEDIRTY)
++vnodes_skipped;
vput(vp);
@ -689,9 +707,23 @@ vm_pageout_scan()
}
pcount = cnt.v_active_count;
s = splvm();
m = TAILQ_FIRST(&vm_page_queue_active);
while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
if (m->queue != PQ_ACTIVE) {
#if defined(DIAGNOSTIC)
printf("vm_pageout_scan: page not on active queue: %d, pindex: 0x%x, flags: 0x%x, ",
m->queue, m->pindex, m->flags);
if (m->object == kmem_object)
printf("kmem object\n");
else if (m->object == kernel_object)
printf("kernel object\n");
else
printf("object type: %d\n", m->object->type);
#endif
break;
}
next = TAILQ_NEXT(m, pageq);
/*
@ -700,8 +732,10 @@ vm_pageout_scan()
if ((m->busy != 0) ||
(m->flags & PG_BUSY) ||
(m->hold_count != 0)) {
s = splvm();
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
splx(s);
m = next;
continue;
}
@ -722,8 +756,10 @@ vm_pageout_scan()
if ( (m->object->ref_count != 0) &&
(m->flags & PG_REFERENCED) ) {
m->flags &= ~PG_REFERENCED;
s = splvm();
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
splx(s);
} else {
m->flags &= ~PG_REFERENCED;
if (page_shortage > 0) {
@ -740,8 +776,8 @@ vm_pageout_scan()
}
m = next;
}
splx(s);
s = splvm();
/*
* We try to maintain some *really* free pages, this allows interrupt
* code to be guaranteed space.
@ -753,6 +789,7 @@ vm_pageout_scan()
vm_page_free(m);
cnt.v_dfree++;
}
splx(s);
/*
* If we didn't get enough free pages, and we have skipped a vnode
@ -838,14 +875,16 @@ vm_size_t count;
cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
else
cnt.v_free_min = 4;
cnt.v_pageout_free_min = 2 + VM_PAGEOUT_PAGE_COUNT
+ cnt.v_interrupt_free_min;
cnt.v_free_reserved = 4 + cnt.v_pageout_free_min + (count / 768);
cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
cnt.v_interrupt_free_min;
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (count / 768);
cnt.v_free_min += cnt.v_free_reserved;
return 1;
}
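With the new formulas the reserve thresholds scale with MAXBSIZE rather
than a bare constant. A worked example under stated assumptions (4 KB
pages, a 64 KB MAXBSIZE, v_interrupt_free_min == 2 as set in
vm_pageout() below, and vm_pageout_page_count == 16; none of these
values appear in this hunk):

	/*
	 * For an 8192-page (32 MB) machine:
	 *   v_free_min         = 4 + (8192 - 1024) / 200  = 39
	 *   v_pageout_free_min = (2 * 65536) / 4096 + 2   = 34
	 *   v_free_reserved    = 16 + 34 + 8192 / 768     = 60
	 *   v_free_min        += v_free_reserved          -> 99
	 */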
#ifdef unused
int
vm_pageout_free_pages(object, add)
vm_object_t object;
@ -853,6 +892,7 @@ int add;
{
return vm_pageout_free_page_calc(object->size);
}
#endif
/*
* vm_pageout is the high level pageout daemon.
@ -867,6 +907,8 @@ vm_pageout()
*/
cnt.v_interrupt_free_min = 2;
if (cnt.v_page_count < 2000)
vm_pageout_page_count = 8;
vm_pageout_free_page_calc(cnt.v_page_count);
/*