VM level code cleanups.

1)	Start using TSM (type-stable memory).
	Struct procs continue to point to their upages structure after being freed.
	Struct vmspace continues to point to its pte object and kva space for the kstack.
	u_map is now superfluous.
2)	vm_maps don't need to be reference counted.  They always exist either
	in the kernel or in a vmspace, and the vmspaces themselves are managed
	by reference counts.
3)	Remove the "wired" vm_map nonsense.
4)	No need to keep a cache of kernel stack kvas.
5)	Get rid of the strange-looking ++var, and change it to var++.
6)	Change more data structures to use our "zone" allocator.  Added
	struct proc, struct vmspace, and struct vnode.  This saves a significant
	amount of kva space and physical memory.  Additionally, this enables
	TSM for the zone-managed memory.  (A condensed usage sketch follows
	this list.)
7)	Keep ioopt disabled for now.
8)	Remove the now-bogus "single use" map concept.
9)	Use generation counts or ids for data structures residing in TSM, where
	this allows us to avoid unneeded restart overhead during traversals in
	which blocking might occur.  (A sketch of the idiom appears at the end
	of this message.)
10)	Account better for memory deficits, so the pageout daemon will be able
	to make enough memory available (experimental.)
11)	Fix some vnode locking problems. (From Tor, I think.)
12)	Add a check in ufs_lookup to avoid lots of unneeded calls to bcmp.
	(experimental.)
13)	Significantly shrink, clean up, and slightly speed up the vm_fault.c
	code.  Use generation counts, get rid of unneeded collapse operations,
	and clean up the cluster code.
14)	Make vm_zone more suitable for TSM.
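
For reference, the zone conversion in 6) boils down to the pattern sketched
below.  This is a condensed, illustrative extract of the kern_proc.c,
kern_fork.c and kern_exit.c hunks in the diff, not a complete listing.

	vm_zone_t proc_zone;

	/* once, at boot time (procinit) -- carve out a type-stable zone */
	proc_zone = zinit("PROC", sizeof (struct proc), 0, 0, 5);

	/* fork1() -- replaces MALLOC(newproc, ..., M_PROC, M_WAITOK) */
	newproc = zalloc(proc_zone);

	/* wait1() -- replaces FREE(p, M_PROC); the storage stays typed, so a
	   stale pointer still references a struct proc (that is the TSM part) */
	zfree(proc_zone, p);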

This commit is partially a result of discussions and contributions from
other people, including DG, Tor Egge, PHK, and probably others whom I
have forgotten to attribute (so let me know if I forgot.)

This is not the infamous final cleanup of the vnode stuff, but a necessary
step.  Vnode mgmt should be correct, but things might still change, and
there is still some missing stuff (like ioopt, physical backing of
non-merged cache files, and debugging of layering concepts.)
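
The generation-count idiom referred to in 9) and 13) looks roughly like the
sketch below.  The names obj and scan_entry are illustrative only; the real
uses are the object->generation checks in pmap_release() and the b_generation
checks in getblk() in the diff.

	vm_object_t obj;	/* illustrative object carrying a generation count */
	vm_page_t p, next;
	int curgeneration;

restart:
	curgeneration = obj->generation;
	for (p = TAILQ_FIRST(&obj->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		scan_entry(p);				/* may block */
		if (obj->generation != curgeneration)
			goto restart;		/* list changed while we slept */
		/* generation unchanged: the list is intact, keep walking */
	}
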
Author:  John Dyson
Date:    1998-01-22 17:30:44 +00:00
Parent:  ececc3f031
Commit:  2d8acc0f4a
Notes:   svn2git (2020-12-20 02:59:44 +00:00), svn path=/head/; revision=32702

38 changed files with 492 additions and 605 deletions

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.280 1997/12/27 02:28:27 peter Exp $
* $Id: machdep.c,v 1.281 1998/01/12 05:16:03 dyson Exp $
*/
#include "apm.h"
@ -343,21 +343,19 @@ cpu_startup(dummy)
#ifdef BOUNCE_BUFFERS
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) +
maxbkva + pager_map_size, TRUE);
io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
maxbkva + pager_map_size);
io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva);
#else
clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size, TRUE);
(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
#endif
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf*BKVASIZE), TRUE);
(nbuf*BKVASIZE));
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size, TRUE);
(nswbuf*MAXPHYS) + pager_map_size);
pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+PAGE_SIZE)), TRUE);
u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(maxproc*UPAGES*PAGE_SIZE), FALSE);
(16*(ARG_MAX+PAGE_SIZE)));
/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
@ -371,7 +369,7 @@ cpu_startup(dummy)
mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
bzero(mclrefcnt, mb_map_size / MCLBYTES);
mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
mb_map_size, FALSE);
mb_map_size);
mb_map->system_map = 1;
}

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.176 1997/12/22 10:06:09 dyson Exp $
* $Id: pmap.c,v 1.177 1998/01/17 09:16:18 dyson Exp $
*/
/*
@ -218,9 +218,6 @@ static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
vm_offset_t pmap_kmem_choose(vm_offset_t addr) ;
void pmap_collect(void);
#define PDSTACKMAX 6
static vm_offset_t pdstack[PDSTACKMAX];
static int pdstackptr;
unsigned pdir4mb;
/*
@ -867,13 +864,19 @@ pmap_new_proc(p)
/*
* allocate object for the upages
*/
upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES);
p->p_upages_obj = upobj;
if ((upobj = p->p_upages_obj) == NULL) {
upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES);
p->p_upages_obj = upobj;
}
/* get a kernel virtual address for the UPAGES for this proc */
up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
if (up == NULL)
panic("pmap_new_proc: u_map allocation failed");
if ((up = p->p_addr) == NULL) {
up = (struct user *) kmem_alloc_pageable(kernel_map,
UPAGES * PAGE_SIZE);
if (up == NULL)
panic("pmap_new_proc: u_map allocation failed");
p->p_addr = up;
}
ptek = (unsigned *) vtopte((vm_offset_t) up);
@ -890,7 +893,7 @@ pmap_new_proc(p)
* Wire the page
*/
m->wire_count++;
++cnt.v_wire_count;
cnt.v_wire_count++;
/*
* Enter the page into the kernel address space.
@ -901,8 +904,6 @@ pmap_new_proc(p)
m->flags |= PG_MAPPED|PG_WRITEABLE;
m->valid = VM_PAGE_BITS_ALL;
}
p->p_addr = up;
}
/*
@ -918,13 +919,14 @@ pmap_dispose_proc(p)
vm_page_t m;
unsigned *ptek;
ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
upobj = p->p_upages_obj;
ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_dispose_proc: upage already missing???");
*(ptek + i) = 0;
if (cpu_class >= CPUCLASS_586)
invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE);
@ -934,9 +936,6 @@ pmap_dispose_proc(p)
if (cpu_class < CPUCLASS_586)
invltlb();
vm_object_deallocate(upobj);
kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
}
/*
@ -1120,6 +1119,7 @@ pmap_pinit0(pmap)
pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
#else
void
@ -1139,23 +1139,20 @@ pmap_pinit(pmap)
register struct pmap *pmap;
{
vm_page_t ptdpg;
/*
* No need to allocate page table space yet but we do need a valid
* page directory table.
*/
if (pdstackptr > 0) {
--pdstackptr;
pmap->pm_pdir = (pd_entry_t *)pdstack[pdstackptr];
} else {
if (pmap->pm_pdir == NULL)
pmap->pm_pdir =
(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
}
/*
* allocate object for the ptes
*/
pmap->pm_pteobj = vm_object_allocate( OBJT_DEFAULT, PTDPTDI + 1);
if (pmap->pm_pteobj == NULL)
pmap->pm_pteobj = vm_object_allocate( OBJT_DEFAULT, PTDPTDI + 1);
/*
* allocate the page directory page
@ -1187,6 +1184,7 @@ pmap_pinit(pmap)
pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
static int
@ -1218,7 +1216,7 @@ pmap_release_free_page(pmap, p)
* Remove the page table page from the processes address space.
*/
pde[p->pindex] = 0;
--pmap->pm_stats.resident_count;
pmap->pm_stats.resident_count--;
if (p->hold_count) {
panic("pmap_release: freeing held page table page");
@ -1236,8 +1234,7 @@ pmap_release_free_page(pmap, p)
pmap_kremove((vm_offset_t) pmap->pm_pdir);
}
if (pmap->pm_ptphint &&
(pmap->pm_ptphint->pindex == p->pindex))
if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
pmap->pm_ptphint = NULL;
vm_page_free_zero(p);
@ -1286,14 +1283,14 @@ _pmap_allocpte(pmap, ptepindex)
}
if (m->wire_count == 0)
++cnt.v_wire_count;
++m->wire_count;
cnt.v_wire_count++;
m->wire_count++;
/*
* Increment the hold count for the page table page
* (denoting a new mapping.)
*/
++m->hold_count;
m->hold_count++;
/*
* Map the pagetable page into the process address space, if
@ -1375,7 +1372,7 @@ pmap_allocpte(pmap, va)
m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
pmap->pm_ptphint = m;
}
++m->hold_count;
m->hold_count++;
return m;
}
/*
@ -1400,6 +1397,7 @@ pmap_release(pmap)
{
vm_page_t p,n,ptdpg;
vm_object_t object = pmap->pm_pteobj;
int curgeneration;
#if defined(DIAGNOSTIC)
if (object->ref_count != 1)
@ -1408,29 +1406,22 @@ pmap_release(pmap)
ptdpg = NULL;
retry:
curgeneration = object->generation;
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
n = TAILQ_NEXT(p, listq);
if (p->pindex == PTDPTDI) {
ptdpg = p;
continue;
}
if (!pmap_release_free_page(pmap, p))
goto retry;
while (1) {
if (!pmap_release_free_page(pmap, p) &&
(object->generation != curgeneration))
goto retry;
}
}
if (ptdpg && !pmap_release_free_page(pmap, ptdpg))
goto retry;
vm_object_deallocate(object);
if (pdstackptr < PDSTACKMAX) {
pdstack[pdstackptr] = (vm_offset_t) pmap->pm_pdir;
++pdstackptr;
} else {
int pdstmp = pdstackptr - 1;
kmem_free(kernel_map, pdstack[pdstmp], PAGE_SIZE);
pdstack[pdstmp] = (vm_offset_t) pmap->pm_pdir;
}
pmap->pm_pdir = 0;
}
/*
@ -1456,7 +1447,7 @@ pmap_growkernel(vm_offset_t addr)
nkpt = 0;
while (pdir_pde(PTD, kernel_vm_end)) {
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
++nkpt;
nkpt++;
}
}
addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
@ -1465,7 +1456,7 @@ pmap_growkernel(vm_offset_t addr)
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
continue;
}
++nkpt;
nkpt++;
ptpkva = (vm_offset_t) vtopte(addr);
ptpidx = (ptpkva >> PAGE_SHIFT);
/*
@ -1520,7 +1511,6 @@ pmap_destroy(pmap)
if (count == 0) {
pmap_release(pmap);
panic("destroying a pmap is not yet implemented");
/* free((caddr_t) pmap, M_VMPMAP); */
}
}
@ -1642,7 +1632,7 @@ pmap_remove_entry(pmap, ppv, va)
if (pv) {
rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
--ppv->pv_list_count;
ppv->pv_list_count--;
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
}
@ -1681,7 +1671,7 @@ pmap_insert_entry(pmap, va, mpte, pa)
ppv = pa_to_pvh(pa);
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
++ppv->pv_list_count;
ppv->pv_list_count++;
splx(s);
}
@ -1922,7 +1912,7 @@ pmap_remove_all(pa)
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
--ppv->pv_list_count;
ppv->pv_list_count--;
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
}
@ -2132,7 +2122,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
}
if (mpte)
--mpte->hold_count;
mpte->hold_count--;
goto validate;
}
@ -2221,7 +2211,7 @@ pmap_enter_quick(pmap, va, pa, mpte)
*/
ptepindex = va >> PDRSHIFT;
if (mpte && (mpte->pindex == ptepindex)) {
++mpte->hold_count;
mpte->hold_count++;
} else {
retry:
/*
@ -2245,7 +2235,7 @@ pmap_enter_quick(pmap, va, pa, mpte)
}
if (mpte == NULL)
goto retry;
++mpte->hold_count;
mpte->hold_count++;
} else {
mpte = _pmap_allocpte(pmap, ptepindex);
}
@ -2449,35 +2439,35 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
* of pmap_object_init_pt, except it runs at page fault time instead
* of mmap time.
*/
#define PFBAK 3
#define PFFOR 3
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)
static int pmap_prefault_pageorder[] = {
-PAGE_SIZE, PAGE_SIZE,
-2 * PAGE_SIZE, 2 * PAGE_SIZE,
-3 * PAGE_SIZE, 3 * PAGE_SIZE
-4 * PAGE_SIZE, 4 * PAGE_SIZE
};
void
pmap_prefault(pmap, addra, entry, object)
pmap_prefault(pmap, addra, entry)
pmap_t pmap;
vm_offset_t addra;
vm_map_entry_t entry;
vm_object_t object;
{
int i;
vm_offset_t starta;
vm_offset_t addr;
vm_pindex_t pindex;
vm_page_t m, mpte;
if (entry->object.vm_object != object)
return;
vm_object_t object;
if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap))
return;
object = entry->object.vm_object;
starta = addra - PFBAK * PAGE_SIZE;
if (starta < entry->start) {
starta = entry->start;
@ -2519,7 +2509,6 @@ pmap_prefault(pmap, addra, entry, object)
break;
if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(m->busy == 0) &&
(m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((m->queue - m->pc) == PQ_CACHE) {
@ -2667,8 +2656,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
break;
}
addr += PAGE_SIZE;
++src_pte;
++dst_pte;
src_pte++;
dst_pte++;
}
}
}
@ -2882,7 +2871,7 @@ pmap_remove_pages(pmap, sva, eva)
npv = TAILQ_NEXT(pv, pv_plist);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
--ppv->pv_list_count;
ppv->pv_list_count--;
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
@ -3252,7 +3241,7 @@ void
pmap_activate(struct proc *p)
{
#if defined(SWTCH_OPTIM_STATS)
++tlb_flush_count;
tlb_flush_count++;
#endif
load_cr3(p->p_addr->u_pcb.pcb_cr3 =
vtophys(p->p_vmspace->vm_pmap.pm_pdir));

View File

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.94 1998/01/15 07:32:21 gibbs Exp $
* $Id: vm_machdep.c,v 1.95 1998/01/19 04:16:16 tegge Exp $
*/
#include "npx.h"
@ -699,6 +699,8 @@ cpu_wait(p)
{
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}

View File

@ -37,7 +37,7 @@
*
* @(#)procfs_mem.c 8.5 (Berkeley) 6/15/94
*
* $Id: procfs_mem.c,v 1.26 1997/08/02 14:32:14 bde Exp $
* $Id: procfs_mem.c,v 1.27 1997/08/12 04:34:28 sef Exp $
*/
/*
@ -107,7 +107,7 @@ procfs_rwmem(p, uio)
int page_offset; /* offset into page */
vm_map_entry_t out_entry;
vm_prot_t out_prot;
boolean_t wired, single_use;
boolean_t wired;
vm_pindex_t pindex;
u_int len;
vm_page_t m;
@ -180,7 +180,7 @@ procfs_rwmem(p, uio)
tmap = map;
error = vm_map_lookup(&tmap, pageno, reqprot,
&out_entry, &object, &pindex, &out_prot,
&wired, &single_use);
&wired);
if (error) {
error = EFAULT;

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* @(#)init_main.c 8.9 (Berkeley) 1/21/94
* $Id: init_main.c,v 1.78 1997/12/12 04:00:57 dyson Exp $
* $Id: init_main.c,v 1.79 1997/12/14 02:10:12 dyson Exp $
*/
#include "opt_devfs.h"
@ -403,7 +403,7 @@ proc0_init(dummy)
vmspace0.vm_refcnt = 1;
pmap_pinit0(&vmspace0.vm_pmap);
vm_map_init(&vmspace0.vm_map, round_page(VM_MIN_ADDRESS),
trunc_page(VM_MAXUSER_ADDRESS), TRUE);
trunc_page(VM_MAXUSER_ADDRESS));
vmspace0.vm_map.pmap = &vmspace0.vm_pmap;
p->p_addr = proc0paddr; /* XXX */

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
* $Id: kern_exit.c,v 1.63 1997/12/08 01:06:36 sef Exp $
* $Id: kern_exit.c,v 1.64 1997/12/16 17:40:14 eivind Exp $
*/
#include "opt_compat.h"
@ -71,6 +71,7 @@
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
@ -492,7 +493,7 @@ wait1(q, uap, compat)
* release while still running in process context.
*/
cpu_wait(p);
FREE(p, M_PROC);
zfree(proc_zone, p);
nprocs--;
return (0);
}

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
* $Id: kern_fork.c,v 1.49 1997/11/20 16:36:17 bde Exp $
* $Id: kern_fork.c,v 1.50 1997/12/12 04:00:58 dyson Exp $
*/
#include "opt_ktrace.h"
@ -60,6 +60,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#ifdef SMP
static int fast_vfork = 0; /* Doesn't work on SMP yet. */
@ -226,7 +227,7 @@ fork1(p1, flags)
}
/* Allocate new proc. */
MALLOC(newproc, struct proc *, sizeof(struct proc), M_PROC, M_WAITOK);
newproc = zalloc(proc_zone);
/*
* Setup linkage for kernel based threading

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
* $Id: kern_malloc.c,v 1.37 1997/10/28 19:00:53 phk Exp $
* $Id: kern_malloc.c,v 1.38 1997/12/05 05:36:36 dyson Exp $
*/
#include <sys/param.h>
@ -387,8 +387,7 @@ kmeminit(dummy)
kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
(vm_size_t)(npg * sizeof(struct kmemusage)));
kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE),
FALSE);
(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
kmem_map->system_map = 1;
for (indx = 0; indx < MINBUCKET + 16; indx++) {
if (1 << indx >= PAGE_SIZE)

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
* $Id: kern_proc.c,v 1.29 1997/10/11 18:31:23 phk Exp $
* $Id: kern_proc.c,v 1.30 1997/10/12 20:23:52 phk Exp $
*/
#include <sys/param.h>
@ -47,6 +47,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <vm/vm_zone.h>
static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
@ -82,6 +83,7 @@ struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
vm_zone_t proc_zone;
/*
* Initialize global process hashing structures.
@ -95,6 +97,7 @@ procinit()
pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
proc_zone = zinit("PROC", sizeof (struct proc), 0, 0, 5);
}
/*

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
* $Id: kern_subr.c,v 1.14 1997/12/19 09:03:23 dyson Exp $
* $Id: kern_subr.c,v 1.15 1998/01/06 05:15:41 dyson Exp $
*/
#include <sys/param.h>
@ -44,6 +44,7 @@
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
@ -141,7 +142,7 @@ uiomoveco(cp, n, uio, obj)
case UIO_USERSPACE:
case UIO_USERISPACE:
if (uio->uio_rw == UIO_READ) {
if (((cnt & PAGE_MASK) == 0) &&
if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
((((int) iov->iov_base) & PAGE_MASK) == 0) &&
((uio->uio_offset & PAGE_MASK) == 0) &&
((((int) cp) & PAGE_MASK) == 0)) {
@ -190,6 +191,8 @@ uioread(n, uio, obj, nread)
int error;
*nread = 0;
if (vfs_ioopt > 1)
return 0;
error = 0;
while (n > 0 && uio->uio_resid) {

View File

@ -28,7 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: sys_process.c,v 1.32 1997/11/12 12:28:12 tegge Exp $
* $Id: sys_process.c,v 1.33 1997/12/06 04:11:10 sef Exp $
*/
#include <sys/param.h>
@ -62,7 +62,7 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
vm_offset_t pageno; /* page number */
vm_map_entry_t out_entry;
vm_prot_t out_prot;
boolean_t wired, single_use;
boolean_t wired;
vm_pindex_t pindex;
/* Map page into kernel space */
@ -74,7 +74,7 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
tmap = map;
rv = vm_map_lookup (&tmap, pageno, VM_PROT_READ, &out_entry,
&object, &pindex, &out_prot, &wired, &single_use);
&object, &pindex, &out_prot, &wired);
if (rv != KERN_SUCCESS)
return EINVAL;
@ -110,7 +110,7 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
vm_offset_t pageno; /* page number */
vm_map_entry_t out_entry;
vm_prot_t out_prot;
boolean_t wired, single_use;
boolean_t wired;
vm_pindex_t pindex;
boolean_t fix_prot = 0;
@ -148,7 +148,7 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
tmap = map;
rv = vm_map_lookup (&tmap, pageno, VM_PROT_WRITE, &out_entry,
&object, &pindex, &out_prot, &wired, &single_use);
&object, &pindex, &out_prot, &wired);
if (rv != KERN_SUCCESS) {
return EINVAL;
}

View File

@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.142 1998/01/12 01:46:25 dyson Exp $
* $Id: vfs_bio.c,v 1.143 1998/01/17 09:16:26 dyson Exp $
*/
/*
@ -177,6 +177,7 @@ bufinit()
bp->b_wcred = NOCRED;
bp->b_qindex = QUEUE_EMPTY;
bp->b_vnbufs.le_next = NOLIST;
bp->b_generation = 0;
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
}
@ -654,6 +655,7 @@ brelse(struct buf * bp)
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
bp->b_dev = NODEV;
kvafreespace += bp->b_kvasize;
bp->b_generation++;
/* buffers with junk contents */
} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
@ -663,6 +665,7 @@ brelse(struct buf * bp)
LIST_REMOVE(bp, b_hash);
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
bp->b_dev = NODEV;
bp->b_generation++;
/* buffers that are locked */
} else if (bp->b_flags & B_LOCKED) {
@ -1083,6 +1086,7 @@ getnewbuf(struct vnode *vp, daddr_t blkno,
brelvp(bp);
fillbuf:
bp->b_generation++;
/* we are not free, nor do we contain interesting data */
if (bp->b_rcred != NOCRED) {
@ -1348,6 +1352,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
int s;
struct bufhashhdr *bh;
int maxsize;
int generation;
if (vp->v_mount) {
maxsize = vp->v_mount->mnt_stat.f_iosize;
@ -1370,18 +1375,23 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
if (numfreebuffers < lofreebuffers) {
waitfreebuffers(slpflag, slptimeo);
}
if ((bp = gbincore(vp, blkno))) {
loop1:
generation = bp->b_generation;
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
if (bp->b_usecount < BUF_MAXUSE)
++bp->b_usecount;
if (!tsleep(bp,
(PRIBIO + 1) | slpflag, "getblk", slptimeo))
goto loop;
splx(s);
return (struct buf *) NULL;
(PRIBIO + 1) | slpflag, "getblk", slptimeo)) {
if (bp->b_generation != generation)
goto loop;
goto loop1;
} else {
splx(s);
return (struct buf *) NULL;
}
}
bp->b_flags |= B_BUSY | B_CACHE;
bremfree(bp);
@ -1394,6 +1404,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
*/
if (bp->b_bcount != size) {
bp->b_generation++;
if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
allocbuf(bp, size);
} else {
@ -1683,6 +1694,7 @@ allocbuf(struct buf * bp, int size)
m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
if (!m) {
VM_WAIT;
vm_pageout_deficit += (desiredpages - bp->b_npages);
goto doretry;
}
/*
@ -2240,6 +2252,7 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
VM_ALLOC_NORMAL);
if (!p) {
vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
VM_WAIT;
goto tryagain;
}

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.123 1998/01/12 03:15:01 dyson Exp $
* $Id: vfs_subr.c,v 1.124 1998/01/17 09:16:28 dyson Exp $
*/
/*
@ -68,6 +68,7 @@
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
#include <sys/sysctl.h>
#include <miscfs/specfs/specdev.h>
@ -120,6 +121,7 @@ struct simplelock mntvnode_slock;
struct simplelock vnode_free_list_slock;
static struct simplelock spechash_slock;
struct nfs_public nfs_pub; /* publicly exported FS */
static vm_zone_t vnode_zone;
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, &desiredvnodes, 0, "");
@ -144,6 +146,7 @@ vntblinit()
TAILQ_INIT(&vnode_tobefree_list);
simple_lock_init(&vnode_free_list_slock);
CIRCLEQ_INIT(&mountlist);
vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
}
/*
@ -457,8 +460,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_writecount = 0; /* XXX */
} else {
simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *) malloc((u_long) sizeof *vp,
M_VNODE, M_WAITOK);
vp = (struct vnode *) zalloc(vnode_zone);
bzero((char *) vp, sizeof *vp);
simple_lock_init(&vp->v_interlock);
vp->v_dd = vp;
@ -917,6 +919,7 @@ vget(vp, flags, p)
((vp->v_object == NULL) ||
(vp->v_object->flags & OBJ_DEAD))) {
vfs_object_create(vp, curproc, curproc->p_ucred, 0);
simple_lock(&vp->v_interlock);
}
if (flags & LK_TYPE_MASK) {
if (error = vn_lock(vp, flags | LK_INTERLOCK, p))
@ -1390,6 +1393,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
simple_lock(&vp->v_interlock);
/*
* Delete from old mount point vnode list, if on one.

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.9 (Berkeley) 3/30/95
* $Id: buf.h,v 1.43 1997/10/23 11:32:06 gibbs Exp $
* $Id: buf.h,v 1.44 1997/12/02 21:07:14 phk Exp $
*/
#ifndef _SYS_BUF_H_
@ -87,6 +87,7 @@ struct buf {
struct vnode *b_vp; /* Device vnode. */
int b_dirtyoff; /* Offset in buffer of dirty region. */
int b_dirtyend; /* Offset of end of dirty region. */
int b_generation; /* Generation count of buffer */
struct ucred *b_rcred; /* Read credentials reference. */
struct ucred *b_wcred; /* Write credentials reference. */
int b_validoff; /* Offset in buffer of valid region. */

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)proc.h 8.15 (Berkeley) 5/19/95
* $Id: proc.h,v 1.51 1997/12/12 04:00:48 dyson Exp $
* $Id: proc.h,v 1.52 1997/12/20 03:05:36 sef Exp $
*/
#ifndef _SYS_PROC_H_
@ -319,6 +319,9 @@ struct prochd {
struct proc *pfind __P((pid_t)); /* Find process by id. */
struct pgrp *pgfind __P((pid_t)); /* Find process group by id. */
struct vm_zone;
extern struct vm_zone *proc_zone;
int chgproccnt __P((uid_t uid, int diff));
int enterpgrp __P((struct proc *p, pid_t pgid, int mksess));
void fixjobc __P((struct proc *p, struct pgrp *pgrp, int entering));

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
* $Id: ffs_vfsops.c,v 1.64 1998/01/06 05:23:41 dyson Exp $
* $Id: ffs_vfsops.c,v 1.65 1998/01/17 09:16:43 dyson Exp $
*/
#include "opt_quota.h"
@ -386,6 +386,7 @@ ffs_reload(mp, cred, p)
* increases the opportunity for metadata caching.
*/
if ((devvp->v_type == VBLK) && (major(dev) < nblkdev)) {
simple_lock(&devvp->v_interlock);
vfs_object_create(devvp, p, p->p_ucred, 0);
}

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_lookup.c 8.15 (Berkeley) 6/16/95
* $Id: ufs_lookup.c,v 1.17 1997/09/10 19:47:37 phk Exp $
* $Id: ufs_lookup.c,v 1.18 1997/10/16 10:50:17 phk Exp $
*/
#include <sys/param.h>
@ -278,6 +278,7 @@ ufs_lookup(ap)
namlen = ep->d_namlen;
# endif
if (namlen == cnp->cn_namelen &&
(cnp->cn_nameptr[0] == ep->d_name[0]) &&
!bcmp(cnp->cn_nameptr, ep->d_name,
(unsigned)namlen)) {
/*

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
* $Id: ufs_readwrite.c,v 1.38 1997/12/29 01:03:50 dyson Exp $
* $Id: ufs_readwrite.c,v 1.39 1998/01/06 05:24:04 dyson Exp $
*/
#ifdef LFS_READWRITE
@ -112,7 +112,7 @@ READ(ap)
if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
break;
#if 1
#if 0
if ((vfs_ioopt > 1) && vp->v_object) {
int nread, toread;
vm_object_reference(vp->v_object);

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: pmap.h,v 1.24 1997/08/05 23:03:24 dyson Exp $
* $Id: pmap.h,v 1.25 1997/12/14 02:10:30 dyson Exp $
*/
/*
@ -123,8 +123,7 @@ void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_remove_pages __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_zero_page __P((vm_offset_t));
void pmap_prefault __P((pmap_t pmap, vm_offset_t addra,
vm_map_entry_t entry, vm_object_t object));
void pmap_prefault __P((pmap_t, vm_offset_t, vm_map_entry_t));
int pmap_mincore __P((pmap_t pmap, vm_offset_t addr));
void pmap_new_proc __P((struct proc *p));
void pmap_dispose_proc __P((struct proc *p));

View File

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.80 1997/12/24 15:05:21 dyson Exp $
* $Id: swap_pager.c,v 1.81 1998/01/17 09:16:47 dyson Exp $
*/
/*
@ -477,30 +477,31 @@ swap_pager_free_swap(object)
/*
* Free left over swap blocks
*/
s = splvm();
swb = object->un_pager.swp.swp_blocks;
if (!swb)
if (swb == NULL) {
return;
}
s = splvm();
for (i = 0; i < object->un_pager.swp.swp_nblocks; i++, swb++) {
for (j = 0; j < SWB_NPAGES; j++) {
if (swb->swb_block[j] != SWB_EMPTY) {
/*
* initially the length of the run is zero
*/
* initially the length of the run is zero
*/
if (block_count == 0) {
first_block = swb->swb_block[j];
block_count = btodb(PAGE_SIZE);
swb->swb_block[j] = SWB_EMPTY;
/*
* if the new block can be included into the current run
*/
* if the new block can be included into the current run
*/
} else if (swb->swb_block[j] == first_block + block_count) {
block_count += btodb(PAGE_SIZE);
swb->swb_block[j] = SWB_EMPTY;
/*
* terminate the previous run, and start a new one
*/
* terminate the previous run, and start a new one
*/
} else {
swap_pager_freeswapspace(object, first_block,
(unsigned) first_block + block_count - 1);

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
* $Id: vm_extern.h,v 1.35 1997/12/06 02:23:27 dyson Exp $
* $Id: vm_extern.h,v 1.36 1997/12/31 02:35:29 alex Exp $
*/
#ifndef _VM_EXTERN_H_
@ -70,7 +70,7 @@ void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_init __P((vm_offset_t, vm_offset_t));
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t));
void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
int swaponvp __P((struct proc *, struct vnode *, dev_t , u_long));
@ -86,7 +86,7 @@ int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int,
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_exec __P((struct proc *));
void vmspace_unshare __P((struct proc *));

View File

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.74 1998/01/12 01:44:25 dyson Exp $
* $Id: vm_fault.c,v 1.75 1998/01/17 09:16:49 dyson Exp $
*/
/*
@ -131,12 +131,13 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
vm_prot_t prot;
int result;
boolean_t wired;
boolean_t su;
boolean_t lookup_still_valid;
int map_generation;
vm_page_t old_m;
vm_object_t next_object;
vm_page_t marray[VM_FAULT_READ];
int hardfault = 0;
int faultcount;
struct vnode *vp = NULL;
struct proc *p = curproc; /* XXX */
@ -184,6 +185,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
RetryFault:;
faultcount = 0;
/*
* Find the backing store object and offset into it to begin the
@ -191,7 +193,7 @@ RetryFault:;
*/
if ((result = vm_map_lookup(&map, vaddr,
fault_type, &entry, &first_object,
&first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
if ((result != KERN_PROTECTION_FAILURE) ||
((fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)) {
return result;
@ -206,7 +208,7 @@ RetryFault:;
*/
result = vm_map_lookup(&map, vaddr,
VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
&entry, &first_object, &first_pindex, &prot, &wired, &su);
&entry, &first_object, &first_pindex, &prot, &wired);
if (result != KERN_SUCCESS) {
return result;
}
@ -220,6 +222,8 @@ RetryFault:;
entry->max_protection &= ~VM_PROT_WRITE;
}
map_generation = map->timestamp;
if (entry->eflags & MAP_ENTRY_NOFAULT) {
panic("vm_fault: fault on nofault entry, addr: %lx",
vaddr);
@ -363,15 +367,20 @@ RetryFault:;
if (object->type != OBJT_DEFAULT &&
(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) {
int rv;
int faultcount;
int reqpage;
int ahead, behind;
ahead = VM_FAULT_READ_AHEAD;
behind = VM_FAULT_READ_BEHIND;
if (first_object->behavior == OBJ_RANDOM) {
ahead = 0;
behind = 0;
} else {
behind = (vaddr - entry->start) >> PAGE_SHIFT;
if (behind > VM_FAULT_READ_BEHIND)
behind = VM_FAULT_READ_BEHIND;
ahead = ((entry->end - vaddr) >> PAGE_SHIFT) - 1;
if (ahead > VM_FAULT_READ_AHEAD)
ahead = VM_FAULT_READ_AHEAD;
}
if ((first_object->type != OBJT_DEVICE) &&
@ -568,7 +577,7 @@ RetryFault:;
* first object. Note that we must mark the page dirty in the
* first object so that it will go out to swap when needed.
*/
if (lookup_still_valid &&
if (map_generation == map->timestamp &&
/*
* Only one shadow object
*/
@ -589,8 +598,17 @@ RetryFault:;
/*
* We don't chase down the shadow chain
*/
(object == first_object->backing_object)) {
(object == first_object->backing_object) &&
/*
* grab the lock if we need to
*/
(lookup_still_valid ||
(((entry->eflags & MAP_ENTRY_IS_A_MAP) == 0) &&
lockmgr(&map->lock,
LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) == 0))) {
lookup_still_valid = 1;
/*
* get rid of the unnecessary page
*/
@ -611,91 +629,12 @@ RetryFault:;
vm_page_copy(m, first_m);
}
/*
* This code handles the case where there are two references to the
* backing object, and one reference is getting a copy of the
* page. If the other reference is the only other object that
* points to the backing object, then perform a virtual copy
* from the backing object to the other object after the
* page is copied to the current first_object. If the other
* object already has the page, we destroy it in the backing object
* performing an optimized collapse-type operation. We don't
* bother removing the page from the backing object's swap space.
*/
if (lookup_still_valid &&
/*
* make sure that we have two shadow objs
*/
(object->shadow_count == 2) &&
/*
* And no COW refs -- note that there are sometimes
* temp refs to objs, but ignore that case -- we just
* punt.
*/
(object->ref_count == 2) &&
/*
* Noone else can look us up
*/
(object->handle == NULL) &&
/*
* Not something that can be referenced elsewhere
*/
((object->type == OBJT_DEFAULT) ||
(object->type == OBJT_SWAP)) &&
/*
* We don't bother chasing down object chain
*/
(object == first_object->backing_object)) {
vm_object_t other_object;
vm_pindex_t other_pindex, other_pindex_offset;
vm_page_t tm;
other_object = TAILQ_FIRST(&object->shadow_head);
if (other_object == first_object)
other_object = TAILQ_NEXT(other_object, shadow_list);
if (!other_object)
panic("vm_fault: other object missing");
if (other_object &&
(other_object->type == OBJT_DEFAULT) &&
(other_object->paging_in_progress == 0)) {
other_pindex_offset =
OFF_TO_IDX(other_object->backing_object_offset);
if (pindex >= other_pindex_offset) {
other_pindex = pindex - other_pindex_offset;
/*
* If the other object has the page, just free it.
*/
if ((tm = vm_page_lookup(other_object, other_pindex))) {
if ((tm->flags & PG_BUSY) == 0 &&
tm->busy == 0 &&
tm->valid == VM_PAGE_BITS_ALL) {
/*
* get rid of the unnecessary page
*/
vm_page_protect(m, VM_PROT_NONE);
PAGE_WAKEUP(m);
vm_page_free(m);
m = NULL;
tm->dirty = VM_PAGE_BITS_ALL;
first_m->dirty = VM_PAGE_BITS_ALL;
}
} else {
/*
* If the other object doesn't have the page,
* then we move it there.
*/
vm_page_rename(m, other_object, other_pindex);
m->dirty = VM_PAGE_BITS_ALL;
m->valid = VM_PAGE_BITS_ALL;
}
}
}
}
if (m) {
if (m->queue != PQ_ACTIVE)
if (m->queue != PQ_ACTIVE) {
vm_page_activate(m);
m->act_count = 0;
}
/*
* We no longer need the old page or object.
*/
@ -712,16 +651,6 @@ RetryFault:;
object = first_object;
pindex = first_pindex;
/*
* Now that we've gotten the copy out of the way,
* let's try to collapse the top object.
*
* But we have to play ugly games with
* paging_in_progress to do that...
*/
vm_object_pip_wakeup(object);
vm_object_collapse(object);
object->paging_in_progress++;
} else {
prot &= ~VM_PROT_WRITE;
}
@ -732,7 +661,8 @@ RetryFault:;
* lookup.
*/
if (!lookup_still_valid) {
if (!lookup_still_valid &&
(map->timestamp != map_generation)) {
vm_object_t retry_object;
vm_pindex_t retry_pindex;
vm_prot_t retry_prot;
@ -751,7 +681,8 @@ RetryFault:;
* and will merely take another fault.
*/
result = vm_map_lookup(&map, vaddr, fault_type & ~VM_PROT_WRITE,
&entry, &retry_object, &retry_pindex, &retry_prot, &wired, &su);
&entry, &retry_object, &retry_pindex, &retry_prot, &wired);
map_generation = map->timestamp;
/*
* If we don't need the page any longer, put it on the active
@ -808,8 +739,9 @@ RetryFault:;
m->flags &= ~PG_ZERO;
pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0))
pmap_prefault(map->pmap, vaddr, entry, first_object);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
pmap_prefault(map->pmap, vaddr, entry);
}
m->flags |= PG_MAPPED|PG_REFERENCED;
if (fault_flags & VM_FAULT_HOLD)
@ -912,6 +844,7 @@ vm_fault_user_wire(map, start, end)
* Inform the physical mapping system that the range of addresses may
* not fault, so that page tables and such can be locked down as well.
*/
pmap_pageable(pmap, start, end, FALSE);
/*
@ -1087,12 +1020,10 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t *marray;
int *reqpage;
{
int i;
int i,j;
vm_object_t object;
vm_pindex_t pindex, startpindex, endpindex, tpindex;
vm_offset_t size;
vm_page_t rtm;
int treqpage;
int cbehind, cahead;
object = m->object;
@ -1112,8 +1043,9 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
*/
if (!vm_pager_has_page(object,
OFF_TO_IDX(object->paging_offset) + pindex, &cbehind, &cahead))
OFF_TO_IDX(object->paging_offset) + pindex, &cbehind, &cahead)) {
return 0;
}
if ((cbehind == 0) && (cahead == 0)) {
*reqpage = 0;
@ -1135,91 +1067,78 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
if ((rahead + rbehind) >
((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
pagedaemon_wakeup();
*reqpage = 0;
marray[0] = m;
*reqpage = 0;
return 1;
}
/*
* scan backward for the read behind pages -- in memory or on disk not
* in same object
* scan backward for the read behind pages -- in memory
*/
tpindex = pindex - 1;
if (tpindex < pindex) {
if (rbehind > pindex)
if (pindex > 0) {
if (rbehind > pindex) {
rbehind = pindex;
startpindex = pindex - rbehind;
while (tpindex >= startpindex) {
startpindex = 0;
} else {
startpindex = pindex - rbehind;
}
for ( tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
if (vm_page_lookup( object, tpindex)) {
startpindex = tpindex + 1;
break;
}
if (tpindex == 0)
break;
tpindex -= 1;
}
for(i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
if (rtm == NULL) {
for (j = 0; j < i; j++) {
FREE_PAGE(marray[j]);
}
marray[0] = m;
*reqpage = 0;
return 1;
}
marray[i] = rtm;
}
} else {
startpindex = pindex;
startpindex = 0;
i = 0;
}
marray[i] = m;
/* page offset of the required page */
*reqpage = i;
tpindex = pindex + 1;
i++;
/*
* scan forward for the read ahead pages -- in memory or on disk not
* in same object
* scan forward for the read ahead pages
*/
tpindex = pindex + 1;
endpindex = pindex + (rahead + 1);
endpindex = tpindex + rahead;
if (endpindex > object->size)
endpindex = object->size;
while (tpindex < endpindex) {
if ( vm_page_lookup(object, tpindex)) {
for( ; tpindex < endpindex; i++, tpindex++) {
if (vm_page_lookup(object, tpindex)) {
break;
}
tpindex += 1;
}
endpindex = tpindex;
/* calculate number of bytes of pages */
size = endpindex - startpindex;
/* calculate the page offset of the required page */
treqpage = pindex - startpindex;
/* see if we have space (again) */
if ((cnt.v_free_count + cnt.v_cache_count) >
(cnt.v_free_reserved + size)) {
/*
* get our pages and don't block for them
*/
for (i = 0; i < size; i++) {
if (i != treqpage) {
rtm = vm_page_alloc(object,
startpindex + i,
VM_ALLOC_NORMAL);
if (rtm == NULL) {
if (i < treqpage) {
int j;
for (j = 0; j < i; j++) {
FREE_PAGE(marray[j]);
}
*reqpage = 0;
marray[0] = m;
return 1;
} else {
size = i;
*reqpage = treqpage;
return size;
}
}
marray[i] = rtm;
} else {
marray[i] = m;
}
}
*reqpage = treqpage;
return size;
rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
if (rtm == NULL) {
break;
}
marray[i] = rtm;
}
*reqpage = 0;
marray[0] = m;
return 1;
/* return number of bytes of pages */
return i;
}

View File

@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.68 1997/12/06 02:23:30 dyson Exp $
* $Id: vm_glue.c,v 1.69 1997/12/11 02:10:55 dyson Exp $
*/
#include "opt_rlimit.h"
@ -448,7 +448,6 @@ int action;
continue;
++vm->vm_refcnt;
vm_map_reference(&vm->vm_map);
/*
* do not swapout a process that is waiting for VM
* data structures there is a possible deadlock.
@ -456,7 +455,6 @@ int action;
if (lockmgr(&vm->vm_map.lock,
LK_EXCLUSIVE | LK_NOWAIT,
(void *)0, curproc)) {
vm_map_deallocate(&vm->vm_map);
vmspace_free(vm);
continue;
}
@ -469,7 +467,6 @@ int action;
((action & VM_SWAP_IDLE) &&
(p->p_slptime > swap_idle_threshold2))) {
swapout(p);
vm_map_deallocate(&vm->vm_map);
vmspace_free(vm);
didswap++;
goto retry;

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.38 1997/08/02 14:33:26 bde Exp $
* $Id: vm_kern.c,v 1.39 1997/08/05 00:01:52 dyson Exp $
*/
/*
@ -227,11 +227,10 @@ kmem_free(map, addr, size)
* pageable Can the region be paged
*/
vm_map_t
kmem_suballoc(parent, min, max, size, pageable)
kmem_suballoc(parent, min, max, size)
register vm_map_t parent;
vm_offset_t *min, *max;
register vm_size_t size;
boolean_t pageable;
{
register int ret;
vm_map_t result;
@ -247,7 +246,7 @@ kmem_suballoc(parent, min, max, size, pageable)
}
*max = *min + size;
pmap_reference(vm_map_pmap(parent));
result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
result = vm_map_create(vm_map_pmap(parent), *min, *max);
if (result == NULL)
panic("kmem_suballoc: cannot create submap");
if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
@ -439,7 +438,7 @@ kmem_init(start, end)
{
register vm_map_t m;
m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
vm_map_lock(m);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
kernel_map = m;

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.106 1998/01/17 09:16:51 dyson Exp $
* $Id: vm_map.c,v 1.107 1998/01/21 12:18:00 dyson Exp $
*/
/*
@ -158,7 +158,7 @@ extern char kstack[];
extern int inmprotect;
static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
static vm_zone_t mapentzone, kmapentzone, mapzone;
static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
static struct vm_object kmapentobj, mapentobj, mapobj;
#define MAP_ENTRY_INIT 128
struct vm_map_entry map_entry_init[MAX_MAPENT];
@ -195,18 +195,18 @@ vm_map_startup()
* The remaining fields must be initialized by the caller.
*/
struct vmspace *
vmspace_alloc(min, max, pageable)
vmspace_alloc(min, max)
vm_offset_t min, max;
int pageable;
{
register struct vmspace *vm;
MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
vm_map_init(&vm->vm_map, min, max, pageable);
vm = zalloc(vmspace_zone);
bzero(&vm->vm_map, sizeof vm->vm_map);
vm_map_init(&vm->vm_map, min, max);
pmap_pinit(&vm->vm_pmap);
vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
vm->vm_refcnt = 1;
vm->vm_shm = NULL;
return (vm);
}
@ -218,6 +218,7 @@ vm_init2(void) {
NULL, 0, 0, 0, 1);
zinitna(mapzone, &mapobj,
NULL, 0, 0, 0, 1);
vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
pmap_init2();
vm_object_init2();
}
@ -242,13 +243,8 @@ vmspace_free(vm)
vm->vm_map.max_offset);
vm_map_unlock(&vm->vm_map);
while( vm->vm_map.ref_count != 1)
tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
--vm->vm_map.ref_count;
pmap_release(&vm->vm_pmap);
FREE(vm, M_VMMAP);
} else {
wakeup(&vm->vm_map.ref_count);
zfree(vmspace_zone, vm);
}
}
@ -260,15 +256,14 @@ vmspace_free(vm)
* the given lower and upper address bounds.
*/
vm_map_t
vm_map_create(pmap, min, max, pageable)
vm_map_create(pmap, min, max)
pmap_t pmap;
vm_offset_t min, max;
boolean_t pageable;
{
register vm_map_t result;
result = zalloc(mapzone);
vm_map_init(result, min, max, pageable);
vm_map_init(result, min, max);
result->pmap = pmap;
return (result);
}
@ -279,25 +274,21 @@ vm_map_create(pmap, min, max, pageable)
* The pmap is set elsewhere.
*/
void
vm_map_init(map, min, max, pageable)
vm_map_init(map, min, max)
register struct vm_map *map;
vm_offset_t min, max;
boolean_t pageable;
{
map->header.next = map->header.prev = &map->header;
map->nentries = 0;
map->size = 0;
map->ref_count = 1;
map->is_main_map = TRUE;
map->system_map = 0;
map->min_offset = min;
map->max_offset = max;
map->entries_pageable = pageable;
map->first_free = &map->header;
map->hint = &map->header;
map->timestamp = 0;
lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
simple_lock_init(&map->ref_lock);
}
/*
@ -348,67 +339,6 @@ vm_map_entry_create(map)
(entry)->prev->next = (entry)->next; \
}
/*
* vm_map_reference:
*
* Creates another valid reference to the given map.
*
*/
void
vm_map_reference(map)
register vm_map_t map;
{
if (map == NULL)
return;
map->ref_count++;
}
/*
* vm_map_deallocate:
*
* Removes a reference from the specified map,
* destroying it if no references remain.
* The map should not be locked.
*/
void
vm_map_deallocate(map)
register vm_map_t map;
{
register int c;
if (map == NULL)
return;
c = map->ref_count;
if (c == 0)
panic("vm_map_deallocate: deallocating already freed map");
if (c != 1) {
--map->ref_count;
wakeup(&map->ref_count);
return;
}
/*
* Lock the map, to wait out all other references to it.
*/
vm_map_lock_drain_interlock(map);
(void) vm_map_delete(map, map->min_offset, map->max_offset);
--map->ref_count;
if( map->ref_count != 0) {
vm_map_unlock(map);
return;
}
pmap_destroy(map->pmap);
vm_map_unlock(map);
zfree(mapzone, map);
}
/*
* SAVE_HINT:
*
@ -870,9 +800,7 @@ _vm_map_clip_start(map, entry, start)
vm_map_entry_link(map, entry->prev, new_entry);
if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
vm_map_reference(new_entry->object.share_map);
else
if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
vm_object_reference(new_entry->object.vm_object);
}
@ -931,9 +859,7 @@ _vm_map_clip_end(map, entry, end)
vm_map_entry_link(map, entry, new_entry);
if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
vm_map_reference(new_entry->object.share_map);
else
if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
vm_object_reference(new_entry->object.vm_object);
}
@ -995,8 +921,8 @@ vm_map_submap(map, start, end, submap)
if ((entry->start == start) && (entry->end == end) &&
((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
(entry->object.vm_object == NULL)) {
entry->object.sub_map = submap;
entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
vm_map_reference(entry->object.sub_map = submap);
result = KERN_SUCCESS;
}
vm_map_unlock(map);
@ -1117,6 +1043,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
current = current->next;
}
map->timestamp++;
vm_map_unlock(map);
return (KERN_SUCCESS);
}
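The map->timestamp bump added above is what lets callers that drop the map lock notice a change underneath them: record the timestamp, do the work that may block, and retry if the value has moved by the time the lock is regained. The following is a simplified, single-threaded model of that revalidation idiom; the helper names are invented, and the "concurrent" change is simulated inline.

#include <stdio.h>

struct map_model { unsigned int timestamp; };

/* Placeholder for any operation that edits the map. */
static void
map_modify_model(struct map_model *m)
{
        m->timestamp++;
}

/* Do work that may block; report whether the map changed meanwhile. */
static int
work_needs_retry(struct map_model *m, unsigned int saved)
{
        /* ... sleep, fault in a page, copy data, etc. ... */
        return (m->timestamp != saved);
}

int
main(void)
{
        struct map_model m = { 0 };
        unsigned int saved;
        int attempts = 0;

        do {
                saved = m.timestamp;
                attempts++;
                if (attempts == 1)
                        map_modify_model(&m);   /* simulate a racing change */
        } while (work_needs_retry(&m, saved));

        printf("lookup succeeded after %d attempt(s)\n", attempts);
        return (0);
}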
@ -1792,9 +1719,7 @@ vm_map_entry_delete(map, entry)
vm_map_entry_unlink(map, entry);
map->size -= entry->end - entry->start;
if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
vm_map_deallocate(entry->object.share_map);
} else {
if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
vm_object_deallocate(entry->object.vm_object);
}
@ -1997,27 +1922,10 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
* write-protected.
*/
if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
boolean_t su;
/*
* If the source entry has only one mapping, we can
* just protect the virtual address range.
*/
if (!(su = src_map->is_main_map)) {
su = (src_map->ref_count == 1);
}
if (su) {
pmap_protect(src_map->pmap,
src_entry->start,
src_entry->end,
src_entry->protection & ~VM_PROT_WRITE);
} else {
vm_object_pmap_copy(src_entry->object.vm_object,
OFF_TO_IDX(src_entry->offset),
OFF_TO_IDX(src_entry->offset + (src_entry->end
- src_entry->start)));
}
pmap_protect(src_map->pmap,
src_entry->start,
src_entry->end,
src_entry->protection & ~VM_PROT_WRITE);
}
/*
@ -2074,8 +1982,7 @@ vmspace_fork(vm1)
vm_map_lock(old_map);
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
old_map->entries_pageable);
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
(caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
new_pmap = &vm2->vm_pmap; /* XXX */
@ -2171,8 +2078,7 @@ vmspace_exec(struct proc *p) {
struct vmspace *newvmspace;
vm_map_t map = &p->p_vmspace->vm_map;
newvmspace = vmspace_alloc(map->min_offset, map->max_offset,
map->entries_pageable);
newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
(caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
/*
@ -2182,12 +2088,10 @@ vmspace_exec(struct proc *p) {
* run it down. Even though there is little or no chance of blocking
* here, it is a good idea to keep this form for future mods.
*/
vm_map_reference(&oldvmspace->vm_map);
vmspace_free(oldvmspace);
p->p_vmspace = newvmspace;
if (p == curproc)
pmap_activate(p);
vm_map_deallocate(&oldvmspace->vm_map);
}
/*
@ -2203,12 +2107,10 @@ vmspace_unshare(struct proc *p) {
if (oldvmspace->vm_refcnt == 1)
return;
newvmspace = vmspace_fork(oldvmspace);
vm_map_reference(&oldvmspace->vm_map);
vmspace_free(oldvmspace);
p->p_vmspace = newvmspace;
if (p == curproc)
pmap_activate(p);
vm_map_deallocate(&oldvmspace->vm_map);
}
@ -2242,8 +2144,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_object_t *object, /* OUT */
vm_pindex_t *pindex, /* OUT */
vm_prot_t *out_prot, /* OUT */
boolean_t *wired, /* OUT */
boolean_t *single_use) /* OUT */
boolean_t *wired) /* OUT */
{
vm_map_t share_map;
vm_offset_t share_offset;
@ -2407,9 +2308,10 @@ RetryLookup:;
* don't allow writes.
*/
prot &= (~VM_PROT_WRITE);
prot &= ~VM_PROT_WRITE;
}
}
/*
* Create an object if necessary.
*/
@ -2440,12 +2342,7 @@ RetryLookup:;
* Return whether this is the only map sharing this data.
*/
if (!su) {
su = (share_map->ref_count == 1);
}
*out_prot = prot;
*single_use = su;
return (KERN_SUCCESS);
#undef RETURN
@ -2493,43 +2390,43 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
{
vm_map_t map;
vm_object_t first_object, oldobject, object;
vm_map_entry_t first_entry, entry;
vm_map_entry_t entry;
vm_prot_t prot;
boolean_t wired, su;
boolean_t wired;
int tcnt, rv;
vm_offset_t uaddr, start, end;
vm_offset_t uaddr, start, end, tend;
vm_pindex_t first_pindex, osize, oindex;
off_t ooffset;
int skipinit, allremoved;
int cnt;
if (npages)
*npages = 0;
allremoved = 0;
cnt = cnta;
uaddr = uaddra;
while (cnt > 0) {
map = mapa;
uaddr = uaddra;
skipinit = 0;
if ((vm_map_lookup(&map, uaddr,
VM_PROT_READ, &first_entry, &first_object,
&first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
VM_PROT_READ, &entry, &first_object,
&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
return EFAULT;
}
vm_map_clip_start(map, first_entry, uaddr);
vm_map_clip_start(map, entry, uaddr);
tcnt = cnt;
if ((uaddr + tcnt) > first_entry->end)
tcnt = first_entry->end - uaddr;
tend = uaddr + tcnt;
if (tend > entry->end) {
tcnt = entry->end - uaddr;
tend = entry->end;
}
vm_map_clip_end(map, first_entry, uaddr + tcnt);
vm_map_clip_end(map, entry, tend);
start = first_entry->start;
end = first_entry->end;
start = entry->start;
end = entry->end;
osize = atop(tcnt);
@ -2539,12 +2436,12 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
for (idx = 0; idx < osize; idx++) {
vm_page_t m;
if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
vm_map_lookup_done(map, first_entry);
vm_map_lookup_done(map, entry);
return 0;
}
if ((m->flags & PG_BUSY) ||
((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
vm_map_lookup_done(map, first_entry);
vm_map_lookup_done(map, entry);
return 0;
}
}
@ -2554,7 +2451,44 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
* If we are changing an existing map entry, just redirect
* the object, and change mappings.
*/
if ((first_object->ref_count == 1) &&
if ((first_object->type == OBJT_VNODE) &&
((oldobject = entry->object.vm_object) == first_object)) {
if ((entry->offset != cp) || (oldobject != srcobject)) {
/*
* Remove old window into the file
*/
pmap_remove (map->pmap, uaddr, tend);
/*
* Force copy on write for mmaped regions
*/
vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
/*
* Point the object appropriately
*/
if (oldobject != srcobject) {
/*
* Set the object optimization hint flag
*/
srcobject->flags |= OBJ_OPT;
vm_object_reference(srcobject);
entry->object.vm_object = srcobject;
if (oldobject) {
vm_object_deallocate(oldobject);
}
}
entry->offset = cp;
map->timestamp++;
} else {
pmap_remove (map->pmap, uaddr, tend);
}
} else if ((first_object->ref_count == 1) &&
(first_object->size == osize) &&
((first_object->type == OBJT_DEFAULT) ||
(first_object->type == OBJT_SWAP)) ) {
@ -2566,10 +2500,7 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
/*
* Remove old window into the file
*/
if (!allremoved) {
pmap_remove (map->pmap, uaddra, uaddra + cnt);
allremoved = 1;
}
pmap_remove (map->pmap, uaddr, tend);
/*
* Remove unneeded old pages
@ -2607,22 +2538,19 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
TAILQ_REMOVE(&oldobject->shadow_head,
first_object, shadow_list);
oldobject->shadow_count--;
if (oldobject->shadow_count == 0)
oldobject->flags &= ~OBJ_OPT;
vm_object_deallocate(oldobject);
}
TAILQ_INSERT_TAIL(&srcobject->shadow_head,
first_object, shadow_list);
srcobject->shadow_count++;
srcobject->flags |= OBJ_OPT;
first_object->backing_object = srcobject;
}
first_object->backing_object_offset = cp;
map->timestamp++;
} else {
skipinit = 1;
pmap_remove (map->pmap, uaddr, tend);
}
/*
* Otherwise, we have to do a logical mmap.
@ -2632,29 +2560,28 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
srcobject->flags |= OBJ_OPT;
vm_object_reference(srcobject);
object = srcobject;
ooffset = cp;
vm_object_shadow(&object, &ooffset, osize);
if (!allremoved) {
pmap_remove (map->pmap, uaddra, uaddra + cnt);
allremoved = 1;
}
pmap_remove (map->pmap, uaddr, tend);
vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
vm_map_lock_upgrade(map);
if (first_entry == &map->header) {
if (entry == &map->header) {
map->first_free = &map->header;
} else if (map->first_free->start >= start) {
map->first_free = first_entry->prev;
map->first_free = entry->prev;
}
SAVE_HINT(map, first_entry->prev);
vm_map_entry_delete(map, first_entry);
SAVE_HINT(map, entry->prev);
vm_map_entry_delete(map, entry);
rv = vm_map_insert(map, object, ooffset, start, end,
VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
object = srcobject;
ooffset = cp;
#if 0
vm_object_shadow(&object, &ooffset, osize);
#endif
rv = vm_map_insert(map, object, ooffset, start, tend,
VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
if (rv != KERN_SUCCESS)
panic("vm_uiomove: could not insert new entry: %d", rv);
@ -2663,15 +2590,14 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
/*
* Map the window directly, if it is already in memory
*/
if (!skipinit)
pmap_object_init_pt(map->pmap, uaddra,
srcobject, (vm_pindex_t) OFF_TO_IDX(cp), tcnt, 0);
pmap_object_init_pt(map->pmap, uaddr,
srcobject, oindex, tcnt, 0);
map->timestamp++;
vm_map_unlock(map);
cnt -= tcnt;
uaddra += tcnt;
uaddr += tcnt;
cp += tcnt;
if (npages)
*npages += osize;
@ -2714,8 +2640,7 @@ vm_freeze_copyopts(object, froma, toa)
vm_object_t robject, robjectn;
vm_pindex_t idx, from, to;
if ((vfs_ioopt == 0) ||
(object == NULL) ||
if ((object == NULL) ||
((object->flags & OBJ_OPT) == 0))
return;
@ -2836,9 +2761,9 @@ DB_SHOW_COMMAND(map, vm_map_print)
register vm_map_entry_t entry;
db_iprintf("%s map 0x%x: pmap=0x%x, ref=%d, nentries=%d, version=%d\n",
db_iprintf("%s map 0x%x: pmap=0x%x, nentries=%d, version=%d\n",
(map->is_main_map ? "Task" : "Share"),
(int) map, (int) (map->pmap), map->ref_count, map->nentries,
(int) map, (int) (map->pmap), map->nentries,
map->timestamp);
nlines++;


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.h,v 1.30 1998/01/06 05:26:00 dyson Exp $
* $Id: vm_map.h,v 1.31 1998/01/17 09:16:52 dyson Exp $
*/
/*
@ -132,12 +132,9 @@ struct vm_map {
vm_size_t size; /* virtual size */
unsigned char is_main_map; /* Am I a main map? */
unsigned char system_map; /* Am I a system map? */
int ref_count; /* Reference count */
struct simplelock ref_lock; /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
unsigned int timestamp; /* Version number */
vm_map_entry_t first_free; /* First free space hint */
boolean_t entries_pageable; /* map entries pageable?? */
struct pmap *pmap; /* Physical map */
#define min_offset header.start
#define max_offset header.end
@ -312,16 +309,16 @@ extern vm_size_t kentry_data_size;
boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
int vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, boolean_t, boolean_t));
struct pmap;
vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t, boolean_t));
vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t));
void vm_map_deallocate __P((vm_map_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t, boolean_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
vm_pindex_t *, vm_prot_t *, boolean_t *, boolean_t *));
vm_pindex_t *, vm_prot_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.106 1998/01/12 01:44:38 dyson Exp $
* $Id: vm_object.c,v 1.107 1998/01/17 09:16:55 dyson Exp $
*/
/*
@ -169,6 +169,7 @@ _vm_object_allocate(type, size, object)
object->page_hint = NULL;
object->last_read = 0;
object->generation++;
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
vm_object_count++;
@ -268,6 +269,7 @@ vm_object_vndeallocate(object)
object->ref_count--;
if (object->ref_count == 0) {
vp->v_flag &= ~VTEXT;
object->flags &= ~OBJ_OPT;
}
vrele(vp);
}
@ -372,7 +374,7 @@ vm_object_deallocate(object)
if (temp) {
TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
temp->shadow_count--;
if (temp->shadow_count == 0)
if (temp->ref_count == 0)
temp->flags &= ~OBJ_OPT;
}
vm_object_terminate(object);
@ -455,6 +457,19 @@ vm_object_terminate(object)
vm_pager_deallocate(object);
if (object->ref_count == 0) {
vm_object_dispose(object);
}
}
/*
* vm_object_dispose
*
 * Dispose of the object.
*/
void
vm_object_dispose(object)
vm_object_t object;
{
simple_lock(&vm_object_list_lock);
TAILQ_REMOVE(&vm_object_list, object, object_list);
vm_object_count--;
@ -464,7 +479,6 @@ vm_object_terminate(object)
*/
zfree(obj_zone, object);
wakeup(object);
}
}
/*
@ -498,6 +512,7 @@ vm_object_page_clean(object, start, end, syncio)
vm_page_t maf[vm_pageout_page_count];
vm_page_t mab[vm_pageout_page_count];
vm_page_t ma[vm_pageout_page_count];
int curgeneration;
struct proc *pproc = curproc; /* XXX */
if (object->type != OBJT_VNODE ||
@ -521,6 +536,8 @@ vm_object_page_clean(object, start, end, syncio)
p->flags |= PG_CLEANCHK;
rescan:
curgeneration = object->generation;
for(p = TAILQ_FIRST(&object->memq); p; p = np) {
np = TAILQ_NEXT(p, listq);
@ -540,11 +557,13 @@ vm_object_page_clean(object, start, end, syncio)
}
s = splvm();
if ((p->flags & PG_BUSY) || p->busy) {
while ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED|PG_REFERENCED;
tsleep(p, PVM, "vpcwai", 0);
splx(s);
goto rescan;
if (object->generation != curgeneration) {
splx(s);
goto rescan;
}
}
splx(s);
@ -617,7 +636,8 @@ vm_object_page_clean(object, start, end, syncio)
runlen = maxb + maxf + 1;
splx(s);
vm_pageout_flush(ma, runlen, 0);
goto rescan;
if (object->generation != curgeneration)
goto rescan;
}
VOP_FSYNC(vp, NULL, syncio, curproc);
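The curgeneration logic above makes the clean loop restart only when the object actually changed while it slept or flushed, instead of unconditionally after every flush. Below is a compact userland model of the same idiom; the object layout, the dirty array, and the simulated concurrent insert are stand-ins, not the kernel's structures.

#include <stdio.h>

#define NPAGES_MODEL 8

struct object_model {
        unsigned int generation;        /* bumped on page insert/remove */
        int dirty[NPAGES_MODEL];
};

/*
 * Stand-in for a flush that can block; the first call also simulates a
 * concurrent insert that bumps the generation while we "slept".
 */
static void
flush_page_model(struct object_model *obj, int idx, int *first)
{
        obj->dirty[idx] = 0;
        if (*first) {
                *first = 0;
                obj->generation++;
        }
}

int
main(void)
{
        struct object_model obj = { 0, { 1, 0, 1, 1, 0, 1, 0, 1 } };
        unsigned int curgeneration;
        int first = 1, scans = 0, i;

rescan:
        curgeneration = obj.generation;
        scans++;
        for (i = 0; i < NPAGES_MODEL; i++) {
                if (!obj.dirty[i])
                        continue;
                flush_page_model(&obj, i, &first);
                /* Restart only if the object really changed under us. */
                if (obj.generation != curgeneration)
                        goto rescan;
        }
        printf("object clean after %d scan(s)\n", scans);
        return (0);
}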


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.41 1998/01/06 05:26:07 dyson Exp $
* $Id: vm_object.h,v 1.42 1998/01/17 09:16:56 dyson Exp $
*/
/*
@ -87,6 +87,7 @@ struct vm_object {
TAILQ_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
TAILQ_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
TAILQ_HEAD(, vm_page) memq; /* list of resident pages */
int generation; /* generation ID */
objtype_t type; /* type of pager */
vm_size_t size; /* Object size */
int ref_count; /* How many refs?? */
@ -168,6 +169,7 @@ void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_pindex_t, vm_object_t *, vm_pindex_t *, boolean_t *));
void vm_object_deallocate __P((vm_object_t));
void vm_object_terminate __P((vm_object_t));
void vm_object_dispose __P((vm_object_t));
void vm_object_vndeallocate __P((vm_object_t));
void vm_object_init __P((void));
void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.85 1998/01/12 01:44:41 dyson Exp $
* $Id: vm_page.c,v 1.86 1998/01/17 09:16:59 dyson Exp $
*/
/*
@ -94,6 +94,7 @@ static vm_page_t vm_page_select_free __P((vm_object_t object,
* page structure.
*/
static int vm_page_bucket_generation; /* generation id for buckets */
static struct pglist *vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count; /* How big is array? */
static int vm_page_hash_mask; /* Mask for hash function */
@ -404,6 +405,7 @@ vm_page_insert(m, object, pindex)
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
TAILQ_INSERT_TAIL(bucket, m, hashq);
vm_page_bucket_generation++;
/*
* Now link into the object's list of backed pages.
@ -412,6 +414,7 @@ vm_page_insert(m, object, pindex)
TAILQ_INSERT_TAIL(&object->memq, m, listq);
m->flags |= PG_TABLED;
m->object->page_hint = m;
m->object->generation++;
/*
* And show that the object has one more resident page.
@ -448,6 +451,7 @@ vm_page_remove(m)
bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
TAILQ_REMOVE(bucket, m, hashq);
vm_page_bucket_generation++;
/*
* Now remove from the object's list of backed pages.
@ -460,6 +464,7 @@ vm_page_remove(m)
*/
m->object->resident_page_count--;
m->object->generation++;
m->flags &= ~PG_TABLED;
}
@ -480,6 +485,7 @@ vm_page_lookup(object, pindex)
{
register vm_page_t m;
register struct pglist *bucket;
int curgeneration;
int s;
/*
@ -488,15 +494,16 @@ vm_page_lookup(object, pindex)
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
s = splvm();
restart:
curgeneration = vm_page_bucket_generation;
for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) {
if (curgeneration != vm_page_bucket_generation)
goto restart;
if ((m->object == object) && (m->pindex == pindex)) {
splx(s);
m->object->page_hint = m;
return (m);
}
}
splx(s);
return (NULL);
}
@ -786,6 +793,7 @@ vm_page_alloc(object, pindex, page_req)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@ -807,6 +815,7 @@ vm_page_alloc(object, pindex, page_req)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@ -830,6 +839,7 @@ vm_page_alloc(object, pindex, page_req)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@ -845,6 +855,7 @@ vm_page_alloc(object, pindex, page_req)
#endif
} else {
splx(s);
vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@ -883,8 +894,6 @@ vm_page_alloc(object, pindex, page_req)
/* XXX before splx until vm_page_insert is safe */
vm_page_insert(m, object, pindex);
splx(s);
/*
* Don't wakeup too often - wakeup the pageout daemon when
* we would be nearly out of memory.
@ -894,7 +903,6 @@ vm_page_alloc(object, pindex, page_req)
(cnt.v_free_count < cnt.v_pageout_free_min))
pagedaemon_wakeup();
s = splvm();
if ((qtype == PQ_CACHE) &&
((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
oldobject && (oldobject->type == OBJT_VNODE) &&


@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.107 1998/01/12 01:44:44 dyson Exp $
* $Id: vm_pageout.c,v 1.108 1998/01/17 09:17:01 dyson Exp $
*/
/*
@ -126,9 +126,9 @@ SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif
int vm_pages_needed; /* Event on which pageout daemon sleeps */
int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */
int vm_pages_needed=0; /* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0; /* Estimated number of pages deficit */
int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */
extern int npendingio;
#if !defined(NO_SWAPPING)
@ -535,9 +535,7 @@ vm_pageout_map_deactivate_pages(map, desired)
vm_map_entry_t tmpe;
vm_object_t obj, bigobj;
vm_map_reference(map);
if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
vm_map_deallocate(map);
return;
}
@ -587,7 +585,6 @@ vm_pageout_map_deactivate_pages(map, desired)
pmap_remove(vm_map_pmap(map),
VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
vm_map_unlock(map);
vm_map_deallocate(map);
return;
}
#endif
@ -645,7 +642,7 @@ vm_pageout_scan()
*/
pages_freed = 0;
addl_page_shortage = 0;
addl_page_shortage = vm_pageout_deficit;
if (max_page_launder == 0)
max_page_launder = 1;
@ -1166,7 +1163,7 @@ vm_size_t count;
cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
cnt.v_interrupt_free_min;
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
cnt.v_pageout_free_min + (count / 2048) + PQ_L2_SIZE;
cnt.v_free_min += cnt.v_free_reserved;
return 1;
}
@ -1259,6 +1256,7 @@ vm_pageout()
splx(s);
vm_pager_sync();
vm_pageout_scan();
vm_pageout_deficit = 0;
vm_pager_sync();
wakeup(&cnt.v_free_count);
}
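vm_pageout_deficit ties failed allocations to the daemon's work estimate: vm_page_alloc() bumps it whenever a request cannot be satisfied, vm_pageout_scan() folds it into the page shortage, and the daemon clears it after a pass. The model below compresses that into a single thread with invented helper names, so it only illustrates the accounting, not the real queue handling.

#include <stdio.h>

static int free_count = 3;              /* pages currently free */
static int pageout_deficit;             /* pages callers failed to obtain */

/* Try to take a free page; record a deficit when we come up empty. */
static int
page_alloc_model(void)
{
        if (free_count > 0) {
                free_count--;
                return (1);
        }
        pageout_deficit++;
        return (0);
}

/* One daemon pass: reclaim the base target plus the recorded deficit. */
static void
pageout_scan_model(int target)
{
        int shortage = target + pageout_deficit;

        free_count += shortage;         /* pretend we laundered that many */
        pageout_deficit = 0;            /* accounted for; start fresh */
}

int
main(void)
{
        int i, got = 0;

        for (i = 0; i < 6; i++)         /* six requests against three pages */
                got += page_alloc_model();

        printf("satisfied %d of 6, deficit %d\n", got, pageout_deficit);
        pageout_scan_model(2);
        printf("after scan: %d free, deficit %d\n", free_count,
            pageout_deficit);
        return (0);
}

The effect is that a burst of failed allocations raises the next pass's reclaim target instead of being forgotten between wakeups.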


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.h,v 1.21 1997/12/06 02:23:36 dyson Exp $
* $Id: vm_pageout.h,v 1.22 1998/01/12 01:44:46 dyson Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@ -78,6 +78,7 @@
extern int vm_page_max_wired;
extern int vm_pages_needed; /* should be some "event" structure */
extern int vm_pageout_pages_needed;
extern int vm_pageout_deficit;
#define VM_PAGEOUT_ASYNC 0
#define VM_PAGEOUT_SYNC 1


@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vm_zone.c,v 1.13 1997/12/15 05:16:09 dyson Exp $
* $Id: vm_zone.c,v 1.14 1997/12/22 11:48:13 dyson Exp $
*/
#include <sys/param.h>
@ -39,6 +39,11 @@ static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");
* Note that the initial implementation of this had coloring, and
* absolutely no improvement (actually perf degradation) occurred.
*
* Note also that the zones are type stable. The only restriction is
* that the first two longwords of a data structure can be changed
* between allocations. Any data that must be stable between allocations
* must reside in areas after the first two longwords.
*
* zinitna, zinit, zbootinit are the initialization routines.
* zalloc, zfree, are the interrupt/lock unsafe allocation/free routines.
* zalloci, zfreei, are the interrupt/lock safe allocation/free routines.
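The note about the first two longwords reflects how the zone threads its free list through the items themselves: a freed item's leading word becomes the free-list link, so the rest of the structure keeps its last contents across free/alloc cycles, which is what makes the memory type stable. The sketch below shows that trick in userland with a static slab and a single link word (the kernel reserves two); all names are invented for the example.

#include <stdio.h>

struct item_model {
        void    *link;          /* first word: reused as the free-list link */
        int     generation;     /* survives free/alloc cycles (type stable) */
        char    payload[48];
};

struct zone_model {
        void    *zitems;        /* head of the free list */
};

/* Thread every item onto the zone's free list, first word as the link. */
static void
zone_bootstrap_model(struct zone_model *z, struct item_model *items, int n)
{
        int i;

        z->zitems = NULL;
        for (i = 0; i < n; i++) {
                ((void **)&items[i])[0] = z->zitems;
                z->zitems = &items[i];
        }
}

static void *
zalloc_model(struct zone_model *z)
{
        void *item = z->zitems;

        if (item != NULL)
                z->zitems = ((void **)item)[0];
        return (item);
}

static void
zfree_model(struct zone_model *z, void *item)
{
        ((void **)item)[0] = z->zitems;
        z->zitems = item;
}

int
main(void)
{
        static struct item_model slab[4];
        struct zone_model z;
        struct item_model *ip;

        zone_bootstrap_model(&z, slab, 4);
        ip = zalloc_model(&z);
        ip->generation = 42;
        zfree_model(&z, ip);            /* only the link word is touched */

        ip = zalloc_model(&z);          /* the same item comes right back */
        printf("generation survived free/alloc: %d\n", ip->generation);
        return (0);
}

In the kernel, that stability is what lets a stale pointer to a recycled item still be dereferenced safely long enough for a generation or id check to detect the recycling.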
@ -183,6 +188,7 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
z->znalloc = 0;
simple_lock_init(&z->zlock);
bzero(item, nitems * z->zsize);
z->zitems = NULL;
for (i = 0; i < nitems; i++) {
((void **) item)[0] = z->zitems;
@ -285,14 +291,16 @@ _zget(vm_zone_t z)
item = (char *) z->zkva + z->zpagecount * PAGE_SIZE;
for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
i++) {
vm_offset_t zkva;
m = vm_page_alloc(z->zobj, z->zpagecount,
z->zallocflag);
if (m == NULL)
break;
pmap_kenter(z->zkva + z->zpagecount * PAGE_SIZE,
VM_PAGE_TO_PHYS(m));
zkva = z->zkva + z->zpagecount * PAGE_SIZE;
pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
bzero((caddr_t) zkva, PAGE_SIZE);
z->zpagecount++;
}
nitems = (i * PAGE_SIZE) / z->zsize;
@ -314,13 +322,13 @@ _zget(vm_zone_t z)
*/
if (lockstatus(&kernel_map->lock)) {
int s;
s = splhigh();
s = splvm();
item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
splx(s);
} else {
item = (void *) kmem_alloc(kernel_map, nbytes);
}
bzero(item, nbytes);
nitems = nbytes / z->zsize;
}
z->ztotal += nitems;