These changes embody support for the fully coherent merged VM buffer cache,
much higher filesystem I/O performance, and much better paging performance.
They represent the culmination of over 6 months of R&D.

The majority of the merged VM/cache work is by John Dyson.

The following highlights the most significant changes. Additionally, there are
(mostly minor) changes to the various filesystem modules (nfs, msdosfs, etc.) to
support the new VM/buffer scheme.

vfs_bio.c:
Significant rewrite of most of vfs_bio to support the merged VM buffer cache
scheme.  The scheme is almost fully compatible with the old filesystem
interface.  Significant improvement in the number of opportunities for write
clustering.
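
For readers unfamiliar with the term, write clustering issues a run of adjacent
dirty blocks as one large I/O instead of one transfer per block. A toy,
self-contained illustration of the effect (this is not FreeBSD code; the names
and data here are made up):

/*
 * Toy illustration of write clustering: runs of adjacent dirty blocks
 * are issued as one large write instead of one I/O per block.
 */
#include <stdio.h>

#define NBLOCKS 16

int
main(void)
{
	/* 1 = dirty buffer at that logical block number */
	int dirty[NBLOCKS] = { 0,1,1,1,0,0,1,1,0,0,0,1,1,1,1,0 };
	int i = 0, start, clusters = 0, dirtyblocks = 0;

	while (i < NBLOCKS) {
		if (!dirty[i]) {
			i++;
			continue;
		}
		start = i;
		while (i < NBLOCKS && dirty[i]) {	/* extend the cluster */
			dirtyblocks++;
			i++;
		}
		printf("write blocks %d-%d as one I/O\n", start, i - 1);
		clusters++;
	}
	printf("%d clustered writes instead of %d single-block writes\n",
	    clusters, dirtyblocks);
	return (0);
}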

vfs_cluster.c, vfs_subr.c
Upgrade and performance enhancements in vfs layer code to support merged
VM/buffer cache.  Fixup of vfs_cluster to eliminate the bogus pagemove stuff.

vm_object.c:
Yet more improvements in the collapse code.  Elimination of some windows that
can cause list corruption.

vm_pageout.c:
Fixed it; it really works better now.  Somehow in 2.0, some "enhancements"
broke the code.  This code has been reworked from the ground up.

vm_fault.c, vm_page.c, pmap.c, vm_object.c
Support for small-block filesystems with merged VM/buffer cache scheme.

pmap.c vm_map.c
Dynamic kernel VM size; now we don't have to pre-allocate excessive numbers of
kernel PTs.
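
The mechanism behind this is the new pmap_growkernel() routine (visible in the
pmap.c diff below), which allocates kernel page tables on demand as the kernel
map grows. A stand-alone toy model of the idea (illustrative only; the names
and constants in this sketch are made up, and the real routine also propagates
each new page-directory entry into every process's page directory):

#include <stdio.h>
#include <stdint.h>

#define PDRSPAN (4u * 1024 * 1024)	/* one i386 page-directory entry maps 4 MB */
#define NPDE    1024

static int      pde_valid[NPDE];	/* toy page directory: 1 = page table present */
static uint32_t kernel_vm_end;		/* end of currently mapped kernel VA (toy) */

/* Grow the kernel page tables just far enough to cover addr. */
static void
grow_kernel(uint32_t addr)
{
	uint32_t slot;

	/* round the request up to the next 4 MB boundary */
	addr = (addr + PDRSPAN - 1) & ~(PDRSPAN - 1);
	while (kernel_vm_end < addr) {
		slot = kernel_vm_end / PDRSPAN;
		if (!pde_valid[slot])
			pde_valid[slot] = 1;	/* "allocate" a page-table page */
		kernel_vm_end += PDRSPAN;
	}
}

int
main(void)
{
	grow_kernel(6 * 1024 * 1024);	/* needs 6 MB: allocates two page tables */
	grow_kernel(5 * 1024 * 1024);	/* already covered: allocates nothing */
	printf("kernel_vm_end = %u MB\n", (unsigned)(kernel_vm_end >> 20));
	return (0);
}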

vm_glue.c
Much simpler and more effective swapping code.  No more gratuitous swapping.

proc.h
Fixed the problem where the p_lock flag was not being cleared on fork.

swap_pager.c, vnode_pager.c
Removal of old vfs_bio cruft that supported the past pseudo-coherency.  The
code doesn't need it anymore.

machdep.c
Changes to better support the parameter values for the merged VM/buffer cache
scheme.
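
For example (assuming 4 KB pages, so physmem is counted in pages), on a machine
with 16 MB of RAM (about 4096 pages) the new default in the cpu_startup() hunk
below works out to roughly nbuf = min(4096 / 30, 256) = 136 buffer headers, with
nswbuf then derived from nbuf; the old bufpages-based sizing is gone.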

machdep.c, kern_exec.c, vm_glue.c
Implemented a separate submap for temporary exec string space and another one
to contain process upages. This eliminates all map fragmentation problems
that previously existed.
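
The corresponding allocations appear in the cpu_startup() hunk of the machdep.c
diff below; condensed here for reference, with comments added:

	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    (16 * ARG_MAX), TRUE);			/* temporary exec string space */
	u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    (maxproc * UPAGES * PAGE_SIZE), FALSE);	/* process upages */

kern_exec.c then allocates argument/environment string space from exec_map
instead of kernel_map, and cpu_wait() frees the upages into u_map instead of
kernel_map (see the kern_exec.c and vm_machdep.c diffs).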

ffs_inode.c, ufs_inode.c, ufs_readwrite.c
Changes for merged VM/buffer cache.  Add "bypass" support for sneaking in on
busy buffers.

Submitted by:	John Dyson and David Greenman
Committed by David Greenman on 1995-01-09 16:06:02 +00:00
commit 0d94caffca (parent bf8af43789)
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=5455
90 changed files with 7304 additions and 6375 deletions

machdep.c:

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.98 1994/12/11 03:33:58 davidg Exp $
* $Id: machdep.c,v 1.99 1995/01/05 19:51:14 se Exp $
*/
#include "npx.h"
@ -114,17 +114,12 @@ char cpu_model[sizeof("Cy486DLC") + 1];
/*
* Declare these as initialized data so we can patch them.
*/
int nswbuf = 0;
int nswbuf = 128;
#ifdef NBUF
int nbuf = NBUF;
#else
int nbuf = 0;
#endif
#ifdef BUFPAGES
int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
#ifdef BOUNCE_BUFFERS
extern char *bouncememory;
@ -170,9 +165,7 @@ cpu_startup()
vm_offset_t maxaddr;
vm_size_t size = 0;
int firstaddr;
#ifdef BOUNCE_BUFFERS
vm_offset_t minaddr;
#endif /* BOUNCE_BUFFERS */
if (boothowto & RB_VERBOSE)
bootverbose++;
@ -261,33 +254,11 @@ cpu_startup()
valloc(msghdrs, struct msg, msginfo.msgtql);
valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif
/*
* Determine how many buffers to allocate.
* Use 20% of memory of memory beyond the first 2MB
* Insure a minimum of 16 fs buffers.
* We allocate 1/2 as many swap buffer headers as file i/o buffers.
*/
if (bufpages == 0)
bufpages = ((physmem << PGSHIFT) - 2048*1024) / NBPG / 6;
if (bufpages < 64)
bufpages = 64;
/*
* We must still limit the maximum number of buffers to be no
* more than 750 because we'll run out of kernel VM otherwise.
*/
bufpages = min(bufpages, 1500);
if (nbuf == 0) {
nbuf = bufpages / 2;
if (nbuf < 32)
nbuf = 32;
}
freebufspace = bufpages * NBPG;
if (nswbuf == 0) {
nswbuf = (nbuf / 2) &~ 1; /* force even */
if (nswbuf > 64)
nswbuf = 64; /* sanity */
}
if (nbuf == 0)
nbuf = min(physmem / 30, 256);
nswbuf = nbuf;
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);
@ -296,8 +267,10 @@ cpu_startup()
* If there is more than 16MB of memory, allocate some bounce buffers
*/
if (Maxmem > 4096) {
if (bouncepages == 0)
bouncepages = 96; /* largest physio size + extra */
if (bouncepages == 0) {
bouncepages = 64;
bouncepages += ((Maxmem - 4096) / 2048) * 32;
}
v = (caddr_t)((vm_offset_t)((vm_offset_t)v + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
valloc(bouncememory, char, bouncepages * PAGE_SIZE);
}
@ -333,6 +306,10 @@ cpu_startup()
(nbuf*MAXBSIZE), TRUE);
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size, TRUE);
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*ARG_MAX), TRUE);
u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(maxproc*UPAGES*PAGE_SIZE), FALSE);
/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
@ -353,8 +330,6 @@ cpu_startup()
if (boothowto & RB_CONFIG)
userconfig();
printf("avail memory = %d (%d pages)\n", ptoa(cnt.v_free_count), cnt.v_free_count);
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
#ifdef BOUNCE_BUFFERS
/*
@ -744,9 +719,11 @@ boot(arghowto)
for (iter = 0; iter < 20; iter++) {
nbusy = 0;
for (bp = &buf[nbuf]; --bp >= buf; )
if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
for (bp = &buf[nbuf]; --bp >= buf; ) {
if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY) {
nbusy++;
}
}
if (nbusy == 0)
break;
printf("%d ", nbusy);
@ -1642,4 +1619,3 @@ disk_externalize(int drive, void *userp, size_t *maxlen)
*maxlen -= sizeof drive;
return copyout(&drive, userp, sizeof drive);
}

pmap.c:

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.38 1994/12/18 03:36:27 davidg Exp $
* $Id: pmap.c,v 1.39 1994/12/18 14:16:22 davidg Exp $
*/
/*
@ -142,8 +142,6 @@ int i386pagesperpage; /* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
vm_offset_t vm_first_phys, vm_last_phys;
static inline boolean_t pmap_testbit();
static inline void pmap_changebit();
static inline int pmap_is_managed();
static inline void * vm_get_pmap();
static inline void vm_put_pmap();
@ -152,6 +150,7 @@ static void pmap_alloc_pv_entry();
static inline pv_entry_t get_pv_entry();
static inline void pmap_use_pt();
static inline void pmap_unuse_pt();
int nkpt;
extern vm_offset_t clean_sva, clean_eva;
@ -230,8 +229,7 @@ pmap_extract(pmap, va)
}
pa = *(int *) avtopte(va);
}
pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
return pa;
return ((pa & PG_FRAME) | (va & ~PG_FRAME));
}
return 0;
@ -358,6 +356,7 @@ pmap_bootstrap(firstaddr, loadaddr)
simple_lock_init(&kernel_pmap->pm_lock);
kernel_pmap->pm_count = 1;
nkpt = NKPT;
#if BSDVM_COMPAT
/*
@ -376,10 +375,8 @@ pmap_bootstrap(firstaddr, loadaddr)
virtual_avail = va;
#endif
/*
* reserve special hunk of memory for use by bus dma as a bounce
* buffer (contiguous virtual *and* physical memory). for now,
* assume vm does not use memory beneath hole, and we know that
* the bootstrap uses top 32k of base memory. -wfj
* Reserve special hunk of memory for use by bus dma as a bounce
* buffer (contiguous virtual *and* physical memory).
*/
{
extern vm_offset_t isaphysmem;
@ -420,8 +417,7 @@ pmap_init(phys_start, phys_end)
addr = (vm_offset_t) KERNBASE + IdlePTD;
vm_object_reference(kernel_object);
(void) vm_map_find(kernel_map, kernel_object, addr,
&addr, (4 + NKPT) * NBPG, FALSE);
&addr, (4 + NKPDE) * NBPG, FALSE);
/*
* calculate the number of pv_entries needed
@ -542,7 +538,7 @@ pmap_pinit(pmap)
pmap->pm_pdir = (pd_entry_t *) vm_get_pmap();
/* wire in kernel global address entries */
bcopy(PTD+KPTDI, pmap->pm_pdir+KPTDI, NKPT*PTESIZE);
bcopy(PTD+KPTDI, pmap->pm_pdir+KPTDI, nkpt*PTESIZE);
/* install self-referential address mapping entry */
*(int *)(pmap->pm_pdir+PTDPTDI) =
@ -552,6 +548,65 @@ pmap_pinit(pmap)
simple_lock_init(&pmap->pm_lock);
}
/*
* grow the number of kernel page table entries, if needed
*/
vm_page_t nkpg;
vm_offset_t kernel_vm_end;
void
pmap_growkernel(vm_offset_t addr) {
struct proc *p;
struct pmap *pmap;
int s;
s = splhigh();
if (kernel_vm_end == 0) {
kernel_vm_end = KERNBASE;
nkpt = 0;
while(pdir_pde(PTD, kernel_vm_end)) {
kernel_vm_end = (kernel_vm_end + NBPG*NPTEPG) & ~(NBPG*NPTEPG-1);
++nkpt;
}
}
addr = (addr + NBPG*NPTEPG) & ~(NBPG*NPTEPG-1);
while( kernel_vm_end < addr) {
if( pdir_pde( PTD, kernel_vm_end)) {
kernel_vm_end = (kernel_vm_end + NBPG*NPTEPG) & ~(NBPG*NPTEPG-1);
continue;
}
++nkpt;
if( !nkpg) {
nkpg = vm_page_alloc(kernel_object, 0, TRUE);
vm_page_remove(nkpg);
pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
if( !nkpg)
panic("pmap_growkernel: no memory to grow kernel");
}
pdir_pde( PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_KW);
nkpg = NULL;
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
if( p->p_vmspace) {
pmap = &p->p_vmspace->vm_pmap;
*pmap_pde( pmap, kernel_vm_end) = pdir_pde( PTD, kernel_vm_end);
}
}
kernel_vm_end = (kernel_vm_end + NBPG*NPTEPG) & ~(NBPG*NPTEPG-1);
}
#if 0
if( !nkpg) {
nkpg = vm_page_alloc(kernel_object, 0, TRUE);
vm_page_remove(nkpg);
pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
}
#endif
splx(s);
}
/*
* Retire the given physical map from service.
* Should only be called if the map contains
@ -674,7 +729,7 @@ pmap_alloc_pv_entry()
/*
* allocate a physical page out of the vm system
*/
m = vm_page_alloc(kernel_object, pvva-vm_map_min(kernel_map));
m = vm_page_alloc(kernel_object, pvva-vm_map_min(kernel_map), TRUE);
if (m) {
int newentries;
int i;
@ -738,20 +793,18 @@ static pt_entry_t *
get_pt_entry(pmap)
pmap_t pmap;
{
pt_entry_t *ptp;
vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
/* are we current address space or kernel? */
if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) {
ptp=PTmap;
return PTmap;
}
/* otherwise, we are alternate address space */
} else {
if ( frame != ((int) APTDpde & PG_FRAME)) {
APTDpde = pmap->pm_pdir[PTDPTDI];
pmap_update();
}
ptp=APTmap;
}
return ptp;
if ( frame != ((int) APTDpde & PG_FRAME)) {
APTDpde = pmap->pm_pdir[PTDPTDI];
pmap_update();
}
return APTmap;
}
/*
@ -842,17 +895,11 @@ pmap_remove(pmap, sva, eva)
*ptq = 0;
if (pmap_is_managed(pa)) {
if ((int) oldpte & (PG_M | PG_U)) {
if ((int) oldpte & PG_M) {
if ((sva < USRSTACK || sva > UPT_MAX_ADDRESS) ||
(sva >= USRSTACK && sva < USRSTACK+(UPAGES*NBPG))) {
if (sva < clean_sva || sva >= clean_eva) {
m = PHYS_TO_VM_PAGE(pa);
if ((int) oldpte & PG_M) {
m->flags &= ~PG_CLEAN;
}
if ((int) oldpte & PG_U) {
m->flags |= PG_REFERENCED;
}
PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
}
}
}
@ -938,11 +985,12 @@ pmap_remove(pmap, sva, eva)
continue;
}
if ((((int) oldpte & PG_M) && (va < USRSTACK || va > UPT_MAX_ADDRESS))
|| (va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
if (va < clean_sva || va >= clean_eva ) {
m = PHYS_TO_VM_PAGE(pa);
m->flags &= ~PG_CLEAN;
if ((int) oldpte & PG_M) {
if ((va < USRSTACK || va > UPT_MAX_ADDRESS) ||
(va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
if (va < clean_sva || va >= clean_eva) {
PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
}
}
}
@ -992,28 +1040,29 @@ pmap_remove_all(pa)
while (pv->pv_pmap != NULL) {
pmap = pv->pv_pmap;
ptp = get_pt_entry(pmap);
va = i386_btop(pv->pv_va);
pte = ptp + va;
va = pv->pv_va;
pte = ptp + i386_btop(va);
if (pmap_pte_w(pte))
pmap->pm_stats.wired_count--;
if ( *pte) {
if (*pte) {
pmap->pm_stats.resident_count--;
anyvalid++;
/*
* update the vm_page_t clean bit
* Update the vm_page_t clean and reference bits.
*/
if ( (m->flags & PG_CLEAN) &&
((((int) *pte) & PG_M) && (pv->pv_va < USRSTACK || pv->pv_va > UPT_MAX_ADDRESS))
|| (pv->pv_va >= USRSTACK && pv->pv_va < USRSTACK+(UPAGES*NBPG))) {
if (pv->pv_va < clean_sva || pv->pv_va >= clean_eva) {
m->flags &= ~PG_CLEAN;
if ((int) *pte & PG_M) {
if ((va < USRSTACK || va > UPT_MAX_ADDRESS) ||
(va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
if (va < clean_sva || va >= clean_eva) {
PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
}
}
}
*pte = 0;
}
pmap_unuse_pt(pmap, pv->pv_va);
pmap_unuse_pt(pmap, va);
npv = pv->pv_next;
if (npv) {
@ -1150,13 +1199,15 @@ pmap_enter(pmap, va, pa, prot, wired)
va = i386_trunc_page(va);
pa = i386_trunc_page(pa);
if (va > VM_MAX_KERNEL_ADDRESS)panic("pmap_enter: toobig");
if (va > VM_MAX_KERNEL_ADDRESS)
panic("pmap_enter: toobig");
/*
* Page Directory table entry not valid, we need a new PT page
*/
if ( *pmap_pde(pmap, va) == 0) {
pg("ptdi %x, va %x", pmap->pm_pdir[PTDPTDI], va);
if (*pmap_pde(pmap, va) == 0) {
printf("kernel page directory invalid pdir=0x%x, va=0x%x\n", pmap->pm_pdir[PTDPTDI], va);
panic("invalid kernel page directory");
}
pte = pmap_pte(pmap, va);
@ -1315,7 +1366,7 @@ pmap_qremove(va, count)
{
int i;
register pt_entry_t *pte;
for(i=0;i<count;i++) {
for (i=0;i<count;i++) {
pte = vtopte(va + i * NBPG);
*pte = 0;
}
@ -1371,7 +1422,7 @@ pmap_kremove( va)
* but is *MUCH* faster than pmap_enter...
*/
static inline int
static inline void
pmap_enter_quick(pmap, va, pa)
register pmap_t pmap;
vm_offset_t va;
@ -1380,7 +1431,6 @@ pmap_enter_quick(pmap, va, pa)
register pt_entry_t *pte;
register pv_entry_t pv, npv;
int s;
int anyvalid = 0;
/*
* Enter on the PV list if part of our managed memory
@ -1389,7 +1439,9 @@ pmap_enter_quick(pmap, va, pa)
*/
pte = vtopte(va);
if (pmap_pte_pa(pte)) {
/* a fault on the page table might occur here */
if (*pte) {
pmap_remove(pmap, va, va + PAGE_SIZE);
}
@ -1399,8 +1451,8 @@ pmap_enter_quick(pmap, va, pa)
* No entries yet, use header as the first entry
*/
if (pv->pv_pmap == NULL) {
pv->pv_va = va;
pv->pv_pmap = pmap;
pv->pv_va = va;
pv->pv_next = NULL;
}
/*
@ -1423,14 +1475,12 @@ pmap_enter_quick(pmap, va, pa)
*/
pmap->pm_stats.resident_count++;
if (*pte)
anyvalid++;
/*
* Now validate mapping with desired protection/wiring.
*/
*pte = (pt_entry_t) ( (int) (pa | PG_V | PG_u));
return (anyvalid);
return;
}
/*
@ -1446,12 +1496,10 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
vm_offset_t offset;
vm_offset_t size;
{
vm_offset_t tmpoff;
vm_page_t p;
vm_offset_t v;
vm_offset_t objbytes;
int anyvalid = 0;
int bits;
if (!pmap)
return;
@ -1460,7 +1508,7 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
* if we are processing a major portion of the object, then
* scan the entire thing.
*/
if( size > object->size / 2) {
if (size > (object->size >> 1)) {
objbytes = size;
p = object->memq.tqh_first;
while ((p != NULL) && (objbytes != 0)) {
@ -1475,12 +1523,12 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
continue;
}
if ((p->flags & (PG_BUSY|PG_FICTITIOUS|PG_FAKE)) == 0 ) {
if ((p->bmapped == 0) &&
(p->busy == 0) &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->flags & (PG_BUSY|PG_FICTITIOUS|PG_CACHE)) == 0 ) {
vm_page_hold(p);
v = i386_trunc_page(((vm_offset_t)vtopte( addr+tmpoff)));
/* a fault might occur here */
*(volatile char *)v += 0;
anyvalid += pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
vm_page_unhold(p);
}
p = p->listq.tqe_next;
@ -1490,23 +1538,20 @@ pmap_object_init_pt(pmap, addr, object, offset, size)
/*
* else lookup the pages one-by-one.
*/
for(tmpoff = 0; tmpoff < size; tmpoff += NBPG) {
for (tmpoff = 0; tmpoff < size; tmpoff += NBPG) {
p = vm_page_lookup(object, tmpoff + offset);
if (p) {
if( (p->flags & (PG_BUSY|PG_FICTITIOUS|PG_FAKE)) == 0) {
if ((p->bmapped == 0) &&
(p->busy == 0) &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->flags & (PG_BUSY|PG_FICTITIOUS|PG_CACHE)) == 0) {
vm_page_hold(p);
v = i386_trunc_page(((vm_offset_t)vtopte( addr+tmpoff)));
/* a fault might occur here */
*(volatile char *)v += 0;
anyvalid += pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
pmap_enter_quick(pmap, addr+tmpoff, VM_PAGE_TO_PHYS(p));
vm_page_unhold(p);
}
}
}
}
if (anyvalid)
pmap_update();
}
/*
@ -1685,7 +1730,7 @@ pmap_page_exists(pmap, pa)
* note that the testbit/changebit routines are inline,
* and a lot of things compile-time evaluate.
*/
static inline boolean_t
boolean_t
pmap_testbit(pa, bit)
register vm_offset_t pa;
int bit;
@ -1749,7 +1794,7 @@ pmap_testbit(pa, bit)
/*
* this routine is used to modify bits in ptes
*/
static inline void
void
pmap_changebit(pa, bit, setem)
vm_offset_t pa;
int bit;
@ -1816,70 +1861,6 @@ pmap_page_protect(phys, prot)
}
}
/*
* Clear the modify bits on the specified physical page.
*/
void
pmap_clear_modify(pa)
vm_offset_t pa;
{
pmap_changebit(pa, PG_M, FALSE);
}
/*
* pmap_clear_reference:
*
* Clear the reference bit on the specified physical page.
*/
void
pmap_clear_reference(pa)
vm_offset_t pa;
{
pmap_changebit(pa, PG_U, FALSE);
}
/*
* pmap_is_referenced:
*
* Return whether or not the specified physical page is referenced
* by any physical maps.
*/
boolean_t
pmap_is_referenced(pa)
vm_offset_t pa;
{
return(pmap_testbit(pa, PG_U));
}
/*
* pmap_is_modified:
*
* Return whether or not the specified physical page is modified
* by any physical maps.
*/
boolean_t
pmap_is_modified(pa)
vm_offset_t pa;
{
return(pmap_testbit(pa, PG_M));
}
/*
* Routine: pmap_copy_on_write
* Function:
* Remove write privileges from all
* physical maps for this physical page.
*/
void
pmap_copy_on_write(pa)
vm_offset_t pa;
{
pmap_changebit(pa, PG_RW, FALSE);
}
vm_offset_t
pmap_phys_address(ppn)
int ppn;

trap.c:

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.41 1994/10/30 20:25:21 bde Exp $
* $Id: trap.c,v 1.42 1994/12/24 07:22:58 bde Exp $
*/
/*
@ -421,8 +421,6 @@ trap_pfault(frame, usermode)
*(volatile char *)v += 0;
ptepg = (vm_page_t) pmap_pte_vm_page(vm_map_pmap(map), v);
if( ptepg->hold_count == 0)
ptepg->act_count += 3;
vm_page_hold(ptepg);
/* Fault in the user page: */

vm_machdep.c:

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.28 1994/09/02 04:12:07 davidg Exp $
* $Id: vm_machdep.c,v 1.29 1994/10/08 22:19:51 phk Exp $
*/
#include "npx.h"
@ -321,6 +321,8 @@ vm_bounce_alloc(bp)
pa = pmap_kextract(va);
if (pa >= SIXTEENMEG)
++dobounceflag;
if( pa == 0)
panic("vm_bounce_alloc: Unmapped page");
va += NBPG;
}
if (dobounceflag == 0)
@ -492,11 +494,14 @@ vm_bounce_init()
if (!bounceallocarray)
panic("Cannot allocate bounce resource array\n");
bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned));
bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
if (!bouncepa)
panic("Cannot allocate physical memory array\n");
for(i=0;i<bounceallocarraysize;i++) {
bounceallocarray[i] = 0xffffffff;
}
for(i=0;i<bouncepages;i++) {
vm_offset_t pa;
if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG)
@ -504,6 +509,7 @@ vm_bounce_init()
if( pa == 0)
panic("bounce memory not resident");
bouncepa[i] = pa;
bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
}
bouncefree = bouncepages;
@ -603,9 +609,9 @@ cpu_wait(p) struct proc *p; {
/* extern vm_map_t upages_map; */
/* drop per-process resources */
pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
pmap_remove(vm_map_pmap(u_map), (vm_offset_t) p->p_addr,
((vm_offset_t) p->p_addr) + ctob(UPAGES));
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
vmspace_free(p->p_vmspace);
}

pmap.h:

@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.18 1994/11/14 14:12:24 bde Exp $
* $Id: pmap.h,v 1.19 1994/12/18 03:11:46 davidg Exp $
*/
#ifndef _MACHINE_PMAP_H_
@ -62,7 +62,11 @@ typedef unsigned int *pt_entry_t;
* given to the user (NUPDE)
*/
#ifndef NKPT
#if 0
#define NKPT 26 /* actual number of kernel page tables */
#else
#define NKPT 9 /* actual number of kernel page tables */
#endif
#endif
#ifndef NKPDE
#define NKPDE 63 /* addressable number of page tables/pde's */
@ -126,6 +130,43 @@ pmap_kextract(vm_offset_t va)
pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
return pa;
}
/*
* pmap_is_referenced:
*
* Return whether or not the specified physical page was referenced
* by any physical maps.
*/
#define pmap_is_referenced(pa) pmap_testbit((pa), PG_U)
/*
* pmap_is_modified:
*
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
#define pmap_is_modified(pa) pmap_testbit((pa), PG_M)
/*
* Clear the modify bits on the specified physical page.
*/
#define pmap_clear_modify(pa) pmap_changebit((pa), PG_M, FALSE)
/*
* pmap_clear_reference:
*
* Clear the reference bit on the specified physical page.
*/
#define pmap_clear_reference(pa) pmap_changebit((pa), PG_U, FALSE)
/*
* Routine: pmap_copy_on_write
* Function:
* Remove write privileges from all
* physical maps for this physical page.
*/
#define pmap_copy_on_write(pa) pmap_changebit((pa), PG_RW, FALSE)
#endif
/*
@ -196,6 +237,7 @@ pv_entry_t pv_table; /* array of entries, one per page */
struct pcb;
void pmap_activate __P((pmap_t, struct pcb *));
void pmap_changebit __P((vm_offset_t, int, boolean_t));
pmap_t pmap_kernel __P((void));
boolean_t pmap_page_exists __P((pmap_t, vm_offset_t));
pt_entry_t *pmap_pte(pmap_t, vm_offset_t);

vmparam.h:

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91
* $Id: vmparam.h,v 1.15 1994/09/12 11:38:20 davidg Exp $
* $Id: vmparam.h,v 1.16 1994/09/23 07:00:12 davidg Exp $
*/
@ -124,11 +124,11 @@
#define UPDT VM_MIN_KERNEL_ADDRESS
#define KPT_MIN_ADDRESS ((vm_offset_t)KERNBASE - NBPG*(NKPDE+1))
#define KPT_MAX_ADDRESS ((vm_offset_t)KERNBASE - NBPG)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)KERNBASE + NKPT*NBPG*NPTEPG)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)KERNBASE + NKPDE*NBPG*NPTEPG)
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
#define VM_KMEM_SIZE (16 * 1024 * 1024)
#define VM_KMEM_SIZE (32 * 1024 * 1024)
#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
#endif /* _MACHINE_VMPARAM_H_ */

param.c:

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)param.c 8.2 (Berkeley) 1/21/94
* $Id: param.c,v 1.3 1994/08/02 07:38:30 davidg Exp $
* $Id: param.c,v 1.4 1994/09/13 14:46:24 dfr Exp $
*/
#include <sys/param.h>
@ -81,14 +81,9 @@ int tick = 1000000 / HZ;
int tickadj = 30000 / (60 * HZ); /* can adjust 30ms in 60s */
struct timezone tz = { TIMEZONE, DST };
#define NPROC (20 + 16 * MAXUSERS)
int maxproc = NPROC;
#define NTEXT NPROC
int vm_cache_max = NTEXT/2 + 16;
#define NVNODE (NPROC + NTEXT + 100)
int desiredvnodes = NVNODE;
int maxfiles = 3 * (NPROC + MAXUSERS) + 80;
int ncallout = 16 + NPROC;
int nclist = 60 + 12 * MAXUSERS;
int maxproc = NPROC; /* maximum # of processes */
int maxfiles = 256; /* open files per process limit */
int ncallout = 16 + NPROC; /* maximum # of timer events */
int nmbclusters = NMBCLUSTERS;
int fscale = FSCALE; /* kernel uses `FSCALE', user uses `fscale' */

msdosfs_denode.c:

@ -1,4 +1,4 @@
/* $Id: msdosfs_denode.c,v 1.5 1994/12/12 12:35:43 bde Exp $ */
/* $Id: msdosfs_denode.c,v 1.6 1994/12/27 12:37:35 bde Exp $ */
/* $NetBSD: msdosfs_denode.c,v 1.9 1994/08/21 18:44:00 ws Exp $ */
/*-
@ -477,7 +477,7 @@ detrunc(dep, length, flags, cred, p)
#endif
return error;
}
vnode_pager_uncache(DETOV(dep)); /* what's this for? */
/* vnode_pager_uncache(DETOV(dep)); /* what's this for? */
/*
* is this the right place for it?
*/

msdosfs_vnops.c:

@ -1,4 +1,4 @@
/* $Id: msdosfs_vnops.c,v 1.10 1994/12/12 12:35:50 bde Exp $ */
/* $Id: msdosfs_vnops.c,v 1.11 1994/12/27 12:37:36 bde Exp $ */
/* $NetBSD: msdosfs_vnops.c,v 1.20 1994/08/21 18:44:13 ws Exp $ */
/*-
@ -704,7 +704,6 @@ msdosfs_write(ap)
dep->de_FileSize = uio->uio_offset + n;
vnode_pager_setsize(vp, dep->de_FileSize); /* why? */
}
(void) vnode_pager_uncache(vp); /* why not? */
/*
* Should these vnode_pager_* functions be done on dir
* files?
@ -725,7 +724,6 @@ msdosfs_write(ap)
if (ioflag & IO_SYNC)
(void) bwrite(bp);
else if (n + croffset == pmp->pm_bpcluster) {
bp->b_flags |= B_AGE;
bawrite(bp);
} else
bdwrite(bp);

ufs_bmap.c:

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_bmap.c 8.6 (Berkeley) 1/21/94
* $Id: ufs_bmap.c,v 1.3 1994/08/02 07:54:52 davidg Exp $
* $Id: ufs_bmap.c,v 1.4 1994/10/08 06:57:21 phk Exp $
*/
#include <sys/param.h>
@ -128,12 +128,12 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
if (runp) {
/*
* XXX
* If MAXBSIZE is the largest transfer the disks can handle,
* If MAXPHYS is the largest transfer the disks can handle,
* we probably want maxrun to be 1 block less so that we
* don't create a block larger than the device can handle.
*/
*runp = 0;
maxrun = MAXBSIZE / mp->mnt_stat.f_iosize - 1;
maxrun = MAXPHYS / mp->mnt_stat.f_iosize - 1;
}
xap = ap == NULL ? a : ap;
@ -179,7 +179,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
xap->in_exists = 1;
bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
if (bp->b_flags & (B_DONE | B_DELWRI)) {
if (bp->b_flags & B_CACHE) {
trace(TR_BREADHIT, pack(vp, size), metalbn);
}
#ifdef DIAGNOSTIC
@ -190,6 +190,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
trace(TR_BREADMISS, pack(vp, size), metalbn);
bp->b_blkno = blkptrtodb(ump, daddr);
bp->b_flags |= B_READ;
vfs_busy_pages(bp, 0);
VOP_STRATEGY(bp);
curproc->p_stats->p_ru.ru_inblock++; /* XXX */
error = biowait(bp);

kern_exec.c:

@ -28,7 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: kern_exec.c,v 1.9 1994/09/25 19:33:36 phk Exp $
* $Id: kern_exec.c,v 1.10 1994/10/02 17:35:13 phk Exp $
*/
#include <sys/param.h>
@ -97,7 +97,7 @@ execve(p, uap, retval)
* Allocate temporary demand zeroed space for argument and
* environment strings
*/
error = vm_allocate(kernel_map, (vm_offset_t *)&iparams->stringbase,
error = vm_allocate(exec_map, (vm_offset_t *)&iparams->stringbase,
ARG_MAX, TRUE);
if (error) {
log(LOG_WARNING, "execve: failed to allocate string space\n");
@ -127,7 +127,7 @@ execve(p, uap, retval)
error = namei(ndp);
if (error) {
vm_deallocate(kernel_map, (vm_offset_t)iparams->stringbase,
vm_deallocate(exec_map, (vm_offset_t)iparams->stringbase,
ARG_MAX);
goto exec_fail;
}
@ -296,7 +296,7 @@ execve(p, uap, retval)
/*
* free various allocated resources
*/
if (vm_deallocate(kernel_map, (vm_offset_t)iparams->stringbase, ARG_MAX))
if (vm_deallocate(exec_map, (vm_offset_t)iparams->stringbase, ARG_MAX))
panic("execve: string buffer dealloc failed (1)");
if (vm_deallocate(kernel_map, (vm_offset_t)image_header, PAGE_SIZE))
panic("execve: header dealloc failed (2)");
@ -307,7 +307,7 @@ execve(p, uap, retval)
exec_fail_dealloc:
if (iparams->stringbase && iparams->stringbase != (char *)-1)
if (vm_deallocate(kernel_map, (vm_offset_t)iparams->stringbase,
if (vm_deallocate(exec_map, (vm_offset_t)iparams->stringbase,
ARG_MAX))
panic("execve: string buffer dealloc failed (2)");
if (iparams->image_header && iparams->image_header != (char *)-1)

kern_malloc.c:

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
* $Id: kern_malloc.c,v 1.5 1994/10/09 07:34:56 davidg Exp $
* $Id: kern_malloc.c,v 1.6 1994/12/17 04:04:42 davidg Exp $
*/
#include <sys/param.h>
@ -365,6 +365,9 @@ kmeminit()
ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
npg = VM_KMEM_SIZE/ NBPG;
if( npg > cnt.v_page_count)
npg = cnt.v_page_count;
kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
(vm_size_t)(npg * sizeof(struct kmemusage)));
kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,

kern_physio.c:

@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id: kern_physio.c,v 1.7 1994/08/18 22:35:02 wollman Exp $
* $Id: kern_physio.c,v 1.8 1994/09/25 19:33:40 phk Exp $
*/
#include <sys/param.h>
@ -158,8 +158,8 @@ u_int
minphys(struct buf *bp)
{
if( bp->b_bcount > MAXBSIZE) {
bp->b_bcount = MAXBSIZE;
if( bp->b_bcount > MAXPHYS) {
bp->b_bcount = MAXPHYS;
}
return bp->b_bcount;
}

File diff suppressed because it is too large.

vfs_cluster.c:

@ -1,6 +1,8 @@
/*-
* Copyright (c) 1993
* The Regents of the University of California. All rights reserved.
* Modifications/enhancements:
* Copyright (c) 1995 John S. Dyson. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -31,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
* $Id: vfs_cluster.c,v 1.6 1994/10/08 22:33:41 phk Exp $
* $Id: vfs_cluster.c,v 1.7 1994/12/18 03:05:49 davidg Exp $
*/
#include <sys/param.h>
@ -43,12 +45,15 @@
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int doreallocblks = 0;
struct ctldebug debug13 = { "doreallocblks", &doreallocblks };
struct ctldebug debug13 = {"doreallocblks", &doreallocblks};
#else
/* XXX for cluster_write */
#define doreallocblks 0
@ -57,14 +62,14 @@ struct ctldebug debug13 = { "doreallocblks", &doreallocblks };
/*
* Local declarations
*/
struct buf *cluster_newbuf __P((struct vnode *, struct buf *, long, daddr_t,
daddr_t, long, int));
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
daddr_t, daddr_t, long, int, long));
void cluster_wbuild __P((struct vnode *, struct buf *, long,
daddr_t, int, daddr_t));
daddr_t, daddr_t, long, int, long));
void cluster_wbuild __P((struct vnode *, struct buf *, long, daddr_t, int, daddr_t));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));
int totreads;
int totreadblocks;
#ifdef DIAGNOSTIC
/*
* Set to 1 if reads of block zero should cause readahead to be done.
@ -78,7 +83,8 @@ struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));
* blocks from the cache. The former seems intuitive, but some quick tests
* showed that the latter performed better from a system-wide point of view.
*/
int doclusterraz = 0;
int doclusterraz = 0;
#define ISSEQREAD(vp, blk) \
(((blk) != 0 || doclusterraz) && \
((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
@ -92,17 +98,6 @@ int doclusterraz = 0;
* lastr is 0, we assume this is the first read and we'll read up to two
* blocks if they are sequential. After that, we'll do regular read ahead
* in clustered chunks.
*
* There are 4 or 5 cases depending on how you count:
* Desired block is in the cache:
* 1 Not sequential access (0 I/Os).
* 2 Access is sequential, do read-ahead (1 ASYNC).
* Desired block is not in cache:
* 3 Not sequential access (1 SYNC).
* 4 Sequential access, next block is contiguous (1 SYNC).
* 5 Sequential access, next block is not contiguous (1 SYNC, 1 ASYNC)
*
* There are potentially two buffers that require I/O.
* bp is the block requested.
* rbp is the read-ahead block.
* If either is NULL, then you don't have to do the I/O.
@ -117,156 +112,136 @@ cluster_read(vp, filesize, lblkno, size, cred, bpp)
struct buf **bpp;
{
struct buf *bp, *rbp;
daddr_t blkno, ioblkno;
daddr_t blkno, rablkno, origlblkno;
long flags;
int error, num_ra, alreadyincore;
#ifdef DIAGNOSTIC
if (size == 0)
panic("cluster_read: size = 0");
#endif
origlblkno = lblkno;
error = 0;
flags = B_READ;
/*
* get the requested block
*/
*bpp = bp = getblk(vp, lblkno, size, 0, 0);
/*
* if it is in the cache, then check to see if the reads have been
* sequential. If they have, then try some read-ahead, otherwise
* back-off on prospective read-aheads.
*/
if (bp->b_flags & B_CACHE) {
/*
* Desired block is in cache; do any readahead ASYNC.
* Case 1, 2.
*/
trace(TR_BREADHIT, pack(vp, size), lblkno);
flags |= B_ASYNC;
ioblkno = lblkno + (vp->v_ralen ? vp->v_ralen : 1);
alreadyincore = (int)incore(vp, ioblkno);
int i;
if (!ISSEQREAD(vp, origlblkno)) {
vp->v_ralen >>= 1;
return 0;
}
bp = NULL;
} else {
/* Block wasn't in cache, case 3, 4, 5. */
trace(TR_BREADMISS, pack(vp, size), lblkno);
/*
* if it isn't in the cache, then get a chunk from disk if
* sequential, otherwise just get the block.
*/
bp->b_flags |= B_READ;
ioblkno = lblkno;
alreadyincore = 0;
curproc->p_stats->p_ru.ru_inblock++; /* XXX */
lblkno += 1;
curproc->p_stats->p_ru.ru_inblock++; /* XXX */
}
/*
* XXX
* Replace 1 with a window size based on some permutation of
* maxcontig and rot_delay. This will let you figure out how
* many blocks you should read-ahead (case 2, 4, 5).
*
* If the access isn't sequential, reset the window to 1.
* Note that a read to the same block is considered sequential.
* This catches the case where the file is being read sequentially,
* but at smaller than the filesystem block size.
* if ralen is "none", then try a little
*/
if (vp->v_ralen == 0)
vp->v_ralen = 1;
/*
* assume no read-ahead
*/
alreadyincore = 1;
rablkno = lblkno;
/*
* if we have been doing sequential I/O, then do some read-ahead
*/
if (ISSEQREAD(vp, origlblkno)) {
int i;
/*
* this code makes sure that the stuff that we have read-ahead
* is still in the cache. If it isn't, we have been reading
* ahead too much, and we need to back-off, otherwise we might
* try to read more.
*/
for (i = 0; i < vp->v_ralen; i++) {
rablkno = lblkno + i;
alreadyincore = (int) incore(vp, rablkno);
if (!alreadyincore) {
if (rablkno < vp->v_maxra) {
vp->v_maxra = rablkno;
vp->v_ralen >>= 1;
alreadyincore = 1;
} else {
if (inmem(vp, rablkno))
continue;
if ((vp->v_ralen + 1) < MAXPHYS / size)
vp->v_ralen++;
}
break;
}
}
}
/*
* we now build the read-ahead buffer if it is desirable.
*/
rbp = NULL;
if (!ISSEQREAD(vp, lblkno)) {
vp->v_ralen = 0;
vp->v_maxra = lblkno;
} else if ((ioblkno + 1) * size <= filesize && !alreadyincore &&
!(error = VOP_BMAP(vp, ioblkno, NULL, &blkno, &num_ra)) &&
if (!alreadyincore &&
(rablkno + 1) * size <= filesize &&
!(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra)) &&
blkno != -1) {
/*
* Reading sequentially, and the next block is not in the
* cache. We are going to try reading ahead.
*/
if (num_ra) {
/*
* If our desired readahead block had been read
* in a previous readahead but is no longer in
* core, then we may be reading ahead too far
* or are not using our readahead very rapidly.
* In this case we scale back the window.
*/
if (!alreadyincore && ioblkno <= vp->v_maxra)
vp->v_ralen = max(vp->v_ralen >> 1, 1);
/*
* There are more sequential blocks than our current
* window allows, scale up. Ideally we want to get
* in sync with the filesystem maxcontig value.
*/
else if (num_ra > vp->v_ralen && lblkno != vp->v_lastr)
vp->v_ralen = vp->v_ralen ?
min(num_ra, vp->v_ralen << 1) : 1;
if (num_ra > vp->v_ralen)
num_ra = vp->v_ralen;
if (num_ra > vp->v_ralen)
num_ra = vp->v_ralen;
}
if (num_ra) /* case 2, 4 */
if (num_ra &&
((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_reserved)) {
rbp = cluster_rbuild(vp, filesize,
bp, ioblkno, blkno, size, num_ra, flags);
else if (ioblkno == lblkno) {
bp->b_blkno = blkno;
/* Case 5: check how many blocks to read ahead */
++ioblkno;
if ((ioblkno + 1) * size > filesize ||
incore(vp, ioblkno) || (error = VOP_BMAP(vp,
ioblkno, NULL, &blkno, &num_ra)) || blkno == -1)
goto skip_readahead;
/*
* Adjust readahead as above
*/
if (num_ra) {
if (!alreadyincore && ioblkno <= vp->v_maxra)
vp->v_ralen = max(vp->v_ralen >> 1, 1);
else if (num_ra > vp->v_ralen &&
lblkno != vp->v_lastr)
vp->v_ralen = vp->v_ralen ?
min(num_ra,vp->v_ralen<<1) : 1;
if (num_ra > vp->v_ralen)
num_ra = vp->v_ralen;
}
flags |= B_ASYNC;
if (num_ra)
rbp = cluster_rbuild(vp, filesize,
NULL, ioblkno, blkno, size, num_ra, flags);
else {
rbp = getblk(vp, ioblkno, size, 0, 0);
rbp->b_flags |= flags;
rbp->b_blkno = blkno;
}
NULL, rablkno, blkno, size, num_ra, B_READ | B_ASYNC);
} else {
/* case 2; read ahead single block */
rbp = getblk(vp, ioblkno, size, 0, 0);
rbp->b_flags |= flags;
rbp = getblk(vp, rablkno, size, 0, 0);
rbp->b_flags |= B_READ | B_ASYNC;
rbp->b_blkno = blkno;
}
if (rbp == bp) /* case 4 */
rbp = NULL;
else if (rbp) { /* case 2, 5 */
trace(TR_BREADMISSRA,
pack(vp, (num_ra + 1) * size), ioblkno);
curproc->p_stats->p_ru.ru_inblock++; /* XXX */
}
}
/* XXX Kirk, do we need to make sure the bp has creds? */
skip_readahead:
if (bp)
/*
* If the synchronous read is a cluster, handle it; otherwise do a
* simple, non-clustered read.
*/
if (bp) {
if (bp->b_flags & (B_DONE | B_DELWRI))
panic("cluster_read: DONE bp");
else
else {
vfs_busy_pages(bp, 0);
error = VOP_STRATEGY(bp);
if (rbp)
if (error || rbp->b_flags & (B_DONE | B_DELWRI)) {
vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
totreads++;
totreadblocks += bp->b_bcount / size;
curproc->p_stats->p_ru.ru_inblock++;
}
}
/*
* and if we have read-aheads, do them too
*/
if (rbp) {
if (error || (rbp->b_flags & B_CACHE)) {
rbp->b_flags &= ~(B_ASYNC | B_READ);
brelse(rbp);
} else
} else {
vfs_busy_pages(rbp, 0);
(void) VOP_STRATEGY(rbp);
/*
* Recalculate our maximum readahead
*/
if (rbp == NULL)
rbp = bp;
if (rbp)
vp->v_maxra = rbp->b_lblkno + (rbp->b_bufsize / size) - 1;
vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
totreads++;
totreadblocks += rbp->b_bcount / size;
curproc->p_stats->p_ru.ru_inblock++;
}
}
if (bp)
return(biowait(bp));
return(error);
return (biowait(bp));
return (error);
}
/*
@ -288,12 +263,12 @@ cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
struct cluster_save *b_save;
struct buf *tbp;
daddr_t bn;
int i, inc;
int i, inc, j;
#ifdef DIAGNOSTIC
if (size != vp->v_mount->mnt_stat.f_iosize)
panic("cluster_rbuild: size %d != filesize %d\n",
size, vp->v_mount->mnt_stat.f_iosize);
size, vp->v_mount->mnt_stat.f_iosize);
#endif
if (size * (lbn + run + 1) > filesize)
--run;
@ -303,97 +278,65 @@ cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
bp->b_blkno = blkno;
bp->b_flags |= flags;
}
return(bp);
}
bp = cluster_newbuf(vp, bp, flags, blkno, lbn, size, run + 1);
if (bp->b_flags & (B_DONE | B_DELWRI))
return (bp);
}
tbp = bp;
if (!tbp) {
tbp = getblk(vp, lbn, size, 0, 0);
}
if (tbp->b_flags & B_CACHE) {
return (tbp);
} else if (bp == NULL) {
tbp->b_flags |= B_ASYNC;
}
bp = getpbuf();
bp->b_flags = flags | B_CALL | B_BUSY | B_CLUSTER;
bp->b_iodone = cluster_callback;
bp->b_blkno = blkno;
bp->b_lblkno = lbn;
pbgetvp(vp, bp);
b_save = malloc(sizeof(struct buf *) * (run + 1) + sizeof(struct cluster_save),
M_SEGMENT, M_WAITOK);
b_save->bs_bufsize = b_save->bs_bcount = size;
b_save->bs_nchildren = 0;
b_save->bs_children = (struct buf **)(b_save + 1);
b_save->bs_saveaddr = bp->b_saveaddr;
bp->b_saveaddr = (caddr_t) b_save;
b_save->bs_children = (struct buf **) (b_save + 1);
bp->b_saveaddr = b_save;
bp->b_bcount = 0;
bp->b_bufsize = 0;
bp->b_npages = 0;
if (tbp->b_flags & B_VMIO)
bp->b_flags |= B_VMIO;
inc = btodb(size);
for (bn = blkno + inc, i = 1; i <= run; ++i, bn += inc) {
if (incore(vp, lbn + i)) {
if (i == 1) {
bp->b_saveaddr = b_save->bs_saveaddr;
bp->b_flags &= ~B_CALL;
bp->b_iodone = NULL;
allocbuf(bp, size);
free(b_save, M_SEGMENT);
} else
allocbuf(bp, size * i);
break;
for (bn = blkno, i = 0; i <= run; ++i, bn += inc) {
if (i != 0) {
if (inmem(vp, lbn + i)) {
break;
}
tbp = getblk(vp, lbn + i, size, 0, 0);
if ((tbp->b_flags & B_CACHE) ||
(tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO)) {
brelse(tbp);
break;
}
tbp->b_blkno = bn;
tbp->b_flags |= flags | B_READ | B_ASYNC;
} else {
tbp->b_flags |= flags | B_READ;
}
tbp = getblk(vp, lbn + i, 0, 0, 0);
/*
* getblk may return some memory in the buffer if there were
* no empty buffers to shed it to. If there is currently
* memory in the buffer, we move it down size bytes to make
* room for the valid pages that cluster_callback will insert.
* We do this now so we don't have to do it at interrupt time
* in the callback routine.
*/
if (tbp->b_bufsize != 0) {
caddr_t bdata = (char *)tbp->b_data;
if (tbp->b_bufsize + size > MAXBSIZE)
panic("cluster_rbuild: too much memory");
if (tbp->b_bufsize > size) {
/*
* XXX if the source and destination regions
* overlap we have to copy backward to avoid
* clobbering any valid pages (i.e. pagemove
* implementations typically can't handle
* overlap).
*/
bdata += tbp->b_bufsize;
while (bdata > (char *)tbp->b_data) {
bdata -= CLBYTES;
pagemove(bdata, bdata + size, CLBYTES);
}
} else
pagemove(bdata, bdata + size, tbp->b_bufsize);
}
tbp->b_blkno = bn;
tbp->b_flags |= flags | B_READ | B_ASYNC;
++b_save->bs_nchildren;
b_save->bs_children[i - 1] = tbp;
}
return(bp);
}
/*
* Either get a new buffer or grow the existing one.
*/
struct buf *
cluster_newbuf(vp, bp, flags, blkno, lblkno, size, run)
struct vnode *vp;
struct buf *bp;
long flags;
daddr_t blkno;
daddr_t lblkno;
long size;
int run;
{
if (!bp) {
bp = getblk(vp, lblkno, size, 0, 0);
if (bp->b_flags & (B_DONE | B_DELWRI)) {
bp->b_blkno = blkno;
return(bp);
b_save->bs_children[i] = tbp;
for (j = 0; j < tbp->b_npages; j += 1) {
bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
}
bp->b_npages += tbp->b_npages;
bp->b_bcount += size;
bp->b_bufsize += size;
}
allocbuf(bp, run * size);
bp->b_blkno = blkno;
bp->b_iodone = cluster_callback;
bp->b_flags |= flags | B_CALL;
return(bp);
pmap_qenter(bp->b_data, bp->b_pages, bp->b_npages);
return (bp);
}
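Instead of shuffling data between buffers with pagemove(), the rewritten cluster_rbuild() borrows the component buffers' VM pages into a pbuf and maps them contiguously with pmap_qenter(). The simplified user-space sketch below models only the page-adoption step; struct page, struct xbuf and adopt_pages() are invented stand-ins for the kernel structures.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures used by the new scheme. */
struct page { long pindex; };

struct xbuf {
        int          npages;
        struct page *pages[16];
};

/*
 * Model of the page-gathering loop: the parent cluster buffer copies no
 * data, it simply adopts the component buffers' page pointers; in the
 * kernel, pmap_qenter() then maps them contiguously into the pbuf's KVA.
 */
static void
adopt_pages(struct xbuf *parent, struct xbuf *child)
{
        int j;

        for (j = 0; j < child->npages; j++)
                parent->pages[parent->npages + j] = child->pages[j];
        parent->npages += child->npages;
}

int
main(void)
{
        struct page p0 = {0}, p1 = {1}, p2 = {2};
        struct xbuf parent = {0, {NULL}};
        struct xbuf a = {1, {&p0}}, b = {2, {&p1, &p2}};

        adopt_pages(&parent, &a);
        adopt_pages(&parent, &b);
        printf("parent maps %d pages, last pindex %ld\n",
            parent.npages, parent.pages[parent.npages - 1]->pindex);
        return (0);
}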
/*
@ -408,7 +351,6 @@ cluster_callback(bp)
{
struct cluster_save *b_save;
struct buf **bpp, *tbp;
long bsize;
caddr_t cp;
int error = 0;
@ -418,46 +360,22 @@ cluster_callback(bp)
if (bp->b_flags & B_ERROR)
error = bp->b_error;
b_save = (struct cluster_save *)(bp->b_saveaddr);
bp->b_saveaddr = b_save->bs_saveaddr;
bsize = b_save->bs_bufsize;
cp = (char *)bp->b_data + bsize;
b_save = (struct cluster_save *) (bp->b_saveaddr);
pmap_qremove(bp->b_data, bp->b_npages);
/*
* Move memory from the large cluster buffer into the component
* buffers and mark IO as done on these.
*/
for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
tbp = *bpp;
pagemove(cp, tbp->b_data, bsize);
tbp->b_bufsize += bsize;
tbp->b_bcount = bsize;
if (error) {
tbp->b_flags |= B_ERROR;
tbp->b_error = error;
}
biodone(tbp);
bp->b_bufsize -= bsize;
cp += bsize;
}
/*
* If there was excess memory in the cluster buffer,
* slide it up adjacent to the remaining valid data.
*/
if (bp->b_bufsize != bsize) {
if (bp->b_bufsize < bsize)
panic("cluster_callback: too little memory");
pagemove(cp, (char *)bp->b_data + bsize, bp->b_bufsize - bsize);
}
bp->b_bcount = bsize;
bp->b_iodone = NULL;
free(b_save, M_SEGMENT);
if (bp->b_flags & B_ASYNC)
brelse(bp);
else {
bp->b_flags &= ~B_WANTED;
wakeup((caddr_t)bp);
}
relpbuf(bp);
}
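cluster_callback() now unmaps the borrowed pages with pmap_qremove() and completes each component buffer individually, propagating any error from the single large transfer, then releases the pbuf with relpbuf(). A minimal model of that fan-out follows; struct cbuf is a hypothetical stand-in for struct buf.

#include <stdio.h>

/* Minimal model of a component buffer awaiting cluster completion. */
struct cbuf {
        int  error;
        int  done;
        long lblkno;
};

/*
 * Once the large transfer finishes, its status is fanned out to every
 * component buffer and each one is completed (the biodone() analogue).
 */
static void
cluster_complete(struct cbuf **children, int nchildren, int error)
{
        int i;

        for (i = 0; i < nchildren; i++) {
                children[i]->error = error;     /* propagate any I/O error */
                children[i]->done = 1;          /* biodone() equivalent */
        }
}

int
main(void)
{
        struct cbuf a = {0, 0, 10}, b = {0, 0, 11};
        struct cbuf *kids[] = {&a, &b};

        cluster_complete(kids, 2, 0);
        printf("block %ld done=%d, block %ld done=%d\n",
            a.lblkno, a.done, b.lblkno, b.done);
        return (0);
}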
/*
@ -472,78 +390,47 @@ cluster_callback(bp)
*/
void
cluster_write(bp, filesize)
struct buf *bp;
struct buf *bp;
u_quad_t filesize;
{
struct vnode *vp;
daddr_t lbn;
int maxclen, cursize;
struct vnode *vp;
daddr_t lbn;
int maxclen, cursize;
int lblocksize;
vp = bp->b_vp;
lbn = bp->b_lblkno;
vp = bp->b_vp;
lblocksize = vp->v_mount->mnt_stat.f_iosize;
lbn = bp->b_lblkno;
/* Initialize vnode to beginning of file. */
if (lbn == 0)
vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
(bp->b_blkno != vp->v_lasta + btodb(bp->b_bcount))) {
maxclen = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize - 1;
if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
(bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
maxclen = MAXPHYS / lblocksize;
if (vp->v_clen != 0) {
/*
* Next block is not sequential.
*
*
* If we are not writing at end of file, the process
* seeked to another point in the file since its
* last write, or we have reached our maximum
* cluster size, then push the previous cluster.
* Otherwise try reallocating to make it sequential.
* seeked to another point in the file since its last
* write, or we have reached our maximum cluster size,
* then push the previous cluster. Otherwise try
* reallocating to make it sequential.
*/
cursize = vp->v_lastw - vp->v_cstart + 1;
if (!doreallocblks ||
(lbn + 1) * bp->b_bcount != filesize ||
lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
cluster_wbuild(vp, NULL, bp->b_bcount,
vp->v_cstart, cursize, lbn);
} else {
struct buf **bpp, **endbp;
struct cluster_save *buflist;
buflist = cluster_collectbufs(vp, bp);
endbp = &buflist->bs_children
[buflist->bs_nchildren - 1];
if (VOP_REALLOCBLKS(vp, buflist)) {
/*
* Failed, push the previous cluster.
*/
for (bpp = buflist->bs_children;
bpp < endbp; bpp++)
brelse(*bpp);
free(buflist, M_SEGMENT);
cluster_wbuild(vp, NULL, bp->b_bcount,
vp->v_cstart, cursize, lbn);
} else {
/*
* Succeeded, keep building cluster.
*/
for (bpp = buflist->bs_children;
bpp <= endbp; bpp++)
bdwrite(*bpp);
free(buflist, M_SEGMENT);
vp->v_lastw = lbn;
vp->v_lasta = bp->b_blkno;
return;
}
}
cluster_wbuild(vp, NULL, lblocksize,
vp->v_cstart, cursize, lbn);
}
/*
* Consider beginning a cluster.
* If at end of file, make cluster as large as possible,
* otherwise find size of existing cluster.
* Consider beginning a cluster. If at end of file, make
* cluster as large as possible, otherwise find size of
* existing cluster.
*/
if ((lbn + 1) * bp->b_bcount != filesize &&
if ((lbn + 1) * lblocksize != filesize &&
(VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
bp->b_blkno == -1)) {
bp->b_blkno == -1)) {
bawrite(bp);
vp->v_clen = 0;
vp->v_lasta = bp->b_blkno;
@ -551,13 +438,13 @@ cluster_write(bp, filesize)
vp->v_lastw = lbn;
return;
}
vp->v_clen = maxclen;
if (maxclen == 0) { /* I/O not contiguous */
vp->v_clen = maxclen;
if (maxclen == 0) { /* I/O not contiguous */
vp->v_cstart = lbn + 1;
bawrite(bp);
} else { /* Wait for rest of cluster */
bawrite(bp);
} else { /* Wait for rest of cluster */
vp->v_cstart = lbn;
bdwrite(bp);
bdwrite(bp);
}
} else if (lbn == vp->v_cstart + vp->v_clen) {
/*
@ -569,8 +456,7 @@ cluster_write(bp, filesize)
vp->v_cstart = lbn + 1;
} else
/*
* In the middle of a cluster, so just delay the
* I/O for now.
* In the middle of a cluster, so just delay the I/O for now.
*/
bdwrite(bp);
vp->v_lastw = lbn;
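The write-clustering decision above reduces to: a non-sequential block (or an exhausted cluster) pushes whatever has been collected so far, the block that fills the cluster triggers the write, and anything in the middle is merely delayed. The sketch below classifies a write under that simplification; it deliberately ignores the physical-contiguity (b_blkno) and reallocation checks present in the real code.

#include <stdio.h>

/* Per-vnode write-clustering state, mirroring v_cstart/v_lastw/v_clen. */
struct wstate {
        long cstart;    /* first block of the cluster being built */
        long lastw;     /* last block written */
        long clen;      /* remaining contiguous blocks available */
};

enum action { PUSH_PREVIOUS, START_CLUSTER, EXTEND_CLUSTER, FLUSH_NOW };

/*
 * Simplified model of the cluster_write() decision: a block that is not
 * the sequential successor of the last write pushes the cluster built so
 * far; otherwise the write is delayed until the cluster fills.
 */
static enum action
classify_write(struct wstate *ws, long lbn)
{
        if (ws->clen == 0 || lbn != ws->lastw + 1)
                return (ws->clen != 0 ? PUSH_PREVIOUS : START_CLUSTER);
        if (lbn == ws->cstart + ws->clen)
                return (FLUSH_NOW);             /* cluster is full: write it */
        return (EXTEND_CLUSTER);                /* in the middle: bdwrite() */
}

int
main(void)
{
        struct wstate ws = {0, 3, 8};

        printf("%d\n", classify_write(&ws, 4));  /* 2: EXTEND_CLUSTER */
        printf("%d\n", classify_write(&ws, 9));  /* 0: PUSH_PREVIOUS */
        return (0);
}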
@ -591,17 +477,17 @@ cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
long size;
daddr_t start_lbn;
int len;
daddr_t lbn;
daddr_t lbn;
{
struct cluster_save *b_save;
struct buf *bp, *tbp;
caddr_t cp;
int i, s;
caddr_t cp;
int i, j, s;
#ifdef DIAGNOSTIC
if (size != vp->v_mount->mnt_stat.f_iosize)
panic("cluster_wbuild: size %d != filesize %d\n",
size, vp->v_mount->mnt_stat.f_iosize);
size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
@ -619,104 +505,95 @@ cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
}
return;
}
bp = getblk(vp, start_lbn, size, 0, 0);
if (!(bp->b_flags & B_DELWRI)) {
tbp = getblk(vp, start_lbn, size, 0, 0);
if (!(tbp->b_flags & B_DELWRI)) {
++start_lbn;
--len;
brelse(bp);
brelse(tbp);
goto redo;
}
/*
* Extra memory in the buffer, punt on this buffer.
* XXX we could handle this in most cases, but we would have to
* push the extra memory down to after our max possible cluster
* size and then potentially pull it back up if the cluster was
* terminated prematurely--too much hassle.
* Extra memory in the buffer, punt on this buffer. XXX we could
* handle this in most cases, but we would have to push the extra
* memory down to after our max possible cluster size and then
* potentially pull it back up if the cluster was terminated
* prematurely--too much hassle.
*/
if (bp->b_bcount != bp->b_bufsize) {
if (tbp->b_bcount != tbp->b_bufsize) {
++start_lbn;
--len;
bawrite(bp);
bawrite(tbp);
goto redo;
}
--len;
b_save = malloc(sizeof(struct buf *) * len + sizeof(struct cluster_save),
bp = getpbuf();
b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
M_SEGMENT, M_WAITOK);
b_save->bs_bcount = bp->b_bcount;
b_save->bs_bufsize = bp->b_bufsize;
b_save->bs_nchildren = 0;
b_save->bs_children = (struct buf **)(b_save + 1);
b_save->bs_saveaddr = bp->b_saveaddr;
bp->b_saveaddr = (caddr_t) b_save;
b_save->bs_children = (struct buf **) (b_save + 1);
bp->b_saveaddr = b_save;
bp->b_bcount = 0;
bp->b_bufsize = 0;
bp->b_npages = 0;
bp->b_flags |= B_CALL;
if (tbp->b_flags & B_VMIO)
bp->b_flags |= B_VMIO;
bp->b_blkno = tbp->b_blkno;
bp->b_lblkno = tbp->b_lblkno;
bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
bp->b_iodone = cluster_callback;
cp = (char *)bp->b_data + size;
for (++start_lbn, i = 0; i < len; ++i, ++start_lbn) {
/*
* Block is not in core or the non-sequential block
* ending our cluster was part of the cluster (in which
* case we don't want to write it twice).
*/
if (!incore(vp, start_lbn) ||
(last_bp == NULL && start_lbn == lbn))
break;
pbgetvp(vp, bp);
/*
* Get the desired block buffer (unless it is the final
* sequential block whose buffer was passed in explicitly
* as last_bp).
*/
if (last_bp == NULL || start_lbn != lbn) {
tbp = getblk(vp, start_lbn, size, 0, 0);
if (!(tbp->b_flags & B_DELWRI)) {
brelse(tbp);
for (i = 0; i < len; ++i, ++start_lbn) {
if (i != 0) {
/*
* Block is not in core or the non-sequential block
* ending our cluster was part of the cluster (in
* which case we don't want to write it twice).
*/
if (!(tbp = incore(vp, start_lbn)) ||
(last_bp == NULL && start_lbn == lbn))
break;
}
} else
tbp = last_bp;
++b_save->bs_nchildren;
if ((tbp->b_flags & (B_INVAL | B_BUSY | B_CLUSTEROK)) != B_CLUSTEROK)
break;
/* Move memory from children to parent */
if (tbp->b_blkno != (bp->b_blkno + btodb(bp->b_bufsize))) {
printf("Clustered Block: %lu addr %lx bufsize: %ld\n",
(u_long)bp->b_lblkno, bp->b_blkno, bp->b_bufsize);
printf("Child Block: %lu addr: %lx\n",
(u_long)tbp->b_lblkno, tbp->b_blkno);
panic("Clustered write to wrong blocks");
/*
* Get the desired block buffer (unless it is the
* final sequential block whose buffer was passed in
* explicitly as last_bp).
*/
if (last_bp == NULL || start_lbn != lbn) {
tbp = getblk(vp, start_lbn, size, 0, 0);
if (!(tbp->b_flags & B_DELWRI) ||
((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
brelse(tbp);
break;
}
} else
tbp = last_bp;
}
pagemove(tbp->b_data, cp, size);
for (j = 0; j < tbp->b_npages; j += 1) {
bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
}
bp->b_npages += tbp->b_npages;
bp->b_bcount += size;
bp->b_bufsize += size;
tbp->b_bufsize -= size;
tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
tbp->b_flags |= B_ASYNC;
s = splbio();
reassignbuf(tbp, tbp->b_vp); /* put on clean list */
reassignbuf(tbp, tbp->b_vp); /* put on clean list */
++tbp->b_vp->v_numoutput;
splx(s);
b_save->bs_children[i] = tbp;
cp += size;
}
if (i == 0) {
/* None to cluster */
bp->b_saveaddr = b_save->bs_saveaddr;
bp->b_flags &= ~B_CALL;
bp->b_iodone = NULL;
free(b_save, M_SEGMENT);
}
b_save->bs_nchildren = i;
pmap_qenter(bp->b_data, bp->b_pages, bp->b_npages);
bawrite(bp);
if (i < len) {
len -= i + 1;
start_lbn += 1;
len -= i;
goto redo;
}
}
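cluster_wbuild() only folds a buffer into the write cluster when it is a clusterable delayed write that is neither busy nor invalid and is managed the same way (VMIO or not) as the cluster head; those tests are spread across the loop above. The sketch below collects them into one predicate, with flag bit values chosen arbitrarily for illustration.

#include <stdio.h>

/* Illustrative flag bits with the same meanings as the kernel's b_flags. */
#define B_BUSY       0x0001
#define B_DELWRI     0x0002
#define B_INVAL      0x0004
#define B_CLUSTEROK  0x0008
#define B_VMIO       0x0010

/*
 * A buffer may join the cluster only if it is clusterable, not busy or
 * invalid, dirty, and of the same (VMIO or non-VMIO) kind as the head.
 */
static int
can_join_cluster(int head_flags, int child_flags)
{
        if ((child_flags & (B_INVAL | B_BUSY | B_CLUSTEROK)) != B_CLUSTEROK)
                return (0);
        if (!(child_flags & B_DELWRI))
                return (0);
        return ((child_flags & B_VMIO) == (head_flags & B_VMIO));
}

int
main(void)
{
        printf("%d\n", can_join_cluster(B_VMIO, B_DELWRI | B_CLUSTEROK | B_VMIO)); /* 1 */
        printf("%d\n", can_join_cluster(B_VMIO, B_DELWRI | B_CLUSTEROK));          /* 0 */
        return (0);
}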
@ -731,17 +608,17 @@ cluster_collectbufs(vp, last_bp)
struct buf *last_bp;
{
struct cluster_save *buflist;
daddr_t lbn;
daddr_t lbn;
int i, len;
len = vp->v_lastw - vp->v_cstart + 1;
buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
M_SEGMENT, M_WAITOK);
buflist->bs_nchildren = 0;
buflist->bs_children = (struct buf **)(buflist + 1);
buflist->bs_children = (struct buf **) (buflist + 1);
for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
(void)bread(vp, lbn, last_bp->b_bcount, NOCRED,
&buflist->bs_children[i]);
(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
&buflist->bs_children[i]);
buflist->bs_children[i] = last_bp;
buflist->bs_nchildren = i + 1;
return (buflist);

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
* $Id: vfs_subr.c,v 1.12 1994/10/06 21:06:37 davidg Exp $
* $Id: vfs_subr.c,v 1.13 1994/12/23 04:52:55 davidg Exp $
*/
/*
@ -63,13 +63,13 @@
#include <miscfs/specfs/specdev.h>
void insmntque __P((struct vnode *, struct mount *));
void insmntque __P((struct vnode *, struct mount *));
enum vtype iftovt_tab[16] = {
VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
int vttoif_tab[9] = {
0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
S_IFSOCK, S_IFIFO, S_IFMT,
};
@ -84,7 +84,9 @@ int vttoif_tab[9] = {
}
TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
struct mntlist mountlist; /* mounted filesystem list */
struct mntlist mountlist; /* mounted filesystem list */
int desiredvnodes;
/*
* Initialize the vnode management data structures.
@ -92,6 +94,9 @@ struct mntlist mountlist; /* mounted filesystem list */
void
vntblinit()
{
extern int vm_object_cache_max;
desiredvnodes = maxproc + vm_object_cache_max;
TAILQ_INIT(&vnode_free_list);
TAILQ_INIT(&mountlist);
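vntblinit() now sizes desiredvnodes from the process limit plus the VM object cache limit rather than a fixed constant. A trivial illustration with made-up numbers (the kernel derives both values at boot):

#include <stdio.h>

/* Illustrative values only; the kernel computes these at boot time. */
static int maxproc = 64;
static int vm_object_cache_max = 256;

int
main(void)
{
        /* desiredvnodes now tracks process count plus cacheable VM objects. */
        int desiredvnodes = maxproc + vm_object_cache_max;

        printf("desiredvnodes = %d\n", desiredvnodes);
        return (0);
}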
@ -106,9 +111,9 @@ vfs_lock(mp)
register struct mount *mp;
{
while(mp->mnt_flag & MNT_MLOCK) {
while (mp->mnt_flag & MNT_MLOCK) {
mp->mnt_flag |= MNT_MWAIT;
(void) tsleep((caddr_t)mp, PVFS, "vfslck", 0);
(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
}
mp->mnt_flag |= MNT_MLOCK;
return (0);
@ -128,7 +133,7 @@ vfs_unlock(mp)
mp->mnt_flag &= ~MNT_MLOCK;
if (mp->mnt_flag & MNT_MWAIT) {
mp->mnt_flag &= ~MNT_MWAIT;
wakeup((caddr_t)mp);
wakeup((caddr_t) mp);
}
}
@ -141,9 +146,9 @@ vfs_busy(mp)
register struct mount *mp;
{
while(mp->mnt_flag & MNT_MPBUSY) {
while (mp->mnt_flag & MNT_MPBUSY) {
mp->mnt_flag |= MNT_MPWANT;
(void) tsleep((caddr_t)&mp->mnt_flag, PVFS, "vfsbsy", 0);
(void) tsleep((caddr_t) & mp->mnt_flag, PVFS, "vfsbsy", 0);
}
if (mp->mnt_flag & MNT_UNMOUNT)
return (1);
@ -165,7 +170,7 @@ vfs_unbusy(mp)
mp->mnt_flag &= ~MNT_MPBUSY;
if (mp->mnt_flag & MNT_MPWANT) {
mp->mnt_flag &= ~MNT_MPWANT;
wakeup((caddr_t)&mp->mnt_flag);
wakeup((caddr_t) & mp->mnt_flag);
}
}
@ -173,20 +178,18 @@ void
vfs_unmountroot(rootfs)
struct mount *rootfs;
{
struct mount *mp = rootfs;
int error;
struct mount *mp = rootfs;
int error;
if (vfs_busy(mp)) {
printf("failed to unmount root\n");
return;
}
mp->mnt_flag |= MNT_UNMOUNT;
if ((error = vfs_lock(mp))) {
printf("lock of root filesystem failed (%d)\n", error);
return;
}
vnode_pager_umount(mp); /* release cached vnodes */
cache_purgevfs(mp); /* remove cache entries for this file sys */
@ -200,7 +203,6 @@ vfs_unmountroot(rootfs)
else
printf("%d)\n", error);
}
mp->mnt_flag &= ~MNT_UNMOUNT;
vfs_unbusy(mp);
}
@ -222,7 +224,6 @@ vfs_unmountall()
rootfs = mp;
continue;
}
error = dounmount(mp, MNT_FORCE, initproc);
if (error) {
printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
@ -255,7 +256,7 @@ getvfs(fsid)
mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
return (mp);
}
return ((struct mount *)0);
return ((struct mount *) 0);
}
/*
@ -266,7 +267,7 @@ getnewfsid(mp, mtype)
struct mount *mp;
int mtype;
{
static u_short xxxfs_mntid;
static u_short xxxfs_mntid;
fsid_t tfsid;
@ -297,19 +298,19 @@ vattr_null(vap)
vap->va_size = VNOVAL;
vap->va_bytes = VNOVAL;
vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
vap->va_fsid = vap->va_fileid =
vap->va_blocksize = vap->va_rdev =
vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
vap->va_flags = vap->va_gen = VNOVAL;
vap->va_fsid = vap->va_fileid =
vap->va_blocksize = vap->va_rdev =
vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
vap->va_flags = vap->va_gen = VNOVAL;
vap->va_vaflags = 0;
}
/*
* Routines having to do with the management of the vnode table.
*/
extern int (**dead_vnodeop_p)();
extern int (**dead_vnodeop_p) ();
extern void vclean();
long numvnodes;
@ -320,17 +321,16 @@ int
getnewvnode(tag, mp, vops, vpp)
enum vtagtype tag;
struct mount *mp;
int (**vops)();
int (**vops) ();
struct vnode **vpp;
{
register struct vnode *vp;
if ((vnode_free_list.tqh_first == NULL &&
numvnodes < 2 * desiredvnodes) ||
if (vnode_free_list.tqh_first == NULL ||
numvnodes < desiredvnodes) {
vp = (struct vnode *)malloc((u_long)sizeof *vp,
vp = (struct vnode *) malloc((u_long) sizeof *vp,
M_VNODE, M_WAITOK);
bzero((char *)vp, sizeof *vp);
bzero((char *) vp, sizeof *vp);
numvnodes++;
} else {
if ((vp = vnode_free_list.tqh_first) == NULL) {
@ -340,21 +340,23 @@ getnewvnode(tag, mp, vops, vpp)
}
if (vp->v_usecount)
panic("free vnode isn't");
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
/* see comment on why 0xdeadb is set at end of vgone (below) */
vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
vp->v_lease = NULL;
if (vp->v_type != VBAD)
vgone(vp);
#ifdef DIAGNOSTIC
{
int s;
if (vp->v_data)
panic("cleaned vnode isn't");
s = splbio();
if (vp->v_numoutput)
panic("Clean vnode has pending I/O's");
splx(s);
int s;
if (vp->v_data)
panic("cleaned vnode isn't");
s = splbio();
if (vp->v_numoutput)
panic("Clean vnode has pending I/O's");
splx(s);
}
#endif
vp->v_flag = 0;
@ -366,7 +368,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_cstart = 0;
vp->v_clen = 0;
vp->v_socket = 0;
vp->v_writecount = 0; /* XXX */
vp->v_writecount = 0; /* XXX */
}
vp->v_type = VNON;
cache_purge(vp);
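getnewvnode() now grows the vnode pool only while numvnodes is below desiredvnodes (or there is nothing to recycle); otherwise it reuses the vnode at the head of the free list. A one-function model of that decision:

#include <stdio.h>

/*
 * Model of the allocate-vs-recycle policy: allocate a fresh vnode while
 * under desiredvnodes or when the free list is empty, otherwise recycle
 * the least recently used vnode from the head of the free list.
 */
static int
should_allocate(int freelist_empty, long numvnodes, long desiredvnodes)
{
        return (freelist_empty || numvnodes < desiredvnodes);
}

int
main(void)
{
        printf("%d\n", should_allocate(0, 100, 320));   /* 1: still growing */
        printf("%d\n", should_allocate(0, 400, 320));   /* 0: recycle */
        return (0);
}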
@ -415,11 +417,9 @@ vwakeup(bp)
vp->v_numoutput--;
if (vp->v_numoutput < 0)
panic("vwakeup: neg numoutput");
if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
if (vp->v_numoutput < 0)
panic("vwakeup: neg numoutput");
if (vp->v_flag & VBWAIT) {
vp->v_flag &= ~VBWAIT;
wakeup((caddr_t)&vp->v_numoutput);
wakeup((caddr_t) & vp->v_numoutput);
}
}
}
@ -452,7 +452,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
while (blist && blist->b_lblkno < 0)
blist = blist->b_vnbufs.le_next;
if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
(flags & V_SAVEMETA))
while (blist && blist->b_lblkno < 0)
blist = blist->b_vnbufs.le_next;
@ -466,9 +466,9 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
s = splbio();
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
error = tsleep((caddr_t)bp,
slpflag | (PRIBIO + 1), "vinvalbuf",
slptimeo);
error = tsleep((caddr_t) bp,
slpflag | (PRIBIO + 1), "vinvalbuf",
slptimeo);
splx(s);
if (error)
return (error);
@ -478,9 +478,10 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
bp->b_flags |= B_BUSY;
splx(s);
/*
* XXX Since there are no node locks for NFS, I believe
* there is a slight chance that a delayed write will
* occur while sleeping just above, so check for it.
* XXX Since there are no node locks for NFS, I
* believe there is a slight chance that a delayed
* write will occur while sleeping just above, so
* check for it.
*/
if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
(void) VOP_BWRITE(bp);
@ -491,9 +492,17 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
}
}
s = splbio();
while (vp->v_numoutput > 0) {
vp->v_flag |= VBWAIT;
tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
}
splx(s);
pager = NULL;
object = (vm_object_t)vp->v_vmdata;
if( object != NULL)
object = (vm_object_t) vp->v_vmdata;
if (object != NULL)
pager = object->pager;
if (pager != NULL) {
object = vm_object_lookup(pager);
@ -506,7 +515,6 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
vm_object_deallocate(object);
}
}
if (!(flags & V_SAVEMETA) &&
(vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
panic("vinvalbuf: flush failed");
@ -564,6 +572,41 @@ brelvp(bp)
HOLDRELE(vp);
}
/*
* Associate a p-buffer with a vnode.
*/
void
pbgetvp(vp, bp)
register struct vnode *vp;
register struct buf *bp;
{
if (bp->b_vp)
panic("pbgetvp: not free");
VHOLD(vp);
bp->b_vp = vp;
if (vp->v_type == VBLK || vp->v_type == VCHR)
bp->b_dev = vp->v_rdev;
else
bp->b_dev = NODEV;
}
/*
* Disassociate a p-buffer from a vnode.
*/
void
pbrelvp(bp)
register struct buf *bp;
{
struct vnode *vp;
if (bp->b_vp == (struct vnode *) 0)
panic("brelvp: NULL");
vp = bp->b_vp;
bp->b_vp = (struct vnode *) 0;
HOLDRELE(vp);
}
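pbgetvp()/pbrelvp() attach a pager buffer to a vnode with only a hold count, without linking it onto the vnode's clean or dirty buffer lists the way the regular brelvp() path manages ordinary buffers. A toy model of that bookkeeping; struct xvnode and struct xbuf are invented stand-ins for the kernel structures.

#include <assert.h>
#include <stdio.h>

/* Simplified vnode and buffer: only the fields the helpers touch. */
struct xvnode { int holdcnt; };
struct xbuf   { struct xvnode *vp; };

/* Model of pbgetvp(): take a hold on the vnode and point the pbuf at it. */
static void
pb_getvp(struct xvnode *vp, struct xbuf *bp)
{
        assert(bp->vp == NULL);         /* "pbgetvp: not free" */
        vp->holdcnt++;                  /* VHOLD() */
        bp->vp = vp;
}

/* Model of pbrelvp(): drop the hold and detach the pbuf. */
static void
pb_relvp(struct xbuf *bp)
{
        assert(bp->vp != NULL);
        bp->vp->holdcnt--;              /* HOLDRELE() */
        bp->vp = NULL;
}

int
main(void)
{
        struct xvnode vn = {0};
        struct xbuf bp = {NULL};

        pb_getvp(&vn, &bp);             /* held around a cluster transfer */
        pb_relvp(&bp);
        printf("holdcnt back to %d\n", vn.holdcnt);
        return (0);
}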
/*
* Reassign a buffer from one vnode to another.
* Used to assign file specific control information
@ -586,14 +629,25 @@ reassignbuf(bp, newvp)
if (bp->b_vnbufs.le_next != NOLIST)
bufremvn(bp);
/*
* If dirty, put on list of dirty buffers;
* otherwise insert onto list of clean buffers.
* If dirty, put on list of dirty buffers; otherwise insert onto list
* of clean buffers.
*/
if (bp->b_flags & B_DELWRI)
listheadp = &newvp->v_dirtyblkhd;
else
if (bp->b_flags & B_DELWRI) {
struct buf *tbp;
tbp = newvp->v_dirtyblkhd.lh_first;
if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
bufinsvn(bp, &newvp->v_dirtyblkhd);
} else {
while (tbp->b_vnbufs.le_next && (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
tbp = tbp->b_vnbufs.le_next;
}
LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
}
} else {
listheadp = &newvp->v_cleanblkhd;
bufinsvn(bp, listheadp);
bufinsvn(bp, listheadp);
}
}
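reassignbuf() now keeps each vnode's dirty-buffer list sorted by logical block number, which keeps sequential delayed writes adjacent on the list. The stand-alone sketch below reproduces the insertion using the same <sys/queue.h> list macros; struct dbuf is an invented stand-in for struct buf.

#include <stdio.h>
#include <sys/queue.h>

/* Minimal buffer with a list linkage, as on a vnode's v_dirtyblkhd. */
struct dbuf {
        long lblkno;
        LIST_ENTRY(dbuf) b_vnbufs;
};
LIST_HEAD(dirtylist, dbuf);

/*
 * Keep the dirty list sorted by logical block number, inserting either at
 * the head or after the last entry with a smaller block number.
 */
static void
insert_sorted(struct dirtylist *head, struct dbuf *bp)
{
        struct dbuf *tbp = head->lh_first;

        if (tbp == NULL || tbp->lblkno > bp->lblkno) {
                LIST_INSERT_HEAD(head, bp, b_vnbufs);
                return;
        }
        while (tbp->b_vnbufs.le_next != NULL &&
            tbp->b_vnbufs.le_next->lblkno < bp->lblkno)
                tbp = tbp->b_vnbufs.le_next;
        LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
}

int
main(void)
{
        struct dirtylist head;
        struct dbuf a = {5}, b = {2}, c = {9}, *p;

        LIST_INIT(&head);
        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);
        for (p = head.lh_first; p != NULL; p = p->b_vnbufs.le_next)
                printf("%ld ", p->lblkno);      /* prints: 2 5 9 */
        printf("\n");
        return (0);
}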
/*
@ -612,14 +666,14 @@ bdevvp(dev, vpp)
if (dev == NODEV)
return (0);
error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
if (error) {
*vpp = 0;
return (error);
}
vp = nvp;
vp->v_type = VBLK;
if ((nvp = checkalias(vp, dev, (struct mount *)0))) {
if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
vput(vp);
vp = nvp;
}
@ -665,7 +719,7 @@ checkalias(nvp, nvp_rdev, mp)
}
if (vp == NULL || vp->v_tag != VT_NON) {
MALLOC(nvp->v_specinfo, struct specinfo *,
sizeof(struct specinfo), M_VNODE, M_WAITOK);
sizeof(struct specinfo), M_VNODE, M_WAITOK);
nvp->v_rdev = nvp_rdev;
nvp->v_hashchain = vpp;
nvp->v_specnext = *vpp;
@ -702,20 +756,19 @@ vget(vp, lockflag)
{
/*
* If the vnode is in the process of being cleaned out for
* another use, we wait for the cleaning to finish and then
* return failure. Cleaning is determined either by checking
* that the VXLOCK flag is set, or that the use count is
* zero with the back pointer set to show that it has been
* removed from the free list by getnewvnode. The VXLOCK
* flag may not have been set yet because vclean is blocked in
* the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
* If the vnode is in the process of being cleaned out for another
* use, we wait for the cleaning to finish and then return failure.
* Cleaning is determined either by checking that the VXLOCK flag is
* set, or that the use count is zero with the back pointer set to
* show that it has been removed from the free list by getnewvnode.
* The VXLOCK flag may not have been set yet because vclean is blocked
* in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
*/
if ((vp->v_flag & VXLOCK) ||
(vp->v_usecount == 0 &&
vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)) {
vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
vp->v_flag |= VXWANT;
(void) tsleep((caddr_t)vp, PINOD, "vget", 0);
(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
return (1);
}
if (vp->v_usecount == 0)
@ -768,7 +821,7 @@ vrele(vp)
if (vp->v_usecount > 0)
return;
#ifdef DIAGNOSTIC
if (vp->v_usecount != 0 /* || vp->v_writecount != 0 */) {
if (vp->v_usecount != 0 /* || vp->v_writecount != 0 */ ) {
vprint("vrele: bad ref count", vp);
panic("vrele: ref cnt");
}
@ -813,8 +866,9 @@ holdrele(vp)
* that are found.
*/
#ifdef DIAGNOSTIC
int busyprt = 0; /* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
int busyprt = 0; /* print out busy vnodes */
struct ctldebug debug1 = {"busyprt", &busyprt};
#endif
int
@ -844,24 +898,24 @@ vflush(mp, skipvp, flags)
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
continue;
/*
* If WRITECLOSE is set, only flush out regular file
* vnodes open for writing.
* If WRITECLOSE is set, only flush out regular file vnodes
* open for writing.
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG))
continue;
/*
* With v_usecount == 0, all we need to do is clear
* out the vnode data structures and we are done.
* With v_usecount == 0, all we need to do is clear out the
* vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
vgone(vp);
continue;
}
/*
* If FORCECLOSE is set, forcibly close the vnode.
* For block or character devices, revert to an
* anonymous device. For all other files, just kill them.
* If FORCECLOSE is set, forcibly close the vnode. For block
* or character devices, revert to an anonymous device. For
* all other files, just kill them.
*/
if (flags & FORCECLOSE) {
if (vp->v_type != VBLK && vp->v_type != VCHR) {
@ -869,7 +923,7 @@ vflush(mp, skipvp, flags)
} else {
vclean(vp, 0);
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *)0);
insmntque(vp, (struct mount *) 0);
}
continue;
}
@ -895,24 +949,23 @@ vclean(vp, flags)
int active;
/*
* Check to see if the vnode is in use.
* If so we have to reference it before we clean it out
* so that its count cannot fall to zero and generate a
* race against ourselves to recycle it.
* Check to see if the vnode is in use. If so we have to reference it
* before we clean it out so that its count cannot fall to zero and
* generate a race against ourselves to recycle it.
*/
if ((active = vp->v_usecount))
VREF(vp);
/*
* Even if the count is zero, the VOP_INACTIVE routine may still
* have the object locked while it cleans it out. The VOP_LOCK
* ensures that the VOP_INACTIVE routine is done with its work.
* For active vnodes, it ensures that no other activity can
* occur while the underlying object is being cleaned out.
* Even if the count is zero, the VOP_INACTIVE routine may still have
* the object locked while it cleans it out. The VOP_LOCK ensures that
* the VOP_INACTIVE routine is done with its work. For active vnodes,
* it ensures that no other activity can occur while the underlying
* object is being cleaned out.
*/
VOP_LOCK(vp);
/*
* Prevent the vnode from being recycled or
* brought into use while we clean it out.
* Prevent the vnode from being recycled or brought into use while we
* clean it out.
*/
if (vp->v_flag & VXLOCK)
panic("vclean: deadlock");
@ -923,13 +976,13 @@ vclean(vp, flags)
if (flags & DOCLOSE)
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
/*
* Any other processes trying to obtain this lock must first
* wait for VXLOCK to clear, then call the new lock operation.
* Any other processes trying to obtain this lock must first wait for
* VXLOCK to clear, then call the new lock operation.
*/
VOP_UNLOCK(vp);
/*
* If purging an active vnode, it must be closed and
* deactivated before being reclaimed.
* If purging an active vnode, it must be closed and deactivated
* before being reclaimed.
*/
if (active) {
if (flags & DOCLOSE)
@ -952,7 +1005,7 @@ vclean(vp, flags)
vp->v_flag &= ~VXLOCK;
if (vp->v_flag & VXWANT) {
vp->v_flag &= ~VXWANT;
wakeup((caddr_t)vp);
wakeup((caddr_t) vp);
}
}
@ -968,17 +1021,17 @@ vgoneall(vp)
if (vp->v_flag & VALIASED) {
/*
* If a vgone (or vclean) is already in progress,
* wait until it is done and return.
* If a vgone (or vclean) is already in progress, wait until
* it is done and return.
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
(void) tsleep((caddr_t)vp, PINOD, "vgall", 0);
(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
return;
}
/*
* Ensure that vp will not be vgone'd while we
* are eliminating its aliases.
* Ensure that vp will not be vgone'd while we are eliminating
* its aliases.
*/
vp->v_flag |= VXLOCK;
while (vp->v_flag & VALIASED) {
@ -991,9 +1044,8 @@ vgoneall(vp)
}
}
/*
* Remove the lock so that vgone below will
* really eliminate the vnode after which time
* vgone will awaken any sleepers.
* Remove the lock so that vgone below will really eliminate
* the vnode after which time vgone will awaken any sleepers.
*/
vp->v_flag &= ~VXLOCK;
}
@ -1012,12 +1064,12 @@ vgone(vp)
struct vnode *vx;
/*
* If a vgone (or vclean) is already in progress,
* wait until it is done and return.
* If a vgone (or vclean) is already in progress, wait until it is
* done and return.
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
(void) tsleep((caddr_t)vp, PINOD, "vgone", 0);
(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
return;
}
/*
@ -1067,20 +1119,18 @@ vgone(vp)
vp->v_specinfo = NULL;
}
/*
* If it is on the freelist and not already at the head,
* move it to the head of the list. The test of the back
* pointer and the reference count of zero is because
* it will be removed from the free list by getnewvnode,
* but will not have its reference count incremented until
* after calling vgone. If the reference count were
* incremented first, vgone would (incorrectly) try to
* close the previous instance of the underlying object.
* So, the back pointer is explicitly set to `0xdeadb' in
* getnewvnode after removing it from the freelist to ensure
* that we do not try to move it here.
* If it is on the freelist and not already at the head, move it to
* the head of the list. The test of the back pointer and the
* reference count of zero is because it will be removed from the free
* list by getnewvnode, but will not have its reference count
* incremented until after calling vgone. If the reference count were
* incremented first, vgone would (incorrectly) try to close the
* previous instance of the underlying object. So, the back pointer is
* explicitly set to `0xdeadb' in getnewvnode after removing it from
* the freelist to ensure that we do not try to move it here.
*/
if (vp->v_usecount == 0 &&
vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
vnode_free_list.tqh_first != vp) {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@ -1141,7 +1191,7 @@ vcount(vp)
* Print out a description of a vnode.
*/
static char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
void
vprint(label, vp)
@ -1153,8 +1203,8 @@ vprint(label, vp)
if (label != NULL)
printf("%s: ", label);
printf("type %s, usecount %d, writecount %d, refcount %ld,",
typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
buf[0] = '\0';
if (vp->v_flag & VROOT)
strcat(buf, "|VROOT");
@ -1194,16 +1244,17 @@ printlockedvnodes()
printf("Locked vnodes\n");
for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
vp = vp->v_mntvnodes.le_next)
vp != NULL;
vp = vp->v_mntvnodes.le_next)
if (VOP_ISLOCKED(vp))
vprint((char *)0, vp);
vprint((char *) 0, vp);
}
}
#endif
int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP 10
/*
* Dump vnode list (via sysctl).
@ -1228,7 +1279,7 @@ sysctl_vnode(where, sizep)
return (0);
}
ewhere = where + *sizep;
for (mp = mountlist.tqh_first; mp != NULL; mp = nmp) {
nmp = mp->mnt_list.tqe_next;
if (vfs_busy(mp))
@ -1236,12 +1287,12 @@ sysctl_vnode(where, sizep)
savebp = bp;
again:
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
vp = vp->v_mntvnodes.le_next) {
vp != NULL;
vp = vp->v_mntvnodes.le_next) {
/*
* Check that the vp is still associated with
* this filesystem. RACE: could have been
* recycled onto the same filesystem.
* Check that the vp is still associated with this
* filesystem. RACE: could have been recycled onto
* the same filesystem.
*/
if (vp->v_mount != mp) {
if (kinfo_vdebug)
@ -1253,8 +1304,8 @@ sysctl_vnode(where, sizep)
*sizep = bp - where;
return (ENOMEM);
}
if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
(error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
if ((error = copyout((caddr_t) & vp, bp, VPTRSZ)) ||
(error = copyout((caddr_t) vp, bp + VPTRSZ, VNODESZ)))
return (error);
bp += VPTRSZ + VNODESZ;
}
@ -1317,16 +1368,16 @@ vfs_hang_addrlist(mp, nep, argp)
return (0);
}
i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
bzero((caddr_t)np, i);
saddr = (struct sockaddr *)(np + 1);
if ((error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen)))
np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
bzero((caddr_t) np, i);
saddr = (struct sockaddr *) (np + 1);
if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
goto out;
if (saddr->sa_len > argp->ex_addrlen)
saddr->sa_len = argp->ex_addrlen;
if (argp->ex_masklen) {
smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
error = copyin(argp->ex_addr, (caddr_t)smask, argp->ex_masklen);
smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
error = copyin(argp->ex_addr, (caddr_t) smask, argp->ex_masklen);
if (error)
goto out;
if (smask->sa_len > argp->ex_masklen)
@ -1335,13 +1386,13 @@ vfs_hang_addrlist(mp, nep, argp)
i = saddr->sa_family;
if ((rnh = nep->ne_rtable[i]) == 0) {
/*
* Seems silly to initialize every AF when most are not
* used, do so on demand here
* Seems silly to initialize every AF when most are not used,
* do so on demand here
*/
for (dom = domains; dom; dom = dom->dom_next)
if (dom->dom_family == i && dom->dom_rtattach) {
dom->dom_rtattach((void **)&nep->ne_rtable[i],
dom->dom_rtoffset);
dom->dom_rtattach((void **) &nep->ne_rtable[i],
dom->dom_rtoffset);
break;
}
if ((rnh = nep->ne_rtable[i]) == 0) {
@ -1349,9 +1400,9 @@ vfs_hang_addrlist(mp, nep, argp)
goto out;
}
}
rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
np->netc_rnodes);
if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
np->netc_rnodes);
if (rn == 0 || np != (struct netcred *) rn) { /* already exists */
error = EPERM;
goto out;
}
@ -1370,13 +1421,13 @@ vfs_free_netcred(rn, w)
struct radix_node *rn;
caddr_t w;
{
register struct radix_node_head *rnh = (struct radix_node_head *)w;
register struct radix_node_head *rnh = (struct radix_node_head *) w;
(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
free((caddr_t)rn, M_NETADDR);
(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
free((caddr_t) rn, M_NETADDR);
return (0);
}
/*
* Free the net address hash lists that are hanging off the mount points.
*/
@ -1389,9 +1440,9 @@ vfs_free_addrlist(nep)
for (i = 0; i <= AF_MAX; i++)
if ((rnh = nep->ne_rtable[i])) {
(*rnh->rnh_walktree)(rnh, vfs_free_netcred,
(caddr_t)rnh);
free((caddr_t)rnh, M_RTABLE);
(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
(caddr_t) rnh);
free((caddr_t) rnh, M_RTABLE);
nep->ne_rtable[i] = 0;
}
}
@ -1436,8 +1487,8 @@ vfs_export_lookup(mp, nep, nam)
rnh = nep->ne_rtable[saddr->sa_family];
if (rnh != NULL) {
np = (struct netcred *)
(*rnh->rnh_matchaddr)((caddr_t)saddr,
rnh);
(*rnh->rnh_matchaddr) ((caddr_t) saddr,
rnh);
if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
np = NULL;
}

rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
np->netc_rnodes);
if (rn == 0 || np != (struct netcred *) rn) { /* already exists */
error = EPERM;
goto out;
}
@ -1370,13 +1421,13 @@ vfs_free_netcred(rn, w)
struct radix_node *rn;
caddr_t w;
{
register struct radix_node_head *rnh = (struct radix_node_head *)w;
register struct radix_node_head *rnh = (struct radix_node_head *) w;
(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
free((caddr_t)rn, M_NETADDR);
(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
free((caddr_t) rn, M_NETADDR);
return (0);
}
/*
* Free the net address hash lists that are hanging off the mount points.
*/
@ -1389,9 +1440,9 @@ vfs_free_addrlist(nep)
for (i = 0; i <= AF_MAX; i++)
if ((rnh = nep->ne_rtable[i])) {
(*rnh->rnh_walktree)(rnh, vfs_free_netcred,
(caddr_t)rnh);
free((caddr_t)rnh, M_RTABLE);
(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
(caddr_t) rnh);
free((caddr_t) rnh, M_RTABLE);
nep->ne_rtable[i] = 0;
}
}
@ -1436,8 +1487,8 @@ vfs_export_lookup(mp, nep, nam)
rnh = nep->ne_rtable[saddr->sa_family];
if (rnh != NULL) {
np = (struct netcred *)
(*rnh->rnh_matchaddr)((caddr_t)saddr,
rnh);
(*rnh->rnh_matchaddr) ((caddr_t) saddr,
rnh);
if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
np = NULL;
}
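
The vfs_subr.c hunks above mostly re-wrap the comments around the VXLOCK/VXWANT interlock that vclean() and vgone() rely on. As a reading aid only, not part of the commit, here is a minimal user-space sketch of that sleep/wakeup protocol; the flag values and the tsleep/wakeup stand-ins are invented for illustration, and only the flag-handling logic mirrors the kernel code above.

#include <stdio.h>

/* Invented stand-ins for the kernel's vnode flags and sleep/wakeup calls. */
#define VXLOCK 0x01	/* vnode is being cleaned/recycled */
#define VXWANT 0x02	/* someone is sleeping, waiting for VXLOCK to clear */

struct vnode { int v_flag; };

static void tsleep_stub(struct vnode *vp) { printf("sleep on vnode %p\n", (void *)vp); }
static void wakeup_stub(struct vnode *vp) { printf("wakeup vnode %p\n", (void *)vp); }

/* What vgone() does when it finds a clean already in progress (see the
 * "wait until it is done and return" hunks above). */
static void wait_for_clean(struct vnode *vp)
{
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		tsleep_stub(vp);	/* kernel: tsleep((caddr_t) vp, PINOD, "vgone", 0) */
	}
}

/* What the tail of vclean() does once the vnode has been cleaned. */
static void finish_clean(struct vnode *vp)
{
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup_stub(vp);	/* kernel: wakeup((caddr_t) vp) */
	}
}

int main(void)
{
	struct vnode vn = { VXLOCK };	/* pretend a clean is in progress */

	wait_for_clean(&vn);	/* a second process would block here */
	finish_clean(&vn);	/* the cleaner drops the lock and wakes sleepers */
	return 0;
}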

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
* $Id: vfs_vnops.c,v 1.5 1994/10/02 17:35:40 phk Exp $
* $Id: vfs_vnops.c,v 1.6 1994/10/05 09:48:26 davidg Exp $
*/
#include <sys/param.h>
@ -158,21 +158,26 @@ vn_open(ndp, fmode, cmode)
if( vp->v_type == VREG) {
vm_object_t object;
vm_pager_t pager;
retry:
if( (vp->v_flag & VVMIO) == 0) {
pager = (vm_pager_t) vnode_pager_alloc(vp, 0, 0, 0);
object = (vm_object_t) vp->v_vmdata;
if( object->pager != pager)
panic("ufs_open: pager/object mismatch");
panic("vn_open: pager/object mismatch");
(void) vm_object_lookup( pager);
pager_cache( object, TRUE);
vp->v_flag |= VVMIO;
} else {
object = (vm_object_t) vp->v_vmdata;
if( (object = (vm_object_t)vp->v_vmdata) &&
(object->flags & OBJ_DEAD)) {
tsleep( (caddr_t) object, PVM, "vodead", 0);
goto retry;
}
if( !object)
panic("ufs_open: VMIO object missing");
panic("vn_open: VMIO object missing");
pager = object->pager;
if( !pager)
panic("ufs_open: VMIO pager missing");
panic("vn_open: VMIO pager missing");
(void) vm_object_lookup( pager);
}
}
@ -235,11 +240,12 @@ vn_close(vp, flags, cred, p)
* be in vfs code.
*/
if (vp->v_flag & VVMIO) {
vrele(vp);
if( vp->v_vmdata == NULL)
panic("ufs_close: VMIO object missing");
panic("vn_close: VMIO object missing");
vm_object_deallocate( (vm_object_t) vp->v_vmdata);
}
vrele(vp);
} else
vrele(vp);
return (error);
}

@ -1,4 +1,4 @@
/* $Id: msdosfs_denode.c,v 1.5 1994/12/12 12:35:43 bde Exp $ */
/* $Id: msdosfs_denode.c,v 1.6 1994/12/27 12:37:35 bde Exp $ */
/* $NetBSD: msdosfs_denode.c,v 1.9 1994/08/21 18:44:00 ws Exp $ */
/*-
@ -477,7 +477,7 @@ detrunc(dep, length, flags, cred, p)
#endif
return error;
}
vnode_pager_uncache(DETOV(dep)); /* what's this for? */
/* vnode_pager_uncache(DETOV(dep)); /* what's this for? */
/*
* is this the right place for it?
*/

@ -1,4 +1,4 @@
/* $Id: msdosfs_vnops.c,v 1.10 1994/12/12 12:35:50 bde Exp $ */
/* $Id: msdosfs_vnops.c,v 1.11 1994/12/27 12:37:36 bde Exp $ */
/* $NetBSD: msdosfs_vnops.c,v 1.20 1994/08/21 18:44:13 ws Exp $ */
/*-
@ -704,7 +704,6 @@ msdosfs_write(ap)
dep->de_FileSize = uio->uio_offset + n;
vnode_pager_setsize(vp, dep->de_FileSize); /* why? */
}
(void) vnode_pager_uncache(vp); /* why not? */
/*
* Should these vnode_pager_* functions be done on dir
* files?
@ -725,7 +724,6 @@ msdosfs_write(ap)
if (ioflag & IO_SYNC)
(void) bwrite(bp);
else if (n + croffset == pmp->pm_bpcluster) {
bp->b_flags |= B_AGE;
bawrite(bp);
} else
bdwrite(bp);

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.5 (Berkeley) 1/4/94
* $Id: nfs_bio.c,v 1.6 1994/10/02 17:26:55 phk Exp $
* $Id: nfs_bio.c,v 1.7 1994/10/17 17:47:32 phk Exp $
*/
#include <sys/param.h>
@ -78,7 +78,7 @@ nfs_bioread(vp, uio, ioflag, cred)
struct vattr vattr;
struct proc *p;
struct nfsmount *nmp;
daddr_t lbn, bn, rabn;
daddr_t lbn, rabn;
caddr_t baddr;
int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
@ -94,7 +94,7 @@ nfs_bioread(vp, uio, ioflag, cred)
if (uio->uio_offset < 0 && vp->v_type != VDIR)
return (EINVAL);
nmp = VFSTONFS(vp->v_mount);
biosize = nmp->nm_rsize;
biosize = NFS_MAXDGRAMDATA;
p = uio->uio_procp;
/*
* For nfs, cache consistency can only be maintained approximately.
@ -198,7 +198,6 @@ nfs_bioread(vp, uio, ioflag, cred)
nfsstats.biocache_reads++;
lbn = uio->uio_offset / biosize;
on = uio->uio_offset & (biosize-1);
bn = lbn * (biosize / DEV_BSIZE);
not_readin = 1;
/*
@ -208,15 +207,17 @@ nfs_bioread(vp, uio, ioflag, cred)
lbn == vp->v_lastr + 1) {
for (nra = 0; nra < nmp->nm_readahead &&
(lbn + 1 + nra) * biosize < np->n_size; nra++) {
rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
rabn = lbn + 1 + nra;
if (!incore(vp, rabn)) {
rabp = nfs_getcacheblk(vp, rabn, biosize, p);
if (!rabp)
return (EINTR);
if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
rabp->b_flags |= (B_READ | B_ASYNC);
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred)) {
rabp->b_flags |= B_INVAL;
rabp->b_flags |= B_INVAL|B_ERROR;
vfs_unbusy_pages(rabp);
brelse(rabp);
}
}
@ -230,21 +231,23 @@ nfs_bioread(vp, uio, ioflag, cred)
* Otherwise, get the block and write back/read in,
* as required.
*/
if ((bp = incore(vp, bn)) &&
if ((bp = incore(vp, lbn)) &&
(bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
(B_BUSY | B_WRITEINPROG))
got_buf = 0;
else {
again:
bp = nfs_getcacheblk(vp, bn, biosize, p);
bp = nfs_getcacheblk(vp, lbn, biosize, p);
if (!bp)
return (EINTR);
got_buf = 1;
if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
bp->b_flags |= B_READ;
not_readin = 0;
vfs_busy_pages(bp, 0);
error = nfs_doio(bp, cred, p);
if (error) {
vfs_unbusy_pages(bp);
brelse(bp);
return (error);
}
@ -257,7 +260,7 @@ nfs_bioread(vp, uio, ioflag, cred)
if (not_readin && n > 0) {
if (on < bp->b_validoff || (on + n) > bp->b_validend) {
if (!got_buf) {
bp = nfs_getcacheblk(vp, bn, biosize, p);
bp = nfs_getcacheblk(vp, lbn, biosize, p);
if (!bp)
return (EINTR);
got_buf = 1;
@ -285,8 +288,11 @@ nfs_bioread(vp, uio, ioflag, cred)
return (EINTR);
if ((bp->b_flags & B_DONE) == 0) {
bp->b_flags |= B_READ;
vfs_busy_pages(bp, 0);
error = nfs_doio(bp, cred, p);
if (error) {
vfs_unbusy_pages(bp);
bp->b_flags |= B_ERROR;
brelse(bp);
return (error);
}
@ -297,14 +303,18 @@ nfs_bioread(vp, uio, ioflag, cred)
break;
case VDIR:
nfsstats.biocache_readdirs++;
bn = (daddr_t)uio->uio_offset;
bp = nfs_getcacheblk(vp, bn, NFS_DIRBLKSIZ, p);
lbn = (daddr_t)uio->uio_offset;
bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, p);
if (!bp)
return (EINTR);
if ((bp->b_flags & B_DONE) == 0) {
bp->b_flags |= B_READ;
vfs_busy_pages(bp, 0);
error = nfs_doio(bp, cred, p);
if (error) {
vfs_unbusy_pages(bp);
bp->b_flags |= B_ERROR;
brelse(bp);
return (error);
}
@ -323,8 +333,10 @@ nfs_bioread(vp, uio, ioflag, cred)
if (rabp) {
if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
rabp->b_flags |= (B_READ | B_ASYNC);
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred)) {
rabp->b_flags |= B_INVAL;
vfs_unbusy_pages(rabp);
rabp->b_flags |= B_INVAL|B_ERROR;
brelse(rabp);
}
}
@ -385,7 +397,7 @@ nfs_write(ap)
struct buf *bp;
struct vattr vattr;
struct nfsmount *nmp;
daddr_t lbn, bn;
daddr_t lbn;
int n, on, error = 0;
#ifdef DIAGNOSTIC
@ -434,14 +446,12 @@ nfs_write(ap)
* will be the same size within a filesystem. nfs_writerpc will
* still use nm_wsize when sizing the rpc's.
*/
biosize = nmp->nm_rsize;
biosize = NFS_MAXDGRAMDATA;
do {
/*
* XXX make sure we aren't cached in the VM page cache
*/
(void)vnode_pager_uncache(vp);
/*
* Check for a valid write lease.
* If non-cachable, just do the rpc
@ -467,9 +477,8 @@ nfs_write(ap)
lbn = uio->uio_offset / biosize;
on = uio->uio_offset & (biosize-1);
n = min((unsigned)(biosize - on), uio->uio_resid);
bn = lbn * (biosize / DEV_BSIZE);
again:
bp = nfs_getcacheblk(vp, bn, biosize, p);
bp = nfs_getcacheblk(vp, lbn, biosize, p);
if (!bp)
return (EINTR);
if (bp->b_wcred == NOCRED) {
@ -591,6 +600,10 @@ nfs_getcacheblk(vp, bn, size, p)
}
} else
bp = getblk(vp, bn, size, 0, 0);
if( vp->v_type == VREG)
bp->b_blkno = (bn * NFS_MAXDGRAMDATA) / DEV_BSIZE;
return (bp);
}
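
Not part of the diff: a short sketch of the block-numbering change in the nfs_bio.c hunks above. nfs_bioread() and nfs_write() now hand nfs_getcacheblk() a logical block number (lbn) instead of a precomputed device block, and the device address is derived inside nfs_getcacheblk(). The arithmetic below mirrors the added line; NFS_MAXDGRAMDATA = 8192 and DEV_BSIZE = 512 are assumed values for the example, not taken from the diff.

#include <stdio.h>

#define NFS_MAXDGRAMDATA 8192	/* assumed biosize, matching the "biosize = NFS_MAXDGRAMDATA" hunks */
#define DEV_BSIZE        512	/* assumed device block size */

/* Mirrors the line added to nfs_getcacheblk() for VREG vnodes:
 *	bp->b_blkno = (bn * NFS_MAXDGRAMDATA) / DEV_BSIZE;	*/
static long nfs_lbn_to_blkno(long lbn)
{
	return (lbn * NFS_MAXDGRAMDATA) / DEV_BSIZE;
}

int main(void)
{
	long lbn;

	/* Previously the callers computed bn = lbn * (biosize / DEV_BSIZE) themselves. */
	for (lbn = 0; lbn < 4; lbn++)
		printf("lbn %ld -> b_blkno %ld\n", lbn, nfs_lbn_to_blkno(lbn));
	return 0;
}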

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_subs.c 8.3 (Berkeley) 1/4/94
* $Id: nfs_subs.c,v 1.6 1994/10/02 17:27:01 phk Exp $
* $Id: nfs_subs.c,v 1.7 1994/10/17 17:47:37 phk Exp $
*/
/*
@ -995,6 +995,7 @@ nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, p)
*/
if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
cnp->cn_flags |= HASBUF;
nfsrv_vmio( ndp->ni_vp);
return (0);
}
out:
@ -1123,6 +1124,7 @@ nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp)
*rdonlyp = 0;
if (!lockflag)
VOP_UNLOCK(*vpp);
nfsrv_vmio(*vpp);
return (0);
}
@ -1168,3 +1170,54 @@ netaddr_match(family, haddr, nam)
};
return (0);
}
int
nfsrv_vmio( struct vnode *vp) {
int rtval;
vm_object_t object;
vm_pager_t pager;
if( (vp == NULL) || (vp->v_type != VREG))
return 1;
retry:
if( (vp->v_flag & VVMIO) == 0) {
pager = (vm_pager_t) vnode_pager_alloc(vp, 0, 0, 0);
object = (vm_object_t) vp->v_vmdata;
if( object->pager != pager)
panic("nfsrv_vmio: pager/object mismatch");
(void) vm_object_lookup( pager);
pager_cache( object, TRUE);
vp->v_flag |= VVMIO;
} else {
if( (object = (vm_object_t)vp->v_vmdata) &&
(object->flags & OBJ_DEAD)) {
tsleep( (caddr_t) object, PVM, "nfdead", 0);
goto retry;
}
if( !object)
panic("nfsrv_vmio: VMIO object missing");
pager = object->pager;
if( !pager)
panic("nfsrv_vmio: VMIO pager missing");
(void) vm_object_lookup( pager);
}
return 0;
}
int
nfsrv_vput( struct vnode *vp) {
if( (vp->v_flag & VVMIO) && vp->v_vmdata) {
vm_object_deallocate( (vm_object_t) vp->v_vmdata);
}
vput( vp);
return 0;
}
int
nfsrv_vrele( struct vnode *vp) {
if( (vp->v_flag & VVMIO) && vp->v_vmdata) {
vm_object_deallocate( (vm_object_t) vp->v_vmdata);
}
vrele( vp);
return 0;
}

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_nqlease.c 8.3 (Berkeley) 1/4/94
* $Id: nfs_nqlease.c,v 1.6 1994/10/02 17:26:57 phk Exp $
* $Id: nfs_nqlease.c,v 1.7 1994/10/17 17:47:34 phk Exp $
*/
/*
@ -715,7 +715,7 @@ nqnfsrv_getlease(nfsd, mrep, md, dpos, cred, nam, mrq)
(void) nqsrv_getlease(vp, &nfsd->nd_duration, flags, nfsd,
nam, &cache, &frev, cred);
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_NQFATTR + 4*NFSX_UNSIGNED);
nfsm_build(tl, u_long *, 4*NFSX_UNSIGNED);
*tl++ = txdr_unsigned(cache);

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_serv.c 8.3 (Berkeley) 1/12/94
* $Id: nfs_serv.c,v 1.6 1994/09/28 16:45:18 dfr Exp $
* $Id: nfs_serv.c,v 1.7 1994/10/02 17:26:58 phk Exp $
*/
/*
@ -121,7 +121,7 @@ nqnfsrv_access(nfsd, mrep, md, dpos, cred, nam, mrq)
if (*tl == nfs_true)
mode |= VEXEC;
error = nfsrv_access(vp, mode, cred, rdonly, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
nfsm_srvdone;
}
@ -158,7 +158,7 @@ nfsrv_getattr(nfsd, mrep, md, dpos, cred, nam, mrq)
nfsm_reply(0);
nqsrv_getl(vp, NQL_READ);
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
nfsm_build(fp, struct nfsv2_fattr *, NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
nfsm_srvfillattr;
@ -255,12 +255,12 @@ nfsrv_setattr(nfsd, mrep, md, dpos, cred, nam, mrq)
}
error = VOP_SETATTR(vp, vap, cred, nfsd->nd_procp);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
out:
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL) + 2*NFSX_UNSIGNED);
nfsm_build(fp, struct nfsv2_fattr *, NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
nfsm_srvfillattr;
@ -314,21 +314,21 @@ nfsrv_lookup(nfsd, mrep, md, dpos, cred, nam, mrq)
if (error)
nfsm_reply(0);
nqsrv_getl(nd.ni_startdir, NQL_READ);
vrele(nd.ni_startdir);
nfsrv_vrele(nd.ni_startdir);
FREE(nd.ni_cnd.cn_pnbuf, M_NAMEI);
vp = nd.ni_vp;
bzero((caddr_t)fhp, sizeof(nfh));
fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
error = VFS_VPTOFH(vp, &fhp->fh_fid);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
if (duration2)
(void) nqsrv_getlease(vp, &duration2, NQL_READ, nfsd,
nam, &cache2, &frev2, cred);
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_FH + NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL) + 5*NFSX_UNSIGNED);
if (nfsd->nd_nqlflag != NQL_NOVAL) {
if (duration2) {
@ -417,7 +417,7 @@ nfsrv_readlink(nfsd, mrep, md, dpos, cred, nam, mrq)
nqsrv_getl(vp, NQL_READ);
error = VOP_READLINK(vp, uiop, cred);
out:
vput(vp);
nfsrv_vput(vp);
if (error)
m_freem(mp3);
nfsm_reply(NFSX_UNSIGNED);
@ -488,7 +488,7 @@ nfsrv_read(nfsd, mrep, md, dpos, cred, nam, mrq)
}
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
if (off >= vap->va_size)
@ -539,12 +539,12 @@ nfsrv_read(nfsd, mrep, md, dpos, cred, nam, mrq)
FREE((caddr_t)iv2, M_TEMP);
if (error || (error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp))) {
m_freem(mreq);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
} else
uiop->uio_resid = 0;
vput(vp);
nfsrv_vput(vp);
nfsm_srvfillattr;
len -= uiop->uio_resid;
tlen = nfsm_rndup(len);
@ -619,13 +619,13 @@ nfsrv_write(nfsd, mrep, md, dpos, cred, nam, mrq)
nfsm_reply(0);
if (vp->v_type != VREG) {
error = (vp->v_type == VDIR) ? EISDIR : EACCES;
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
nqsrv_getl(vp, NQL_WRITE);
error = nfsrv_access(vp, VWRITE, cred, rdonly, nfsd->nd_procp);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
uiop->uio_resid = 0;
@ -663,19 +663,19 @@ nfsrv_write(nfsd, mrep, md, dpos, cred, nam, mrq)
}
if (len > 0 && mp == NULL) {
error = EBADRPC;
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
uiop->uio_resid = siz;
error = VOP_WRITE(vp, uiop, ioflags, cred);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
off = uiop->uio_offset;
}
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
nfsm_build(fp, struct nfsv2_fattr *, NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
nfsm_srvfillattr;
@ -743,7 +743,7 @@ nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
else
rdev = fxdr_unsigned(long, sp->sa_nqrdev);
if (vap->va_type == VREG || vap->va_type == VSOCK) {
vrele(nd.ni_startdir);
nfsrv_vrele(nd.ni_startdir);
nqsrv_getl(nd.ni_dvp, NQL_WRITE);
error=VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap);
if (error)
@ -758,7 +758,7 @@ nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
error = suser(cred, (u_short *)0);
if (error) {
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
vput(nd.ni_dvp);
nfsrv_vput(nd.ni_dvp);
goto out;
} else
vap->va_rdev = (dev_t)rdev;
@ -766,7 +766,7 @@ nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
nqsrv_getl(nd.ni_dvp, NQL_WRITE);
error=VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap);
if (error) {
vrele(nd.ni_startdir);
nfsrv_vrele(nd.ni_startdir);
nfsm_reply(0);
}
nd.ni_cnd.cn_nameiop = LOOKUP;
@ -780,27 +780,27 @@ nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
}
FREE(nd.ni_cnd.cn_pnbuf, M_NAMEI);
if (nd.ni_cnd.cn_flags & ISSYMLINK) {
vrele(nd.ni_dvp);
vput(nd.ni_vp);
nfsrv_vrele(nd.ni_dvp);
nfsrv_vput(nd.ni_vp);
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
error = EINVAL;
nfsm_reply(0);
}
} else {
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
vput(nd.ni_dvp);
nfsrv_vput(nd.ni_dvp);
error = ENXIO;
goto out;
}
vp = nd.ni_vp;
} else {
vrele(nd.ni_startdir);
nfsrv_vrele(nd.ni_startdir);
free(nd.ni_cnd.cn_pnbuf, M_NAMEI);
vp = nd.ni_vp;
if (nd.ni_dvp == vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
nfsrv_vput(nd.ni_dvp);
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nfsd->nd_nqlflag == NQL_NOVAL) {
tsize = fxdr_unsigned(long, sp->sa_nfssize);
@ -814,13 +814,13 @@ nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
error = nfsrv_access(vp, VWRITE, cred,
(nd.ni_cnd.cn_flags & RDONLY), nfsd->nd_procp);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
nqsrv_getl(vp, NQL_WRITE);
error = VOP_SETATTR(vp, vap, cred, nfsd->nd_procp);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
}
@ -829,11 +829,11 @@ nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
error = VFS_VPTOFH(vp, &fhp->fh_fid);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_FH+NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
nfsm_srvfhtom(fhp);
nfsm_build(fp, struct nfsv2_fattr *, NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
@ -841,18 +841,18 @@ nfsrv_create(nfsd, mrep, md, dpos, cred, nam, mrq)
return (error);
nfsmout:
if (nd.ni_cnd.cn_nameiop || nd.ni_cnd.cn_flags)
vrele(nd.ni_startdir);
nfsrv_vrele(nd.ni_startdir);
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == nd.ni_vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
nfsrv_vput(nd.ni_dvp);
if (nd.ni_vp)
vput(nd.ni_vp);
nfsrv_vput(nd.ni_vp);
return (error);
out:
vrele(nd.ni_startdir);
nfsrv_vrele(nd.ni_startdir);
free(nd.ni_cnd.cn_pnbuf, M_NAMEI);
nfsm_reply(0);
return (0);
@ -911,10 +911,10 @@ nfsrv_remove(nfsd, mrep, md, dpos, cred, nam, mrq)
} else {
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
vput(vp);
nfsrv_vput(nd.ni_dvp);
nfsrv_vput(vp);
}
nfsm_reply(0);
nfsm_srvdone;
@ -973,8 +973,8 @@ nfsrv_rename(nfsd, mrep, md, dpos, cred, nam, mrq)
&dpos, nfsd->nd_procp);
if (error) {
VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
vrele(fromnd.ni_dvp);
vrele(fvp);
nfsrv_vrele(fromnd.ni_dvp);
nfsrv_vrele(fvp);
goto out1;
}
tdvp = tond.ni_dvp;
@ -1023,34 +1023,34 @@ nfsrv_rename(nfsd, mrep, md, dpos, cred, nam, mrq)
} else {
VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd);
if (tdvp == tvp)
vrele(tdvp);
nfsrv_vrele(tdvp);
else
vput(tdvp);
nfsrv_vput(tdvp);
if (tvp)
vput(tvp);
nfsrv_vput(tvp);
VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
vrele(fromnd.ni_dvp);
vrele(fvp);
nfsrv_vrele(fromnd.ni_dvp);
nfsrv_vrele(fvp);
}
vrele(tond.ni_startdir);
nfsrv_vrele(tond.ni_startdir);
FREE(tond.ni_cnd.cn_pnbuf, M_NAMEI);
out1:
vrele(fromnd.ni_startdir);
nfsrv_vrele(fromnd.ni_startdir);
FREE(fromnd.ni_cnd.cn_pnbuf, M_NAMEI);
nfsm_reply(0);
return (error);
nfsmout:
if (tond.ni_cnd.cn_nameiop || tond.ni_cnd.cn_flags) {
vrele(tond.ni_startdir);
nfsrv_vrele(tond.ni_startdir);
FREE(tond.ni_cnd.cn_pnbuf, M_NAMEI);
}
if (fromnd.ni_cnd.cn_nameiop || fromnd.ni_cnd.cn_flags) {
vrele(fromnd.ni_startdir);
nfsrv_vrele(fromnd.ni_startdir);
FREE(fromnd.ni_cnd.cn_pnbuf, M_NAMEI);
VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd);
vrele(fromnd.ni_dvp);
vrele(fvp);
nfsrv_vrele(fromnd.ni_dvp);
nfsrv_vrele(fvp);
}
return (error);
}
@ -1111,14 +1111,14 @@ nfsrv_link(nfsd, mrep, md, dpos, cred, nam, mrq)
} else {
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == nd.ni_vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
nfsrv_vput(nd.ni_dvp);
if (nd.ni_vp)
vrele(nd.ni_vp);
nfsrv_vrele(nd.ni_vp);
}
out1:
vrele(vp);
nfsrv_vrele(vp);
nfsm_reply(0);
nfsm_srvdone;
}
@ -1178,10 +1178,10 @@ nfsrv_symlink(nfsd, mrep, md, dpos, cred, nam, mrq)
if (nd.ni_vp) {
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == nd.ni_vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
vrele(nd.ni_vp);
nfsrv_vput(nd.ni_dvp);
nfsrv_vrele(nd.ni_vp);
error = EEXIST;
goto out;
}
@ -1197,11 +1197,11 @@ nfsrv_symlink(nfsd, mrep, md, dpos, cred, nam, mrq)
nfsmout:
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == nd.ni_vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
nfsrv_vput(nd.ni_dvp);
if (nd.ni_vp)
vrele(nd.ni_vp);
nfsrv_vrele(nd.ni_vp);
if (pathcp)
FREE(pathcp, M_TEMP);
return (error);
@ -1252,10 +1252,10 @@ nfsrv_mkdir(nfsd, mrep, md, dpos, cred, nam, mrq)
if (vp != NULL) {
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
vrele(vp);
nfsrv_vput(nd.ni_dvp);
nfsrv_vrele(vp);
error = EEXIST;
nfsm_reply(0);
}
@ -1268,11 +1268,11 @@ nfsrv_mkdir(nfsd, mrep, md, dpos, cred, nam, mrq)
fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
error = VFS_VPTOFH(vp, &fhp->fh_fid);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
error = VOP_GETATTR(vp, vap, cred, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_FH+NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
nfsm_srvfhtom(fhp);
nfsm_build(fp, struct nfsv2_fattr *, NFSX_FATTR(nfsd->nd_nqlflag != NQL_NOVAL));
@ -1281,11 +1281,11 @@ nfsrv_mkdir(nfsd, mrep, md, dpos, cred, nam, mrq)
nfsmout:
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == nd.ni_vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
nfsrv_vput(nd.ni_dvp);
if (nd.ni_vp)
vrele(nd.ni_vp);
nfsrv_vrele(nd.ni_vp);
return (error);
}
@ -1347,10 +1347,10 @@ nfsrv_rmdir(nfsd, mrep, md, dpos, cred, nam, mrq)
} else {
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
if (nd.ni_dvp == nd.ni_vp)
vrele(nd.ni_dvp);
nfsrv_vrele(nd.ni_dvp);
else
vput(nd.ni_dvp);
vput(vp);
nfsrv_vput(nd.ni_dvp);
nfsrv_vput(vp);
}
nfsm_reply(0);
nfsm_srvdone;
@ -1438,7 +1438,7 @@ nfsrv_readdir(nfsd, mrep, md, dpos, cred, nam, mrq)
nqsrv_getl(vp, NQL_READ);
error = nfsrv_access(vp, VEXEC, cred, rdonly, nfsd->nd_procp);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
VOP_UNLOCK(vp);
@ -1458,7 +1458,7 @@ nfsrv_readdir(nfsd, mrep, md, dpos, cred, nam, mrq)
error = VOP_READDIR(vp, &io, cred, &eofflag, &ncookies, &cookies);
off = (off_t)io.uio_offset;
if (error) {
vrele(vp);
nfsrv_vrele(vp);
free((caddr_t)rbuf, M_TEMP);
nfsm_reply(0);
}
@ -1466,7 +1466,7 @@ nfsrv_readdir(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* If the filesystem doesn't support cookies, return eof.
*/
vrele(vp);
nfsrv_vrele(vp);
nfsm_reply(2*NFSX_UNSIGNED);
nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
*tl++ = nfs_false;
@ -1482,7 +1482,7 @@ nfsrv_readdir(nfsd, mrep, md, dpos, cred, nam, mrq)
* rpc reply
*/
if (siz == 0) {
vrele(vp);
nfsrv_vrele(vp);
nfsm_reply(2*NFSX_UNSIGNED);
nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
*tl++ = nfs_false;
@ -1573,7 +1573,7 @@ nfsrv_readdir(nfsd, mrep, md, dpos, cred, nam, mrq)
dp = (struct dirent *)cpos;
cookiep++;
}
vrele(vp);
nfsrv_vrele(vp);
nfsm_clget;
*tl = nfs_false;
bp += NFSX_UNSIGNED;
@ -1643,7 +1643,7 @@ nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
nqsrv_getl(vp, NQL_READ);
error = nfsrv_access(vp, VEXEC, cred, rdonly, nfsd->nd_procp);
if (error) {
vput(vp);
nfsrv_vput(vp);
nfsm_reply(0);
}
VOP_UNLOCK(vp);
@ -1663,7 +1663,7 @@ nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
error = VOP_READDIR(vp, &io, cred, &eofflag, &ncookies, &cookies);
off = (u_long)io.uio_offset;
if (error) {
vrele(vp);
nfsrv_vrele(vp);
free((caddr_t)rbuf, M_TEMP);
nfsm_reply(0);
}
@ -1671,7 +1671,7 @@ nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
/*
* If the filesystem doesn't support cookies, return eof.
*/
vrele(vp);
nfsrv_vrele(vp);
nfsm_reply(2*NFSX_UNSIGNED);
nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
*tl++ = nfs_false;
@ -1687,7 +1687,7 @@ nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
* rpc reply
*/
if (siz == 0) {
vrele(vp);
nfsrv_vrele(vp);
nfsm_reply(2 * NFSX_UNSIGNED);
nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED);
*tl++ = nfs_false;
@ -1742,7 +1742,7 @@ nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
fl.fl_nfh.fh_generic.fh_fsid =
nvp->v_mount->mnt_stat.f_fsid;
if (VFS_VPTOFH(nvp, &fl.fl_nfh.fh_generic.fh_fid)) {
vput(nvp);
nfsrv_vput(nvp);
goto invalid;
}
if (duration2) {
@ -1754,10 +1754,10 @@ nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
} else
fl.fl_duration = 0;
if (VOP_GETATTR(nvp, vap, cred, nfsd->nd_procp)) {
vput(nvp);
nfsrv_vput(nvp);
goto invalid;
}
vput(nvp);
nfsrv_vput(nvp);
fp = (struct nfsv2_fattr *)&fl.fl_fattr;
nfsm_srvfillattr;
len += (4*NFSX_UNSIGNED + nlen + rem + NFSX_FH
@ -1827,7 +1827,7 @@ nqnfsrv_readdirlook(nfsd, mrep, md, dpos, cred, nam, mrq)
dp = (struct dirent *)cpos;
cookiep++;
}
vrele(vp);
nfsrv_vrele(vp);
nfsm_clget;
*tl = nfs_false;
bp += NFSX_UNSIGNED;
@ -1880,7 +1880,7 @@ nfsrv_statfs(nfsd, mrep, md, dpos, cred, nam, mrq)
nfsm_reply(0);
sf = &statfs;
error = VFS_STATFS(vp->v_mount, sf, nfsd->nd_procp);
vput(vp);
nfsrv_vput(vp);
nfsm_reply(NFSX_STATFS(isnq));
nfsm_build(sfp, struct nfsv2_statfs *, NFSX_STATFS(isnq));
sfp->sf_tsize = txdr_unsigned(NFS_MAXDGRAMDATA);

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_vnops.c 8.5 (Berkeley) 2/13/94
* $Id: nfs_vnops.c,v 1.9 1994/10/09 07:35:06 davidg Exp $
* $Id: nfs_vnops.c,v 1.10 1994/10/17 17:47:41 phk Exp $
*/
/*
@ -2356,8 +2356,10 @@ nfs_update(ap)
} */ *ap;
{
#if 0
/* Use nfs_setattr */
printf("nfs_update: need to implement!!");
#endif
return (EOPNOTSUPP);
}

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.7 (Berkeley) 1/21/94
* $Id: buf.h,v 1.9 1994/10/10 00:58:31 phk Exp $
* $Id: buf.h,v 1.10 1994/10/18 06:55:57 davidg Exp $
*/
#ifndef _SYS_BUF_H_
@ -83,9 +83,9 @@ struct buf {
void *b_driver2; /* for private use by the driver */
void *b_spc;
#ifndef VMIO
void *b_pages[(MAXBSIZE + PAGE_SIZE - 1)/PAGE_SIZE];
void *b_pages[(MAXPHYS + PAGE_SIZE - 1)/PAGE_SIZE];
#else
vm_page_t b_pages[(MAXBSIZE + PAGE_SIZE - 1)/PAGE_SIZE];
struct vm_page *b_pages[(MAXPHYS + PAGE_SIZE - 1)/PAGE_SIZE];
#endif
int b_npages;
};
@ -116,13 +116,13 @@ struct buf {
#define B_INVAL 0x00002000 /* Does not contain valid info. */
#define B_LOCKED 0x00004000 /* Locked in core (not reusable). */
#define B_NOCACHE 0x00008000 /* Do not cache block after use. */
#define B_PAGET 0x00010000 /* Page in/out of page table space. */
#define B_PGIN 0x00020000 /* Pagein op, so swap() can count it. */
#define B_MALLOC 0x00010000 /* malloced b_data */
#define B_CLUSTEROK 0x00020000 /* Buffer may be clustered with adjacent buffers. */
#define B_PHYS 0x00040000 /* I/O to user memory. */
#define B_RAW 0x00080000 /* Set by physio for raw transfers. */
#define B_READ 0x00100000 /* Read buffer. */
#define B_TAPE 0x00200000 /* Magnetic tape I/O. */
#define B_UAREA 0x00400000 /* Buffer describes Uarea I/O. */
#define B_PDWANTED 0x00400000 /* Pageout daemon wants this buffer. */
#define B_WANTED 0x00800000 /* Process wants this buffer. */
#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define B_WRITEINPROG 0x01000000 /* Write in progress. */
@ -159,7 +159,7 @@ struct cluster_save {
/*
* Definitions for the buffer free lists.
*/
#define BUFFER_QUEUES 5 /* number of free buffer queues */
#define BUFFER_QUEUES 6 /* number of free buffer queues */
LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
@ -167,8 +167,9 @@ TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
#define QUEUE_NONE 0 /* on no queue */
#define QUEUE_LOCKED 1 /* locked buffers */
#define QUEUE_LRU 2 /* useful buffers */
#define QUEUE_AGE 3 /* less useful buffers */
#define QUEUE_EMPTY 4 /* empty buffer headers*/
#define QUEUE_VMIO 3 /* VMIO buffers */
#define QUEUE_AGE 4 /* not-useful buffers */
#define QUEUE_EMPTY 5 /* empty buffer headers*/
/*
* Zero out the buffer's data area.
@ -202,12 +203,12 @@ int bwrite __P((struct buf *));
void bdwrite __P((struct buf *));
void bawrite __P((struct buf *));
void brelse __P((struct buf *));
struct buf *getnewbuf __P((int slpflag, int slptimeo));
struct buf *getnewbuf __P((int slpflag, int slptimeo, int));
struct buf * getpbuf __P((void));
struct buf *incore __P((struct vnode *, daddr_t));
struct buf *getblk __P((struct vnode *, daddr_t, int, int, int));
struct buf *geteblk __P((int));
void allocbuf __P((struct buf *, int));
int allocbuf __P((struct buf *, int, int));
int biowait __P((struct buf *));
void biodone __P((struct buf *));
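
Illustration only, not part of the commit: b_pages[] in the buf.h hunk above is now sized by MAXPHYS rather than MAXBSIZE, using ceiling division so a partially filled last page still gets a slot. The constants below are placeholder values chosen for the example, not the kernel's.

#include <stdio.h>

#define MAXPHYS   (64 * 1024)	/* placeholder maximum transfer size */
#define PAGE_SIZE 4096		/* placeholder page size */

int main(void)
{
	/* Same expression as the b_pages[] dimension in the hunk above. */
	int npages = (MAXPHYS + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("b_pages[] entries: %d\n", npages);	/* 16 with these placeholder values */
	return 0;
}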

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.7 (Berkeley) 1/21/94
* $Id: buf.h,v 1.9 1994/10/10 00:58:31 phk Exp $
* $Id: buf.h,v 1.10 1994/10/18 06:55:57 davidg Exp $
*/
#ifndef _SYS_BUF_H_
@ -83,9 +83,9 @@ struct buf {
void *b_driver2; /* for private use by the driver */
void *b_spc;
#ifndef VMIO
void *b_pages[(MAXBSIZE + PAGE_SIZE - 1)/PAGE_SIZE];
void *b_pages[(MAXPHYS + PAGE_SIZE - 1)/PAGE_SIZE];
#else
vm_page_t b_pages[(MAXBSIZE + PAGE_SIZE - 1)/PAGE_SIZE];
struct vm_page *b_pages[(MAXPHYS + PAGE_SIZE - 1)/PAGE_SIZE];
#endif
int b_npages;
};
@ -116,13 +116,13 @@ struct buf {
#define B_INVAL 0x00002000 /* Does not contain valid info. */
#define B_LOCKED 0x00004000 /* Locked in core (not reusable). */
#define B_NOCACHE 0x00008000 /* Do not cache block after use. */
#define B_PAGET 0x00010000 /* Page in/out of page table space. */
#define B_PGIN 0x00020000 /* Pagein op, so swap() can count it. */
#define B_MALLOC 0x00010000 /* malloced b_data */
#define B_CLUSTEROK 0x00020000 /* Pagein op, so swap() can count it. */
#define B_PHYS 0x00040000 /* I/O to user memory. */
#define B_RAW 0x00080000 /* Set by physio for raw transfers. */
#define B_READ 0x00100000 /* Read buffer. */
#define B_TAPE 0x00200000 /* Magnetic tape I/O. */
#define B_UAREA 0x00400000 /* Buffer describes Uarea I/O. */
#define B_PDWANTED 0x00400000 /* Pageout daemon wants this buffer. */
#define B_WANTED 0x00800000 /* Process wants this buffer. */
#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define B_WRITEINPROG 0x01000000 /* Write in progress. */
@ -159,7 +159,7 @@ struct cluster_save {
/*
* Definitions for the buffer free lists.
*/
#define BUFFER_QUEUES 5 /* number of free buffer queues */
#define BUFFER_QUEUES 6 /* number of free buffer queues */
LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
@ -167,8 +167,9 @@ TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
#define QUEUE_NONE 0 /* on no queue */
#define QUEUE_LOCKED 1 /* locked buffers */
#define QUEUE_LRU 2 /* useful buffers */
#define QUEUE_AGE 3 /* less useful buffers */
#define QUEUE_EMPTY 4 /* empty buffer headers*/
#define QUEUE_VMIO 3 /* VMIO buffers */
#define QUEUE_AGE 4 /* not-useful buffers */
#define QUEUE_EMPTY 5 /* empty buffer headers*/
/*
* Zero out the buffer's data area.
@ -202,12 +203,12 @@ int bwrite __P((struct buf *));
void bdwrite __P((struct buf *));
void bawrite __P((struct buf *));
void brelse __P((struct buf *));
struct buf *getnewbuf __P((int slpflag, int slptimeo));
struct buf *getnewbuf __P((int slpflag, int slptimeo, int));
struct buf * getpbuf __P((void));
struct buf *incore __P((struct vnode *, daddr_t));
struct buf *getblk __P((struct vnode *, daddr_t, int, int, int));
struct buf *geteblk __P((int));
void allocbuf __P((struct buf *, int));
int allocbuf __P((struct buf *, int, int));
int biowait __P((struct buf *));
void biodone __P((struct buf *));
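Editor's note: the queue definitions above add a dedicated QUEUE_VMIO list alongside the LOCKED, LRU, AGE and EMPTY lists. As a minimal stand-alone sketch of that data structure (a toy struct buf and the queue indices are borrowed from the header; everything else is an assumption for illustration, not kernel code):

/* Illustrative sketch only -- a userland toy modelling the per-queue
 * buffer free lists declared above; not part of this commit. */
#include <sys/queue.h>
#include <stdio.h>

#define BUFFER_QUEUES 6
#define QUEUE_NONE   0
#define QUEUE_LOCKED 1
#define QUEUE_LRU    2
#define QUEUE_VMIO   3
#define QUEUE_AGE    4
#define QUEUE_EMPTY  5

struct toybuf {
	int b_qindex;                   /* which free list we are on */
	TAILQ_ENTRY(toybuf) b_freelist; /* free list linkage */
};

TAILQ_HEAD(bqueues, toybuf) bufqueues[BUFFER_QUEUES];

int
main(void)
{
	struct toybuf a, b;
	int i;

	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* A VMIO-backed buffer goes on its own queue... */
	a.b_qindex = QUEUE_VMIO;
	TAILQ_INSERT_TAIL(&bufqueues[QUEUE_VMIO], &a, b_freelist);
	/* ...while an aged buffer is reclaimed ahead of useful ones. */
	b.b_qindex = QUEUE_AGE;
	TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], &b, b_freelist);

	printf("VMIO queue %s\n",
	    TAILQ_FIRST(&bufqueues[QUEUE_VMIO]) ? "non-empty" : "empty");
	return (0);
}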


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)param.h 8.2 (Berkeley) 1/21/94
* $Id: param.h,v 1.4 1994/08/21 04:41:55 paul Exp $
* $Id: param.h,v 1.5 1994/09/01 05:12:51 davidg Exp $
*/
#ifndef _SYS_PARAM_H_
@ -152,9 +152,10 @@
* smaller units (fragments) only in the last direct block. MAXBSIZE
* primarily determines the size of buffers in the buffer pool. It may be
* made larger without any effect on existing file systems; however making
* it smaller may make some file systems unmountable.
* it smaller may make some file systems unmountable. Also, MAXBSIZE
* must be less than MAXPHYS!!!
*/
#define MAXBSIZE MAXPHYS
#define MAXBSIZE 16384
#define MAXFRAG 8
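Editor's note: the rewritten comment above pins down a new invariant: MAXBSIZE, now fixed at 16384 instead of being defined as MAXPHYS, may not exceed MAXPHYS. A hedged, stand-alone illustration of enforcing that rule at compile time (the MAXPHYS value below is an assumption for the sketch, not taken from this commit):

/* Illustrative sketch only -- not part of this commit.  A userland
 * compile-time guard for the MAXBSIZE <= MAXPHYS rule noted above. */
#include <stdio.h>

#define MAXPHYS  (64 * 1024)	/* assumed largest physical transfer */
#define MAXBSIZE 16384		/* value introduced by this commit */

/* The array size goes negative, and the build fails, if the rule breaks. */
typedef char maxbsize_fits_in_maxphys[(MAXBSIZE <= MAXPHYS) ? 1 : -1];

int
main(void)
{
	printf("MAXBSIZE=%d, MAXPHYS=%d\n", MAXBSIZE, MAXPHYS);
	return (0);
}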
/*


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)proc.h 8.8 (Berkeley) 1/21/94
* $Id: proc.h,v 1.13 1994/11/13 12:46:08 davidg Exp $
* $Id: proc.h,v 1.14 1994/11/15 14:37:39 bde Exp $
*/
#ifndef _SYS_PROC_H_
@ -99,8 +99,7 @@ struct proc {
int p_flag; /* P_* flags. */
char p_stat; /* S* process status. */
char p_lock; /* Process lock count. */
char p_pad1[2];
char p_pad1[3];
pid_t p_pid; /* Process identifier. */
struct proc *p_hash; /* Hashed based on p_pid for kill+exit+... */
@ -137,7 +136,9 @@ struct proc {
struct vnode *p_textvp; /* Vnode of executable. */
long p_spare[3]; /* Pad to 256, avoid shifting eproc. */
char p_lock; /* Process lock count. */
char p_pad2[3]; /* alignment */
long p_spare[2]; /* Pad to 256, avoid shifting eproc. XXX */
/* End area that is zeroed on creation. */
#define p_endzero p_startcopy


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vmmeter.h 8.1 (Berkeley) 6/2/93
* $Id: vmmeter.h,v 1.5 1994/10/15 13:33:02 davidg Exp $
* $Id: vmmeter.h,v 1.6 1994/10/18 14:59:13 davidg Exp $
*/
#ifndef _SYS_VMMETER_H_
@ -87,6 +87,10 @@ struct vmmeter {
unsigned v_active_count;/* number of pages active */
unsigned v_inactive_target; /* number of pages desired inactive */
unsigned v_inactive_count; /* number of pages inactive */
unsigned v_cache_count; /* number of pages on buffer cache queue */
unsigned v_cache_min; /* min number of pages desired on cache queue */
unsigned v_cache_max; /* max number of pages in cached obj */
unsigned v_pageout_free_min; /* min number pages reserved for kernel */
};
#ifdef KERNEL
struct vmmeter cnt;
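Editor's note: the new v_cache_* counters above let the VM system treat clean, reclaimable cache pages as almost-free memory; later in this diff, vm_fork() waits while (v_free_count + v_cache_count) is below v_free_min. A small stand-alone sketch of that test (only the field names come from the header; the struct here is a local stand-in):

/* Illustrative sketch only -- not part of this commit. */
#include <stdio.h>

struct toy_vmmeter {
	unsigned v_free_count;	/* pages free */
	unsigned v_cache_count;	/* pages on the buffer cache queue */
	unsigned v_free_min;	/* low-water mark for free pages */
};

/* Mirrors the shape of the check added to vm_fork(): cache pages count
 * as reclaimable when deciding whether to wait for memory. */
static int
memory_is_tight(const struct toy_vmmeter *cnt)
{
	return ((cnt->v_free_count + cnt->v_cache_count) < cnt->v_free_min);
}

int
main(void)
{
	struct toy_vmmeter cnt = { 10, 50, 32 };

	printf("tight: %d\n", memory_is_tight(&cnt));	/* 0: cache helps */
	cnt.v_cache_count = 5;
	printf("tight: %d\n", memory_is_tight(&cnt));	/* 1 */
	return (0);
}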


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94
* $Id: ffs_alloc.c,v 1.4 1994/09/20 05:53:24 bde Exp $
* $Id: ffs_alloc.c,v 1.5 1994/10/10 01:04:34 phk Exp $
*/
#include <sys/param.h>
@ -210,7 +210,7 @@ ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
panic("bad blockno");
ip->i_blocks += btodb(nsize - osize);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
allocbuf(bp, nsize);
allocbuf(bp, nsize, 0);
bp->b_flags |= B_DONE;
bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
*bpp = bp;
@ -268,14 +268,14 @@ ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
(u_long (*)())ffs_alloccg);
if (bno > 0) {
bp->b_blkno = fsbtodb(fs, bno);
(void) vnode_pager_uncache(ITOV(ip));
/* (void) vnode_pager_uncache(ITOV(ip)); */
ffs_blkfree(ip, bprev, (long)osize);
if (nsize < request)
ffs_blkfree(ip, bno + numfrags(fs, nsize),
(long)(request - nsize));
ip->i_blocks += btodb(nsize - osize);
ip->i_flag |= IN_CHANGE | IN_UPDATE;
allocbuf(bp, nsize);
allocbuf(bp, nsize, 0);
bp->b_flags |= B_DONE;
bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
*bpp = bp;


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_inode.c 8.5 (Berkeley) 12/30/93
* $Id: ffs_inode.c,v 1.9 1994/10/22 02:27:32 davidg Exp $
* $Id: ffs_inode.c,v 1.10 1994/12/27 14:44:42 bde Exp $
*/
#include <sys/param.h>
@ -204,7 +204,6 @@ ffs_truncate(ap)
if (error)
return (error);
#endif
vnode_pager_setsize(ovp, (u_long)length);
osize = oip->i_size;
/*
* Lengthen the size of the file. We must ensure that the
@ -226,6 +225,7 @@ ffs_truncate(ap)
bwrite(bp);
else
bawrite(bp);
vnode_pager_setsize(ovp, (u_long)length);
oip->i_flag |= IN_CHANGE | IN_UPDATE;
return (VOP_UPDATE(ovp, &tv, &tv, 1));
}
@ -250,7 +250,7 @@ ffs_truncate(ap)
oip->i_size = length;
size = blksize(fs, oip, lbn);
bzero((char *)bp->b_data + offset, (u_int)(size - offset));
allocbuf(bp, size);
allocbuf(bp, size, 0);
if (aflags & IO_SYNC)
bwrite(bp);
else
@ -386,6 +386,7 @@ ffs_truncate(ap)
if (oip->i_blocks < 0) /* sanity */
oip->i_blocks = 0;
oip->i_flag |= IN_CHANGE;
vnode_pager_setsize(ovp, (u_long)length);
#ifdef QUOTA
(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
@ -441,7 +442,8 @@ ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
*/
vp = ITOV(ip);
bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
if (bp->b_flags & (B_DONE | B_DELWRI)) {
/* if (bp->b_flags & (B_DONE | B_DELWRI)) { */
if (bp->b_flags & B_CACHE) {
/* Braces must be here in case trace evaluates to nothing. */
trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
} else {
@ -451,6 +453,7 @@ ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
if (bp->b_bcount > bp->b_bufsize)
panic("ffs_indirtrunc: bad buffer size");
bp->b_blkno = dbn;
vfs_busy_pages(bp, 0);
VOP_STRATEGY(bp);
error = biowait(bp);
}


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_vnops.c 8.7 (Berkeley) 2/3/94
* $Id: ffs_vnops.c,v 1.6 1994/10/06 21:06:59 davidg Exp $
* $Id: ffs_vnops.c,v 1.7 1994/10/10 01:04:40 phk Exp $
*/
#include <sys/param.h>
@ -261,19 +261,27 @@ ffs_fsync(ap)
continue;
if ((bp->b_flags & B_DELWRI) == 0)
panic("ffs_fsync: not dirty");
bremfree(bp);
bp->b_flags |= B_BUSY;
splx(s);
if (bp->b_vp != vp && ap->a_waitfor != MNT_NOWAIT) {
bremfree(bp);
bp->b_flags |= B_BUSY;
splx(s);
/*
* Wait for I/O associated with indirect blocks to complete,
* since there is no way to quickly wait for them below.
*/
if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
(void) bawrite(bp);
else
(void) bwrite(bp);
if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
(void) bawrite(bp);
else
(void) bwrite(bp);
} else {
vfs_bio_awrite(bp);
splx(s);
}
goto loop;
}
if (ap->a_waitfor == MNT_WAIT) {
while (vp->v_numoutput) {
vp->v_flag |= VBWAIT;
@ -287,6 +295,7 @@ ffs_fsync(ap)
#endif
}
splx(s);
tv = time;
return (VOP_UPDATE(ap->a_vp, &tv, &tv, ap->a_waitfor == MNT_WAIT));
}


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)lfs_balloc.c 8.1 (Berkeley) 6/11/93
* $Id$
* $Id: lfs_balloc.c,v 1.2 1994/08/02 07:54:30 davidg Exp $
*/
#include <sys/param.h>
#include <sys/buf.h>
@ -129,6 +129,7 @@ lfs_balloc(vp, iosize, lbn, bpp)
else {
bp->b_blkno = daddr;
bp->b_flags |= B_READ;
vfs_busy_pages(bp, 0);
VOP_STRATEGY(bp);
return(biowait(bp));
}


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)lfs_inode.c 8.5 (Berkeley) 12/30/93
* $Id: lfs_inode.c,v 1.4 1994/10/10 01:04:50 phk Exp $
* $Id: lfs_inode.c,v 1.5 1995/01/04 23:46:31 gibbs Exp $
*/
#include <sys/param.h>
@ -235,7 +235,7 @@ lfs_truncate(ap)
ip->i_size = length;
size = blksize(fs);
bzero((char *)bp->b_data + offset, (u_int)(size - offset));
allocbuf(bp, size);
allocbuf(bp, size, 0);
if (e1 = VOP_BWRITE(bp))
return (e1);
}


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)lfs_segment.c 8.5 (Berkeley) 1/4/94
* $Id: lfs_segment.c,v 1.5 1994/11/17 01:30:49 gibbs Exp $
* $Id: lfs_segment.c,v 1.6 1995/01/04 23:46:32 gibbs Exp $
*/
#include <sys/param.h>
@ -1091,7 +1091,6 @@ lfs_newbuf(vp, daddr, size)
bp = getpbuf();
if (nbytes)
bp->b_data = lfs_alloc_buffer( nbytes);
bgetvp(vp, bp);
bp->b_bufsize = size;
bp->b_bcount = size;
bp->b_lblkno = daddr;


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)lfs_subr.c 8.2 (Berkeley) 9/21/93
* $Id: lfs_subr.c,v 1.4 1994/11/17 01:30:51 gibbs Exp $
* $Id: lfs_subr.c,v 1.5 1995/01/04 23:46:32 gibbs Exp $
*/
#include <sys/param.h>
@ -147,11 +147,8 @@ lfs_segunlock(fs)
if (sp->bpp != sp->cbpp) {
/* Free allocated segment summary */
fs->lfs_offset -= LFS_SUMMARY_SIZE / DEV_BSIZE;
/* free((*sp->bpp)->b_data, M_SEGMENT); */
lfs_free_buffer((*sp->bpp)->b_data, roundup( (*sp->bpp)->b_bufsize, DEV_BSIZE));
/* free(*sp->bpp, M_SEGMENT); */
lfs_free_buffer((*sp->bpp)->b_data, roundup((*sp->bpp)->b_bufsize, DEV_BSIZE));
relpbuf(*sp->bpp);
} else
printf ("unlock to 0 with no summary");
free(sp->bpp, M_SEGMENT);


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)lfs_syscalls.c 8.5 (Berkeley) 4/20/94
* $Id: lfs_syscalls.c,v 1.4 1994/11/17 01:30:52 gibbs Exp $
* $Id: lfs_syscalls.c,v 1.5 1995/01/04 23:46:33 gibbs Exp $
*/
#include <sys/param.h>
@ -238,10 +238,6 @@ err2: lfs_vunref(vp);
/* Free up fakebuffers */
for (bpp = --sp->cbpp; bpp >= sp->bpp; --bpp)
if ((*bpp)->b_flags & B_CALL) {
brelvp(*bpp);
/*
free(*bpp, M_SEGMENT);
*/
relpbuf(*bpp);
} else
brelse(*bpp);


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_bmap.c 8.6 (Berkeley) 1/21/94
* $Id: ufs_bmap.c,v 1.3 1994/08/02 07:54:52 davidg Exp $
* $Id: ufs_bmap.c,v 1.4 1994/10/08 06:57:21 phk Exp $
*/
#include <sys/param.h>
@ -128,12 +128,12 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
if (runp) {
/*
* XXX
* If MAXBSIZE is the largest transfer the disks can handle,
* If MAXPHYS is the largest transfer the disks can handle,
* we probably want maxrun to be 1 block less so that we
* don't create a block larger than the device can handle.
*/
*runp = 0;
maxrun = MAXBSIZE / mp->mnt_stat.f_iosize - 1;
maxrun = MAXPHYS / mp->mnt_stat.f_iosize - 1;
}
xap = ap == NULL ? a : ap;
@ -179,7 +179,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
xap->in_exists = 1;
bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
if (bp->b_flags & (B_DONE | B_DELWRI)) {
if (bp->b_flags & B_CACHE) {
trace(TR_BREADHIT, pack(vp, size), metalbn);
}
#ifdef DIAGNOSTIC
@ -190,6 +190,7 @@ ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
trace(TR_BREADMISS, pack(vp, size), metalbn);
bp->b_blkno = blkptrtodb(ump, daddr);
bp->b_flags |= B_READ;
vfs_busy_pages(bp, 0);
VOP_STRATEGY(bp);
curproc->p_stats->p_ru.ru_inblock++; /* XXX */
error = biowait(bp);
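Editor's note: the reworked comment and computation above size read-ahead runs by MAXPHYS rather than MAXBSIZE: maxrun = MAXPHYS / f_iosize - 1, one block short of the device limit. A quick stand-alone check of the arithmetic (the sample sizes are assumptions for illustration):

/* Illustrative sketch only -- not part of this commit. */
#include <stdio.h>

#define MAXPHYS (64 * 1024)	/* assumed largest physical transfer */

int
main(void)
{
	long iosize;

	/* maxrun is one block short of what MAXPHYS could hold, so a
	 * clustered transfer never exceeds the device limit. */
	for (iosize = 4096; iosize <= 16384; iosize *= 2)
		printf("f_iosize=%5ld  maxrun=%ld\n",
		    iosize, MAXPHYS / iosize - 1);
	return (0);
}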


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.7 (Berkeley) 1/21/94
* $Id: ufs_readwrite.c,v 1.4 1994/08/08 09:11:44 davidg Exp $
* $Id: ufs_readwrite.c,v 1.5 1994/10/10 01:04:55 phk Exp $
*/
#ifdef LFS_READWRITE
@ -101,6 +101,9 @@ READ(ap)
if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
break;
lbn = lblkno(fs, uio->uio_offset);
xfersize = vfs_read_bypass( vp, uio, bytesinfile, lbn);
if( xfersize != 0)
continue;
nextlbn = lbn + 1;
size = BLKSIZE(fs, ip, lbn);
blkoffset = blkoff(fs, uio->uio_offset);
@ -231,6 +234,10 @@ WRITE(ap)
xfersize = fs->fs_bsize - blkoffset;
if (uio->uio_resid < xfersize)
xfersize = uio->uio_resid;
if (uio->uio_offset + xfersize > ip->i_size)
vnode_pager_setsize(vp, (u_long)uio->uio_offset + xfersize);
#ifdef LFS_READWRITE
(void)lfs_check(vp, lbn);
error = lfs_balloc(vp, xfersize, lbn, &bp);
@ -245,11 +252,13 @@ WRITE(ap)
#endif
if (error)
break;
if (uio->uio_offset + xfersize > ip->i_size) {
ip->i_size = uio->uio_offset + xfersize;
vnode_pager_setsize(vp, (u_long)ip->i_size);
}
/*
(void)vnode_pager_uncache(vp);
*/
size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
if (size < xfersize)
@ -262,14 +271,17 @@ WRITE(ap)
#else
if (ioflag & IO_SYNC)
(void)bwrite(bp);
else if (xfersize + blkoffset == fs->fs_bsize)
if (doclusterwrite)
else if (xfersize + blkoffset == fs->fs_bsize) {
if (doclusterwrite) {
bp->b_flags |= B_CLUSTEROK;
cluster_write(bp, ip->i_size);
else {
} else {
bawrite(bp);
}
else
} else {
bp->b_flags |= B_CLUSTEROK;
bdwrite(bp);
}
#endif
if (error || xfersize == 0)
break;


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_vnops.c 8.10 (Berkeley) 4/1/94
* $Id: ufs_vnops.c,v 1.12 1994/10/21 01:19:25 wollman Exp $
* $Id: ufs_vnops.c,v 1.13 1994/11/26 19:38:30 bde Exp $
*/
#include <sys/param.h>
@ -441,8 +441,10 @@ ufs_chmod(vp, mode, cred, p)
ip->i_mode &= ~ALLPERMS;
ip->i_mode |= (mode & ALLPERMS);
ip->i_flag |= IN_CHANGE;
/*
if ((vp->v_flag & VTEXT) && (ip->i_mode & S_ISTXT) == 0)
(void) vnode_pager_uncache(vp);
*/
return (0);
}
@ -647,6 +649,8 @@ ufs_remove(ap)
if ((error = ufs_dirremove(dvp, ap->a_cnp)) == 0) {
ip->i_nlink--;
ip->i_flag |= IN_CHANGE;
if( (ip->i_nlink == 0) && vp->v_vmdata)
((vm_object_t)vp->v_vmdata)->flags |= OBJ_INTERNAL;
}
out:
if (dvp == vp)


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.3 1994/08/02 07:55:06 davidg Exp $
* $Id: device_pager.c,v 1.4 1994/10/02 17:48:58 phk Exp $
*/
/*
@ -55,28 +55,26 @@
#include <vm/vm_page.h>
#include <vm/device_pager.h>
struct pagerlst dev_pager_list; /* list of managed devices */
struct pagerlst dev_pager_list; /* list of managed devices */
struct pglist dev_pager_fakelist; /* list of available vm_page_t's */
#ifdef DEBUG
int dpagerdebug = 0;
int dpagerdebug = 0;
#define DDB_FOLLOW 0x01
#define DDB_INIT 0x02
#define DDB_ALLOC 0x04
#define DDB_FAIL 0x08
#endif
static vm_pager_t dev_pager_alloc
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static void dev_pager_dealloc __P((vm_pager_t));
static int dev_pager_getpage
__P((vm_pager_t, vm_page_t, boolean_t));
static boolean_t dev_pager_haspage __P((vm_pager_t, vm_offset_t));
static void dev_pager_init __P((void));
static int dev_pager_putpage
__P((vm_pager_t, vm_page_t, boolean_t));
static vm_page_t dev_pager_getfake __P((vm_offset_t));
static void dev_pager_putfake __P((vm_page_t));
static vm_pager_t dev_pager_alloc __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static void dev_pager_dealloc __P((vm_pager_t));
static int dev_pager_getpage __P((vm_pager_t, vm_page_t, boolean_t));
static boolean_t dev_pager_haspage __P((vm_pager_t, vm_offset_t));
static void dev_pager_init __P((void));
static int dev_pager_putpage __P((vm_pager_t, vm_page_t, boolean_t));
static vm_page_t dev_pager_getfake __P((vm_offset_t));
static void dev_pager_putfake __P((vm_page_t));
struct pagerops devicepagerops = {
dev_pager_init,
@ -109,7 +107,7 @@ dev_pager_alloc(handle, size, prot, foff)
{
dev_t dev;
vm_pager_t pager;
int (*mapfunc)();
int (*mapfunc) ();
vm_object_t object;
dev_pager_t devp;
unsigned int npages, off;
@ -117,7 +115,7 @@ dev_pager_alloc(handle, size, prot, foff)
#ifdef DEBUG
if (dpagerdebug & DDB_FOLLOW)
printf("dev_pager_alloc(%x, %x, %x, %x)\n",
handle, size, prot, foff);
handle, size, prot, foff);
#endif
#ifdef DIAGNOSTIC
/*
@ -130,27 +128,27 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Make sure this device can be mapped.
*/
dev = (dev_t)(u_long)handle;
dev = (dev_t) (u_long) handle;
mapfunc = cdevsw[major(dev)].d_mmap;
if (mapfunc == NULL || mapfunc == enodev || mapfunc == nullop)
return(NULL);
return (NULL);
/*
* Offset should be page aligned.
*/
if (foff & (PAGE_SIZE-1))
return(NULL);
if (foff & (PAGE_SIZE - 1))
return (NULL);
/*
* Check that the specified range of the device allows the
* desired protection.
*
* Check that the specified range of the device allows the desired
* protection.
*
* XXX assumes VM_PROT_* == PROT_*
*/
npages = atop(round_page(size));
for (off = foff; npages--; off += PAGE_SIZE)
if ((*mapfunc)(dev, off, (int)prot) == -1)
return(NULL);
if ((*mapfunc) (dev, off, (int) prot) == -1)
return (NULL);
/*
* Look up pager, creating as necessary.
@ -161,58 +159,57 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Allocate and initialize pager structs
*/
pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
pager = (vm_pager_t) malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
if (pager == NULL)
return(NULL);
devp = (dev_pager_t)malloc(sizeof *devp, M_VMPGDATA, M_WAITOK);
return (NULL);
devp = (dev_pager_t) malloc(sizeof *devp, M_VMPGDATA, M_WAITOK);
if (devp == NULL) {
free((caddr_t)pager, M_VMPAGER);
return(NULL);
free((caddr_t) pager, M_VMPAGER);
return (NULL);
}
pager->pg_handle = handle;
pager->pg_ops = &devicepagerops;
pager->pg_type = PG_DEVICE;
pager->pg_data = (caddr_t)devp;
pager->pg_flags = 0;
pager->pg_data = (caddr_t) devp;
TAILQ_INIT(&devp->devp_pglist);
/*
* Allocate object and associate it with the pager.
*/
object = devp->devp_object = vm_object_allocate(0);
vm_object_enter(object, pager);
vm_object_setpager(object, pager, (vm_offset_t)foff, FALSE);
vm_object_setpager(object, pager, (vm_offset_t) foff, FALSE);
/*
* Finally, put it on the managed list so other can find it.
* First we re-lookup in case someone else beat us to this
* point (due to blocking in the various mallocs). If so,
* we free everything and start over.
* point (due to blocking in the various mallocs). If so, we
* free everything and start over.
*/
if (vm_pager_lookup(&dev_pager_list, handle)) {
free((caddr_t)devp, M_VMPGDATA);
free((caddr_t)pager, M_VMPAGER);
free((caddr_t) devp, M_VMPGDATA);
free((caddr_t) pager, M_VMPAGER);
goto top;
}
TAILQ_INSERT_TAIL(&dev_pager_list, pager, pg_list);
#ifdef DEBUG
if (dpagerdebug & DDB_ALLOC) {
printf("dev_pager_alloc: pager %x devp %x object %x\n",
pager, devp, object);
pager, devp, object);
vm_object_print(object, FALSE);
}
#endif
} else {
/*
* vm_object_lookup() gains a reference and also
* removes the object from the cache.
* vm_object_lookup() gains a reference and also removes the
* object from the cache.
*/
object = vm_object_lookup(pager);
#ifdef DIAGNOSTIC
devp = (dev_pager_t)pager->pg_data;
devp = (dev_pager_t) pager->pg_data;
if (object != devp->devp_object)
panic("dev_pager_setup: bad object");
#endif
}
return(pager);
return (pager);
}
static void
@ -229,11 +226,10 @@ dev_pager_dealloc(pager)
#endif
TAILQ_REMOVE(&dev_pager_list, pager, pg_list);
/*
* Get the object.
* Note: cannot use vm_object_lookup since object has already
* been removed from the hash chain.
* Get the object. Note: cannot use vm_object_lookup since object has
* already been removed from the hash chain.
*/
devp = (dev_pager_t)pager->pg_data;
devp = (dev_pager_t) pager->pg_data;
object = devp->devp_object;
#ifdef DEBUG
if (dpagerdebug & DDB_ALLOC)
@ -242,12 +238,12 @@ dev_pager_dealloc(pager)
/*
* Free up our fake pages.
*/
while ((m=devp->devp_pglist.tqh_first) != 0) {
while ((m = devp->devp_pglist.tqh_first) != 0) {
TAILQ_REMOVE(&devp->devp_pglist, m, pageq);
dev_pager_putfake(m);
}
free((caddr_t)devp, M_VMPGDATA);
free((caddr_t)pager, M_VMPAGER);
free((caddr_t) devp, M_VMPGDATA);
free((caddr_t) pager, M_VMPAGER);
}
static int
@ -261,7 +257,7 @@ dev_pager_getpage(pager, m, sync)
vm_page_t page;
dev_t dev;
int s;
int (*mapfunc)(), prot;
int (*mapfunc) (), prot;
#ifdef DEBUG
if (dpagerdebug & DDB_FOLLOW)
@ -269,7 +265,7 @@ dev_pager_getpage(pager, m, sync)
#endif
object = m->object;
dev = (dev_t)(u_long)pager->pg_handle;
dev = (dev_t) (u_long) pager->pg_handle;
offset = m->offset + object->paging_offset;
prot = PROT_READ; /* XXX should pass in? */
mapfunc = cdevsw[major(dev)].d_mmap;
@ -277,31 +273,31 @@ dev_pager_getpage(pager, m, sync)
if (mapfunc == NULL || mapfunc == enodev || mapfunc == nullop)
panic("dev_pager_getpage: no map function");
paddr = pmap_phys_address((*mapfunc)((dev_t)dev, (int)offset, prot));
paddr = pmap_phys_address((*mapfunc) ((dev_t) dev, (int) offset, prot));
#ifdef DIAGNOSTIC
if (paddr == -1)
panic("dev_pager_getpage: map function returns error");
#endif
/*
* Replace the passed in page with our own fake page and free
* up the original.
* Replace the passed in page with our own fake page and free up the
* original.
*/
page = dev_pager_getfake(paddr);
TAILQ_INSERT_TAIL(&((dev_pager_t)pager->pg_data)->devp_pglist,
page, pageq);
TAILQ_INSERT_TAIL(&((dev_pager_t) pager->pg_data)->devp_pglist,
page, pageq);
vm_object_lock(object);
vm_page_lock_queues();
PAGE_WAKEUP(m);
vm_page_free(m);
vm_page_unlock_queues();
s = splhigh();
vm_page_insert(page, object, offset);
splx(s);
PAGE_WAKEUP(m);
if (offset + PAGE_SIZE > object->size)
object->size = offset + PAGE_SIZE; /* XXX anal */
vm_object_unlock(object);
return(VM_PAGER_OK);
return (VM_PAGER_OK);
}
static int
@ -328,7 +324,7 @@ dev_pager_haspage(pager, offset)
if (dpagerdebug & DDB_FOLLOW)
printf("dev_pager_haspage(%x, %x)\n", pager, offset);
#endif
return(TRUE);
return (TRUE);
}
static vm_page_t
@ -339,8 +335,8 @@ dev_pager_getfake(paddr)
int i;
if (dev_pager_fakelist.tqh_first == NULL) {
m = (vm_page_t)malloc(PAGE_SIZE, M_VMPGDATA, M_WAITOK);
for (i = PAGE_SIZE / sizeof(*m); i > 0; i--) {
m = (vm_page_t) malloc(PAGE_SIZE * 2, M_VMPGDATA, M_WAITOK);
for (i = (PAGE_SIZE * 2) / sizeof(*m); i > 0; i--) {
TAILQ_INSERT_TAIL(&dev_pager_fakelist, m, pageq);
m++;
}
@ -348,12 +344,16 @@ dev_pager_getfake(paddr)
m = dev_pager_fakelist.tqh_first;
TAILQ_REMOVE(&dev_pager_fakelist, m, pageq);
m->flags = PG_BUSY | PG_CLEAN | PG_FAKE | PG_FICTITIOUS;
m->flags = PG_BUSY | PG_FICTITIOUS;
m->dirty = 0;
m->valid = VM_PAGE_BITS_ALL;
m->busy = 0;
m->bmapped = 0;
m->wire_count = 1;
m->phys_addr = paddr;
return(m);
return (m);
}
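Editor's note: dev_pager_getfake() above refills its pool by carving one malloc()ed chunk (now two pages' worth) into many fake vm_page structures, threading them on a free list, and popping the head. A stand-alone sketch of that carve-and-pop pattern (toy types and sizes; only the shape of the code is taken from the function above):

/* Illustrative sketch only -- not part of this commit. */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE (2 * 4096)	/* assumed: two "pages" worth of headers */

struct fakepage {
	unsigned long phys_addr;
	TAILQ_ENTRY(fakepage) pageq;
};

static TAILQ_HEAD(, fakepage) fakelist = TAILQ_HEAD_INITIALIZER(fakelist);

/* Refill the free list from one big allocation, then pop the head --
 * the same shape as dev_pager_getfake(). */
static struct fakepage *
getfake(unsigned long paddr)
{
	struct fakepage *m;
	int i;

	if (TAILQ_FIRST(&fakelist) == NULL) {
		m = malloc(CHUNK_SIZE);
		if (m == NULL)
			return (NULL);
		for (i = CHUNK_SIZE / sizeof(*m); i > 0; i--) {
			TAILQ_INSERT_TAIL(&fakelist, m, pageq);
			m++;
		}
	}
	m = TAILQ_FIRST(&fakelist);
	TAILQ_REMOVE(&fakelist, m, pageq);
	m->phys_addr = paddr;
	return (m);
}

int
main(void)
{
	struct fakepage *m = getfake(0x1000);

	if (m != NULL)
		printf("fake page at %p maps 0x%lx\n", (void *)m, m->phys_addr);
	return (0);
}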
static void


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.h 8.3 (Berkeley) 12/13/93
* $Id$
* $Id: device_pager.h,v 1.2 1994/08/02 07:55:07 davidg Exp $
*/
#ifndef _DEVICE_PAGER_
@ -46,9 +46,9 @@
* Device pager private data.
*/
struct devpager {
struct pglist devp_pglist; /* list of pages allocated */
vm_object_t devp_object; /* object representing this device */
struct pglist devp_pglist; /* list of pages allocated */
vm_object_t devp_object; /* object representing this device */
};
typedef struct devpager *dev_pager_t;
typedef struct devpager *dev_pager_t;
#endif /* _DEVICE_PAGER_ */
#endif /* _DEVICE_PAGER_ */


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: kern_lock.c,v 1.2 1994/08/02 07:55:08 davidg Exp $
*/
/*
@ -75,7 +75,8 @@
/* XXX */
#include <sys/proc.h>
typedef int *thread_t;
typedef int *thread_t;
#define current_thread() ((thread_t)&curproc->p_thread)
/* XXX */
@ -112,44 +113,49 @@ typedef int *thread_t;
* may only be used for exclusive locks.
*/
void simple_lock_init(l)
simple_lock_t l;
void
simple_lock_init(l)
simple_lock_t l;
{
*(boolean_t *)l = FALSE;
*(boolean_t *) l = FALSE;
}
void simple_lock(l)
simple_lock_t l;
void
simple_lock(l)
simple_lock_t l;
{
while (test_and_set((boolean_t *)l))
while (test_and_set((boolean_t *) l))
continue;
}
void simple_unlock(l)
simple_lock_t l;
void
simple_unlock(l)
simple_lock_t l;
{
*(boolean_t *)l = FALSE;
*(boolean_t *) l = FALSE;
}
boolean_t simple_lock_try(l)
simple_lock_t l;
boolean_t
simple_lock_try(l)
simple_lock_t l;
{
return (!test_and_set((boolean_t *)l));
return (!test_and_set((boolean_t *) l));
}
#endif /* notdef */
#endif /* NCPUS > 1 */
#endif /* notdef */
#endif /* NCPUS > 1 */
#if NCPUS > 1
int lock_wait_time = 100;
#else /* NCPUS > 1 */
/*
* It is silly to spin on a uni-processor as if we
* thought something magical would happen to the
* want_write bit while we are executing.
*/
#else /* NCPUS > 1 */
/*
* It is silly to spin on a uni-processor as if we thought something magical
* would happen to the want_write bit while we are executing.
*/
int lock_wait_time = 0;
#endif /* NCPUS > 1 */
#endif /* NCPUS > 1 */
/*
@ -160,9 +166,10 @@ int lock_wait_time = 0;
* variables and then initialize them, rather
* than getting a new one from this module.
*/
void lock_init(l, can_sleep)
lock_t l;
boolean_t can_sleep;
void
lock_init(l, can_sleep)
lock_t l;
boolean_t can_sleep;
{
bzero(l, sizeof(lock_data_t));
simple_lock_init(&l->interlock);
@ -170,13 +177,14 @@ void lock_init(l, can_sleep)
l->want_upgrade = FALSE;
l->read_count = 0;
l->can_sleep = can_sleep;
l->thread = (char *)-1; /* XXX */
l->thread = (char *) -1; /* XXX */
l->recursion_depth = 0;
}
void lock_sleepable(l, can_sleep)
lock_t l;
boolean_t can_sleep;
void
lock_sleepable(l, can_sleep)
lock_t l;
boolean_t can_sleep;
{
simple_lock(&l->interlock);
l->can_sleep = can_sleep;
@ -190,24 +198,24 @@ void lock_sleepable(l, can_sleep)
* for the lock. These work on uniprocessor systems.
*/
void lock_write(l)
register lock_t l;
void
lock_write(l)
register lock_t l;
{
register int i;
register int i;
simple_lock(&l->interlock);
if (((thread_t)l->thread) == current_thread()) {
if (((thread_t) l->thread) == current_thread()) {
/*
* Recursive lock.
* Recursive lock.
*/
l->recursion_depth++;
simple_unlock(&l->interlock);
return;
}
/*
* Try to acquire the want_write bit.
* Try to acquire the want_write bit.
*/
while (l->want_write) {
if ((i = lock_wait_time) > 0) {
@ -216,7 +224,6 @@ void lock_write(l)
continue;
simple_lock(&l->interlock);
}
if (l->can_sleep && l->want_write) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock, FALSE);
@ -231,11 +238,10 @@ void lock_write(l)
if ((i = lock_wait_time) > 0) {
simple_unlock(&l->interlock);
while (--i > 0 && (l->read_count != 0 ||
l->want_upgrade))
l->want_upgrade))
continue;
simple_lock(&l->interlock);
}
if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock, FALSE);
@ -245,21 +251,20 @@ void lock_write(l)
simple_unlock(&l->interlock);
}
void lock_done(l)
register lock_t l;
void
lock_done(l)
register lock_t l;
{
simple_lock(&l->interlock);
if (l->read_count != 0)
l->read_count--;
else
if (l->recursion_depth != 0)
else if (l->recursion_depth != 0)
l->recursion_depth--;
else if (l->want_upgrade)
l->want_upgrade = FALSE;
else
if (l->want_upgrade)
l->want_upgrade = FALSE;
else
l->want_write = FALSE;
l->want_write = FALSE;
if (l->waiting) {
l->waiting = FALSE;
@ -268,22 +273,22 @@ void lock_done(l)
simple_unlock(&l->interlock);
}
void lock_read(l)
register lock_t l;
void
lock_read(l)
register lock_t l;
{
register int i;
register int i;
simple_lock(&l->interlock);
if (((thread_t)l->thread) == current_thread()) {
if (((thread_t) l->thread) == current_thread()) {
/*
* Recursive lock.
* Recursive lock.
*/
l->read_count++;
simple_unlock(&l->interlock);
return;
}
while (l->want_write || l->want_upgrade) {
if ((i = lock_wait_time) > 0) {
simple_unlock(&l->interlock);
@ -291,7 +296,6 @@ void lock_read(l)
continue;
simple_lock(&l->interlock);
}
if (l->can_sleep && (l->want_write || l->want_upgrade)) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock, FALSE);
@ -313,39 +317,36 @@ void lock_read(l)
*
* Returns TRUE if the upgrade *failed*.
*/
boolean_t lock_read_to_write(l)
register lock_t l;
boolean_t
lock_read_to_write(l)
register lock_t l;
{
register int i;
register int i;
simple_lock(&l->interlock);
l->read_count--;
if (((thread_t)l->thread) == current_thread()) {
if (((thread_t) l->thread) == current_thread()) {
/*
* Recursive lock.
* Recursive lock.
*/
l->recursion_depth++;
simple_unlock(&l->interlock);
return(FALSE);
return (FALSE);
}
if (l->want_upgrade) {
/*
* Someone else has requested upgrade.
* Since we've released a read lock, wake
* him up.
* Someone else has requested upgrade. Since we've released a
* read lock, wake him up.
*/
if (l->waiting) {
l->waiting = FALSE;
thread_wakeup((int) l);
}
simple_unlock(&l->interlock);
return (TRUE);
}
l->want_upgrade = TRUE;
while (l->read_count != 0) {
@ -355,7 +356,6 @@ boolean_t lock_read_to_write(l)
continue;
simple_lock(&l->interlock);
}
if (l->can_sleep && l->read_count != 0) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock, FALSE);
@ -367,25 +367,24 @@ boolean_t lock_read_to_write(l)
return (FALSE);
}
void lock_write_to_read(l)
register lock_t l;
void
lock_write_to_read(l)
register lock_t l;
{
simple_lock(&l->interlock);
l->read_count++;
if (l->recursion_depth != 0)
l->recursion_depth--;
else
if (l->want_upgrade)
else if (l->want_upgrade)
l->want_upgrade = FALSE;
else
l->want_write = FALSE;
l->want_write = FALSE;
if (l->waiting) {
l->waiting = FALSE;
thread_wakeup((int) l);
}
simple_unlock(&l->interlock);
}
@ -398,36 +397,35 @@ void lock_write_to_read(l)
* Returns FALSE if the lock is not held on return.
*/
boolean_t lock_try_write(l)
register lock_t l;
boolean_t
lock_try_write(l)
register lock_t l;
{
simple_lock(&l->interlock);
if (((thread_t)l->thread) == current_thread()) {
if (((thread_t) l->thread) == current_thread()) {
/*
* Recursive lock
* Recursive lock
*/
l->recursion_depth++;
simple_unlock(&l->interlock);
return(TRUE);
return (TRUE);
}
if (l->want_write || l->want_upgrade || l->read_count) {
/*
* Can't get lock.
* Can't get lock.
*/
simple_unlock(&l->interlock);
return(FALSE);
return (FALSE);
}
/*
* Have lock.
* Have lock.
*/
l->want_write = TRUE;
simple_unlock(&l->interlock);
return(TRUE);
return (TRUE);
}
/*
@ -438,28 +436,27 @@ boolean_t lock_try_write(l)
* Returns FALSE if the lock is not held on return.
*/
boolean_t lock_try_read(l)
register lock_t l;
boolean_t
lock_try_read(l)
register lock_t l;
{
simple_lock(&l->interlock);
if (((thread_t)l->thread) == current_thread()) {
if (((thread_t) l->thread) == current_thread()) {
/*
* Recursive lock
* Recursive lock
*/
l->read_count++;
simple_unlock(&l->interlock);
return(TRUE);
return (TRUE);
}
if (l->want_write || l->want_upgrade) {
simple_unlock(&l->interlock);
return(FALSE);
return (FALSE);
}
l->read_count++;
simple_unlock(&l->interlock);
return(TRUE);
return (TRUE);
}
/*
@ -472,25 +469,25 @@ boolean_t lock_try_read(l)
*
* Returns FALSE if the upgrade *failed*.
*/
boolean_t lock_try_read_to_write(l)
register lock_t l;
boolean_t
lock_try_read_to_write(l)
register lock_t l;
{
simple_lock(&l->interlock);
if (((thread_t)l->thread) == current_thread()) {
if (((thread_t) l->thread) == current_thread()) {
/*
* Recursive lock
* Recursive lock
*/
l->read_count--;
l->recursion_depth++;
simple_unlock(&l->interlock);
return(TRUE);
return (TRUE);
}
if (l->want_upgrade) {
simple_unlock(&l->interlock);
return(FALSE);
return (FALSE);
}
l->want_upgrade = TRUE;
l->read_count--;
@ -502,15 +499,16 @@ boolean_t lock_try_read_to_write(l)
}
simple_unlock(&l->interlock);
return(TRUE);
return (TRUE);
}
/*
* Allow a process that has a lock for write to acquire it
* recursively (for read, write, or update).
*/
void lock_set_recursive(l)
lock_t l;
void
lock_set_recursive(l)
lock_t l;
{
simple_lock(&l->interlock);
if (!l->want_write) {
@ -523,14 +521,15 @@ void lock_set_recursive(l)
/*
* Prevent a lock from being re-acquired.
*/
void lock_clear_recursive(l)
lock_t l;
void
lock_clear_recursive(l)
lock_t l;
{
simple_lock(&l->interlock);
if (((thread_t) l->thread) != current_thread()) {
panic("lock_clear_recursive: wrong thread");
}
if (l->recursion_depth == 0)
l->thread = (char *)-1; /* XXX */
l->thread = (char *) -1; /* XXX */
simple_unlock(&l->interlock);
}
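Editor's note: the lock code above (mostly re-indented here) still documents the basic scheme: a simple interlock protects want_write, want_upgrade and read_count; callers spin briefly on multiprocessors and otherwise sleep. A much-simplified userland sketch of the same shape, using a pthread mutex as the interlock and a condition variable in place of thread_sleep/thread_wakeup (all assumptions for illustration; no recursion or upgrade handling):

/* Illustrative sketch only -- not part of this commit. */
#include <pthread.h>
#include <stdio.h>

struct toylock {
	pthread_mutex_t interlock;	/* protects the fields below */
	pthread_cond_t  waiting;	/* stands in for thread_sleep/wakeup */
	int want_write;			/* writer holds or wants the lock */
	int read_count;			/* number of accepted readers */
};

static void
lock_write(struct toylock *l)
{
	pthread_mutex_lock(&l->interlock);
	while (l->want_write || l->read_count != 0)
		pthread_cond_wait(&l->waiting, &l->interlock);
	l->want_write = 1;
	pthread_mutex_unlock(&l->interlock);
}

static void
lock_read(struct toylock *l)
{
	pthread_mutex_lock(&l->interlock);
	while (l->want_write)
		pthread_cond_wait(&l->waiting, &l->interlock);
	l->read_count++;
	pthread_mutex_unlock(&l->interlock);
}

static void
lock_done(struct toylock *l)
{
	pthread_mutex_lock(&l->interlock);
	if (l->read_count != 0)
		l->read_count--;
	else
		l->want_write = 0;
	pthread_cond_broadcast(&l->waiting);
	pthread_mutex_unlock(&l->interlock);
}

int
main(void)
{
	struct toylock l = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
	};

	lock_read(&l);
	lock_read(&l);
	lock_done(&l);
	lock_done(&l);
	lock_write(&l);
	lock_done(&l);
	printf("lock exercised: readers then a writer\n");
	return (0);
}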


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: lock.h,v 1.2 1994/08/02 07:55:11 davidg Exp $
*/
/*
@ -78,11 +78,11 @@
*/
struct slock {
int lock_data; /* in general 1 bit is sufficient */
int lock_data; /* in general 1 bit is sufficient */
};
typedef struct slock simple_lock_data_t;
typedef struct slock *simple_lock_t;
typedef struct slock simple_lock_data_t;
typedef struct slock *simple_lock_t;
/*
* The general lock structure. Provides for multiple readers,
@ -93,61 +93,55 @@ typedef struct slock *simple_lock_t;
struct lock {
#ifdef vax
/*
* Efficient VAX implementation -- see field description below.
* Efficient VAX implementation -- see field description below.
*/
unsigned int read_count:16,
want_upgrade:1,
want_write:1,
waiting:1,
can_sleep:1,
:0;
unsigned int read_count:16, want_upgrade:1, want_write:1, waiting:1, can_sleep:1,:0;
simple_lock_data_t interlock;
#else /* vax */
simple_lock_data_t interlock;
#else /* vax */
#ifdef ns32000
/*
* Efficient ns32000 implementation --
* see field description below.
* Efficient ns32000 implementation -- see field description below.
*/
simple_lock_data_t interlock;
unsigned int read_count:16,
want_upgrade:1,
want_write:1,
waiting:1,
can_sleep:1,
:0;
simple_lock_data_t interlock;
unsigned int read_count:16, want_upgrade:1, want_write:1, waiting:1, can_sleep:1,:0;
#else /* ns32000 */
/* Only the "interlock" field is used for hardware exclusion;
* other fields are modified with normal instructions after
* acquiring the interlock bit.
#else /* ns32000 */
/*
* Only the "interlock" field is used for hardware exclusion; other
* fields are modified with normal instructions after acquiring the
* interlock bit.
*/
simple_lock_data_t
interlock; /* Interlock for remaining fields */
boolean_t want_write; /* Writer is waiting, or locked for write */
boolean_t want_upgrade; /* Read-to-write upgrade waiting */
boolean_t waiting; /* Someone is sleeping on lock */
boolean_t can_sleep; /* Can attempts to lock go to sleep */
int read_count; /* Number of accepted readers */
#endif /* ns32000 */
#endif /* vax */
char *thread; /* Thread that has lock, if recursive locking allowed */
/* (should be thread_t, but but we then have mutually
recursive definitions) */
int recursion_depth;/* Depth of recursion */
simple_lock_data_t
interlock; /* Interlock for remaining fields */
boolean_t want_write; /* Writer is waiting, or locked for write */
boolean_t want_upgrade; /* Read-to-write upgrade waiting */
boolean_t waiting; /* Someone is sleeping on lock */
boolean_t can_sleep; /* Can attempts to lock go to sleep */
int read_count; /* Number of accepted readers */
#endif /* ns32000 */
#endif /* vax */
char *thread; /* Thread that has lock, if recursive locking
* allowed */
/*
* (should be thread_t, but but we then have mutually recursive
* definitions)
*/
int recursion_depth; /* Depth of recursion */
};
typedef struct lock lock_data_t;
typedef struct lock *lock_t;
typedef struct lock lock_data_t;
typedef struct lock *lock_t;
#if NCPUS > 1
__BEGIN_DECLS
void simple_lock __P((simple_lock_t));
void simple_lock_init __P((simple_lock_t));
boolean_t simple_lock_try __P((simple_lock_t));
void simple_unlock __P((simple_lock_t));
void simple_lock __P((simple_lock_t));
void simple_lock_init __P((simple_lock_t));
boolean_t simple_lock_try __P((simple_lock_t));
void simple_unlock __P((simple_lock_t));
__END_DECLS
#else /* No multiprocessor locking is necessary. */
#else /* No multiprocessor locking is necessary. */
#define simple_lock(l)
#define simple_lock_init(l)
#define simple_lock_try(l) (1) /* Always succeeds. */
@ -159,16 +153,17 @@ __END_DECLS
#define lock_read_done(l) lock_done(l)
#define lock_write_done(l) lock_done(l)
void lock_clear_recursive __P((lock_t));
void lock_done __P((lock_t));
void lock_init __P((lock_t, boolean_t));
void lock_read __P((lock_t));
boolean_t lock_read_to_write __P((lock_t));
void lock_set_recursive __P((lock_t));
void lock_sleepable __P((lock_t, boolean_t));
boolean_t lock_try_read __P((lock_t));
boolean_t lock_try_read_to_write __P((lock_t));
boolean_t lock_try_write __P((lock_t));
void lock_write __P((lock_t));
void lock_write_to_read __P((lock_t));
#endif /* !_LOCK_H_ */
void lock_clear_recursive __P((lock_t));
void lock_done __P((lock_t));
void lock_init __P((lock_t, boolean_t));
void lock_read __P((lock_t));
boolean_t lock_read_to_write __P((lock_t));
void lock_set_recursive __P((lock_t));
void lock_sleepable __P((lock_t, boolean_t));
boolean_t lock_try_read __P((lock_t));
boolean_t lock_try_read_to_write __P((lock_t));
boolean_t lock_try_write __P((lock_t));
void lock_write __P((lock_t));
void lock_write_to_read __P((lock_t));
#endif /* !_LOCK_H_ */


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Author: Avadis Tevanian, Jr.
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: pmap.h,v 1.4 1994/09/02 04:12:26 davidg Exp $
* $Id: pmap.h,v 1.5 1994/11/14 08:19:07 bde Exp $
*/
/*
@ -79,47 +79,50 @@
* in the following structure.
*/
struct pmap_statistics {
long resident_count; /* # of pages mapped (total)*/
long wired_count; /* # of pages wired */
long resident_count; /* # of pages mapped (total) */
long wired_count; /* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;
typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>
#ifdef KERNEL
__BEGIN_DECLS
void * pmap_bootstrap_alloc __P((int));
#if 0 /* XXX */
void pmap_bootstrap __P((/* machine dependent */));
void *pmap_bootstrap_alloc __P((int));
#if 0 /* XXX */
void pmap_bootstrap __P(( /* machine dependent */ ));
#endif
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
void pmap_collect __P((pmap_t));
void pmap_copy __P((pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
pmap_t pmap_create __P((vm_size_t));
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
void pmap_init __P((vm_offset_t, vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa));
void pmap_kenter __P((vm_offset_t, vm_offset_t));
void pmap_kremove __P((vm_offset_t));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
void pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t, boolean_t));
vm_offset_t pmap_phys_address __P((int));
void pmap_pinit __P((pmap_t));
void pmap_protect __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t));
void pmap_qenter __P((vm_offset_t, vm_page_t *, int));
void pmap_qremove __P((vm_offset_t, int));
void pmap_reference __P((pmap_t));
void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_zero_page __P((vm_offset_t));
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
void pmap_collect __P((pmap_t));
void pmap_copy __P((pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
pmap_t pmap_create __P((vm_size_t));
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
void pmap_init __P((vm_offset_t, vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa));
void pmap_kenter __P((vm_offset_t, vm_offset_t));
void pmap_kremove __P((vm_offset_t));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
void pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t, boolean_t));
vm_offset_t pmap_phys_address __P((int));
void pmap_pinit __P((pmap_t));
void pmap_protect __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t));
void pmap_qenter __P((vm_offset_t, vm_page_t *, int));
void pmap_qremove __P((vm_offset_t, int));
void pmap_reference __P((pmap_t));
void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_zero_page __P((vm_offset_t));
__END_DECLS
#endif
#endif /* _PMAP_VM_ */
#endif /* _PMAP_VM_ */

File diff suppressed because it is too large.


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)swap_pager.h 7.1 (Berkeley) 12/5/90
* $Id: swap_pager.h,v 1.2 1994/05/25 09:18:39 rgrimes Exp $
* $Id: swap_pager.h,v 1.3 1994/10/09 01:52:06 phk Exp $
*/
/*
@ -56,46 +56,46 @@
* space is recovered by the swap pager now...
*/
#define SWB_NPAGES 8
struct swblock {
struct swblock {
unsigned short swb_valid; /* bitmask for valid pages */
unsigned short swb_locked; /* block locked */
int swb_block[SWB_NPAGES]; /* unfortunately int instead of daddr_t */
int swb_block[SWB_NPAGES]; /* unfortunately int instead of daddr_t */
};
typedef struct swblock *sw_blk_t;
typedef struct swblock *sw_blk_t;
/*
* Swap pager private data.
*/
struct swpager {
vm_size_t sw_osize; /* size of object we are backing (bytes) */
int sw_nblocks;/* number of blocks in list (sw_blk_t units) */
sw_blk_t sw_blocks; /* pointer to list of swap blocks */
short sw_flags; /* flags */
short sw_poip; /* pageouts in progress */
short sw_piip; /* pageins in progress */
vm_size_t sw_osize; /* size of object we are backing (bytes) */
int sw_nblocks; /* number of blocks in list (sw_blk_t units) */
sw_blk_t sw_blocks; /* pointer to list of swap blocks */
short sw_flags; /* flags */
short sw_poip; /* pageouts in progress */
short sw_piip; /* pageins in progress */
};
typedef struct swpager *sw_pager_t;
typedef struct swpager *sw_pager_t;
#define SW_WANTED 0x01
#define SW_NAMED 0x02
#ifdef KERNEL
void swap_pager_init(void);
vm_pager_t swap_pager_alloc(caddr_t, vm_size_t, vm_prot_t, vm_offset_t);
void swap_pager_dealloc(vm_pager_t);
boolean_t swap_pager_getpage(vm_pager_t, vm_page_t, boolean_t);
boolean_t swap_pager_putpage(vm_pager_t, vm_page_t, boolean_t);
boolean_t swap_pager_getmulti(vm_pager_t, vm_page_t *, int, int, boolean_t);
boolean_t swap_pager_haspage(vm_pager_t, vm_offset_t);
int swap_pager_io(sw_pager_t, vm_page_t *, int, int, int);
void swap_pager_iodone(struct buf *);
boolean_t swap_pager_clean();
void swap_pager_copy __P((vm_pager_t, vm_offset_t, vm_pager_t, vm_offset_t, vm_offset_t));
void swap_pager_freespace __P((vm_pager_t, vm_offset_t, vm_offset_t));
void swap_pager_init(void);
vm_pager_t swap_pager_alloc(caddr_t, vm_size_t, vm_prot_t, vm_offset_t);
void swap_pager_dealloc(vm_pager_t);
boolean_t swap_pager_getpage(vm_pager_t, vm_page_t, boolean_t);
boolean_t swap_pager_putpage(vm_pager_t, vm_page_t, boolean_t);
boolean_t swap_pager_getmulti(vm_pager_t, vm_page_t *, int, int, boolean_t);
boolean_t swap_pager_haspage(vm_pager_t, vm_offset_t);
int swap_pager_io(sw_pager_t, vm_page_t *, int, int, int);
void swap_pager_iodone(struct buf *);
boolean_t swap_pager_clean();
void swap_pager_copy __P((vm_pager_t, vm_offset_t, vm_pager_t, vm_offset_t, vm_offset_t));
void swap_pager_freespace __P((vm_pager_t, vm_offset_t, vm_offset_t));
extern struct pagerops swappagerops;
#endif
#endif /* _SWAP_PAGER_ */
#endif /* _SWAP_PAGER_ */


@ -31,13 +31,13 @@
* SUCH DAMAGE.
*
* @(#)vm.h 8.2 (Berkeley) 12/13/93
* $Id$
* $Id: vm.h,v 1.3 1994/08/02 07:55:16 davidg Exp $
*/
#ifndef VM_H
#define VM_H
typedef char vm_inherit_t; /* XXX: inheritance codes */
typedef char vm_inherit_t; /* XXX: inheritance codes */
union vm_map_object;
typedef union vm_map_object vm_map_object_t;
@ -52,7 +52,7 @@ struct vm_object;
typedef struct vm_object *vm_object_t;
struct vm_page;
typedef struct vm_page *vm_page_t;
typedef struct vm_page *vm_page_t;
struct pager_struct;
typedef struct pager_struct *vm_pager_t;
@ -75,20 +75,21 @@ typedef struct pager_struct *vm_pager_t;
* Several fields are temporary (text, data stuff).
*/
struct vmspace {
struct vm_map vm_map; /* VM address map */
struct pmap vm_pmap; /* private physical map */
int vm_refcnt; /* number of references */
caddr_t vm_shm; /* SYS5 shared memory private data XXX */
struct vm_map vm_map; /* VM address map */
struct pmap vm_pmap; /* private physical map */
int vm_refcnt; /* number of references */
caddr_t vm_shm; /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
segsz_t vm_rssize; /* current resident set size in pages */
segsz_t vm_rssize; /* current resident set size in pages */
segsz_t vm_swrss; /* resident set size before last swap */
segsz_t vm_tsize; /* text size (pages) XXX */
segsz_t vm_dsize; /* data size (pages) XXX */
segsz_t vm_ssize; /* stack size (pages) */
caddr_t vm_taddr; /* user virtual address of text XXX */
caddr_t vm_daddr; /* user virtual address of data XXX */
caddr_t vm_taddr; /* user virtual address of text XXX */
caddr_t vm_daddr; /* user virtual address of data XXX */
caddr_t vm_maxsaddr; /* user VA at max stack growth */
caddr_t vm_minsaddr; /* user VA at max stack growth */
};
#endif /* VM_H */
#endif /* VM_H */


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
* $Id: vm_extern.h,v 1.6 1994/09/27 18:00:26 davidg Exp $
* $Id: vm_extern.h,v 1.7 1994/12/30 08:02:16 bde Exp $
*/
#ifndef _VM_EXTERN_H_
@ -46,13 +46,15 @@ struct mount;
struct vnode;
#ifdef KGDB
void chgkprot __P((caddr_t, int, int));
void chgkprot __P((caddr_t, int, int));
#endif
/*
* Try to get semi-meaningful wait messages into thread_sleep...
*/
void thread_sleep_ __P((int, simple_lock_t, char *));
#if __GNUC__ >= 2
#define thread_sleep(a,b,c) thread_sleep_((a), (b), __FUNCTION__)
#else
@ -61,83 +63,77 @@ void thread_sleep_ __P((int, simple_lock_t, char *));
#ifdef KERNEL
#ifdef TYPEDEF_FOR_UAP
int getpagesize __P((struct proc *p, void *, int *));
int madvise __P((struct proc *, void *, int *));
int mincore __P((struct proc *, void *, int *));
int mprotect __P((struct proc *, void *, int *));
int msync __P((struct proc *, void *, int *));
int munmap __P((struct proc *, void *, int *));
int obreak __P((struct proc *, void *, int *));
int sbrk __P((struct proc *, void *, int *));
int smmap __P((struct proc *, void *, int *));
int sstk __P((struct proc *, void *, int *));
int getpagesize __P((struct proc * p, void *, int *));
int madvise __P((struct proc *, void *, int *));
int mincore __P((struct proc *, void *, int *));
int mprotect __P((struct proc *, void *, int *));
int msync __P((struct proc *, void *, int *));
int munmap __P((struct proc *, void *, int *));
int obreak __P((struct proc *, void *, int *));
int sbrk __P((struct proc *, void *, int *));
int smmap __P((struct proc *, void *, int *));
int sstk __P((struct proc *, void *, int *));
#endif
void assert_wait __P((int, boolean_t));
int grow __P((struct proc *, u_int));
void iprintf __P((const char *, ...));
int kernacc __P((caddr_t, int, int));
int kinfo_loadavg __P((int, char *, int *, int, int *));
int kinfo_meter __P((int, caddr_t, int *, int, int *));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_init __P((vm_offset_t, vm_offset_t));
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
vm_size_t, boolean_t));
void loadav __P((struct loadavg *));
void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
void sched __P((void));
int svm_allocate __P((struct proc *, void *, int *));
int svm_deallocate __P((struct proc *, void *, int *));
int svm_inherit __P((struct proc *, void *, int *));
int svm_protect __P((struct proc *, void *, int *));
void swapinit __P((void));
int swapon __P((struct proc *, void *, int *));
void swapout __P((struct proc *));
void swapout_threads __P((void));
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
void thread_block __P((char *));
void thread_sleep __P((int, simple_lock_t, boolean_t));
void thread_wakeup __P((int));
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t,
vm_offset_t *, vm_size_t, boolean_t));
int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
vm_size_t, boolean_t, vm_pager_t, vm_offset_t, boolean_t));
int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fork __P((struct proc *, struct proc *, int));
int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
void vm_init_limits __P((struct proc *));
void vm_mem_init __P((void));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t,
vm_offset_t, vm_offset_t));
int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_free __P((struct vmspace *));
void vmtotal __P((struct vmtotal *));
void vnode_pager_setsize __P((struct vnode *, u_long));
void vnode_pager_umount __P((struct mount *));
boolean_t vnode_pager_uncache __P((struct vnode *));
void vslock __P((caddr_t, u_int));
void vsunlock __P((caddr_t, u_int, int));
#endif /* KERNEL */
void assert_wait __P((int, boolean_t));
int grow __P((struct proc *, u_int));
void iprintf __P((const char *,...));
int kernacc __P((caddr_t, int, int));
int kinfo_loadavg __P((int, char *, int *, int, int *));
int kinfo_meter __P((int, caddr_t, int *, int, int *));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_init __P((vm_offset_t, vm_offset_t));
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, boolean_t));
void loadav __P((struct loadavg *));
void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
void sched __P((void));
int svm_allocate __P((struct proc *, void *, int *));
int svm_deallocate __P((struct proc *, void *, int *));
int svm_inherit __P((struct proc *, void *, int *));
int svm_protect __P((struct proc *, void *, int *));
void swapinit __P((void));
int swapon __P((struct proc *, void *, int *));
void swapout __P((struct proc *));
void swapout_threads __P((void));
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
void thread_block __P((char *));
void thread_sleep __P((int, simple_lock_t, boolean_t));
void thread_wakeup __P((int));
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t, boolean_t));
int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *, vm_size_t, boolean_t, vm_pager_t, vm_offset_t, boolean_t));
int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fork __P((struct proc *, struct proc *, int));
int vm_inherit __P((vm_map_t, vm_offset_t, vm_size_t, vm_inherit_t));
void vm_init_limits __P((struct proc *));
void vm_mem_init __P((void));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
int vm_protect __P((vm_map_t, vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_free __P((struct vmspace *));
void vmtotal __P((struct vmtotal *));
void vnode_pager_setsize __P((struct vnode *, u_long));
void vnode_pager_umount __P((struct mount *));
boolean_t vnode_pager_uncache __P((struct vnode *));
void vslock __P((caddr_t, u_int));
void vsunlock __P((caddr_t, u_int, int));
#endif /* !_VM_EXTERN_H_ */
#endif /* KERNEL */
#endif /* !_VM_EXTERN_H_ */

File diff suppressed because it is too large.


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -38,17 +38,17 @@
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.9 1994/11/13 12:47:07 davidg Exp $
* $Id: vm_glue.c,v 1.10 1994/12/18 06:31:31 davidg Exp $
*/
#include <sys/param.h>
@ -81,8 +81,9 @@
#include <machine/cpu.h>
extern char kstack[];
int avefree = 0; /* XXX */
int readbuffers = 0; /* XXX allow kgdb to read kernel buffer pool */
int avefree = 0; /* XXX */
int readbuffers = 0; /* XXX allow kgdb to read kernel buffer pool */
/* vm_map_t upages_map; */
int
@ -95,9 +96,9 @@ kernacc(addr, len, rw)
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
saddr = trunc_page(addr);
eaddr = round_page(addr+len);
eaddr = round_page(addr + len);
rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
return(rv == TRUE);
return (rv == TRUE);
}
int
@ -111,20 +112,19 @@ useracc(addr, len, rw)
/*
* XXX - check separately to disallow access to user area and user
* page tables - they are in the map.
*
* XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was
* once only used (as an end address) in trap.c. Use it as an end
* address here too. This bogusness has spread. I just fixed
* where it was used as a max in vm_mmap.c.
*
* XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was once
* only used (as an end address) in trap.c. Use it as an end address
* here too. This bogusness has spread. I just fixed where it was
* used as a max in vm_mmap.c.
*/
if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
|| (vm_offset_t) addr + len < (vm_offset_t) addr) {
return (FALSE);
}
rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
trunc_page(addr), round_page(addr+len), prot);
return(rv == TRUE);
trunc_page(addr), round_page(addr + len), prot);
return (rv == TRUE);
}
#ifdef KGDB
@ -140,29 +140,29 @@ chgkprot(addr, len, rw)
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
vm_map_protect(kernel_map, trunc_page(addr),
round_page(addr+len), prot, FALSE);
round_page(addr + len), prot, FALSE);
}
#endif
void
vslock(addr, len)
caddr_t addr;
u_int len;
caddr_t addr;
u_int len;
{
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
round_page(addr+len), FALSE);
round_page(addr + len), FALSE);
}
void
vsunlock(addr, len, dirtied)
caddr_t addr;
u_int len;
caddr_t addr;
u_int len;
int dirtied;
{
#ifdef lint
dirtied++;
#endif lint
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
round_page(addr+len), TRUE);
#endif /* lint */
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
round_page(addr + len), TRUE);
}
/*
@ -186,15 +186,17 @@ vm_fork(p1, p2, isvfork)
int i;
struct vm_map *vp;
while( cnt.v_free_count < cnt.v_free_min)
while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
VM_WAIT;
}
/*
* avoid copying any of the parent's pagetables or other per-process
* objects that reside in the map by marking all of them non-inheritable
* objects that reside in the map by marking all of them
* non-inheritable
*/
(void)vm_map_inherit(&p1->p_vmspace->vm_map,
UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
(void) vm_map_inherit(&p1->p_vmspace->vm_map,
UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
p2->p_vmspace = vmspace_fork(p1->p_vmspace);
#ifdef SYSVSHM
@ -203,7 +205,8 @@ vm_fork(p1, p2, isvfork)
#endif
/*
* Allocate a wired-down (for now) pcb and kernel stack for the process
* Allocate a wired-down (for now) pcb and kernel stack for the
* process
*/
addr = (vm_offset_t) kstack;
@ -211,56 +214,57 @@ vm_fork(p1, p2, isvfork)
vp = &p2->p_vmspace->vm_map;
/* ream out old pagetables and kernel stack */
(void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
(void) vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
/* get new pagetables and kernel stack */
(void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
(void) vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
/* force in the page table encompassing the UPAGES */
ptaddr = trunc_page((u_int)vtopte(addr));
ptaddr = trunc_page((u_int) vtopte(addr));
vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);
/* and force in (demand-zero) the UPAGES */
vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);
/* get a kernel virtual address for the UPAGES for this proc */
up = (struct user *)kmem_alloc_pageable(kernel_map, UPAGES * NBPG);
up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * NBPG);
/* and force-map the upages into the kernel pmap */
for (i = 0; i < UPAGES; i++)
pmap_enter(vm_map_pmap(kernel_map),
((vm_offset_t) up) + NBPG * i,
pmap_extract(vp->pmap, addr + NBPG * i),
VM_PROT_READ|VM_PROT_WRITE, 1);
pmap_enter(vm_map_pmap(u_map),
((vm_offset_t) up) + NBPG * i,
pmap_extract(vp->pmap, addr + NBPG * i),
VM_PROT_READ | VM_PROT_WRITE, 1);
/* and allow the UPAGES page table entry to be paged (at the vm system level) */
/*
* and allow the UPAGES page table entry to be paged (at the vm system
* level)
*/
vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);
p2->p_addr = up;
/*
* p_stats and p_sigacts currently point at fields
* in the user struct but not at &u, instead at p_addr.
* Copy p_sigacts and parts of p_stats; zero the rest
* of p_stats (statistics).
* p_stats and p_sigacts currently point at fields in the user struct
* but not at &u, instead at p_addr. Copy p_sigacts and parts of
* p_stats; zero the rest of p_stats (statistics).
*/
p2->p_stats = &up->u_stats;
p2->p_sigacts = &up->u_sigacts;
up->u_sigacts = *p1->p_sigacts;
bzero(&up->u_stats.pstat_startzero,
(unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
(caddr_t)&up->u_stats.pstat_startzero));
(unsigned) ((caddr_t) & up->u_stats.pstat_endzero -
(caddr_t) & up->u_stats.pstat_startzero));
bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
((caddr_t)&up->u_stats.pstat_endcopy -
(caddr_t)&up->u_stats.pstat_startcopy));
((caddr_t) & up->u_stats.pstat_endcopy -
(caddr_t) & up->u_stats.pstat_startcopy));
/*
* cpu_fork will copy and update the kernel stack and pcb,
* and make the child ready to run. It marks the child
* so that it can return differently than the parent.
* It returns twice, once in the parent process and
* once in the child.
* cpu_fork will copy and update the kernel stack and pcb, and make
* the child ready to run. It marks the child so that it can return
* differently than the parent. It returns twice, once in the parent
* process and once in the child.
*/
return (cpu_fork(p1, p2));
}
@ -276,27 +280,26 @@ vm_init_limits(p)
int rss_limit;
/*
* Set up the initial limits on process VM.
* Set the maximum resident set size to be half
* of (reasonably) available memory. Since this
* is a soft limit, it comes into effect only
* when the system is out of memory - half of
* main memory helps to favor smaller processes,
* Set up the initial limits on process VM. Set the maximum resident
* set size to be half of (reasonably) available memory. Since this
* is a soft limit, it comes into effect only when the system is out
* of memory - half of main memory helps to favor smaller processes,
* and reduces thrashing of the object cache.
*/
p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
/* limit the limit to no less than 2MB */
p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
/* limit the limit to no less than 2MB */
rss_limit = max(cnt.v_free_count / 2, 512);
p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
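	/*
	 * Worked example: with 4 KB pages (NBPG on the i386), the 512-page
	 * floor above is 512 * 4096 bytes = 2 MB -- the "no less than 2MB"
	 * the comment refers to.  ptoa() then converts that page count back
	 * into a byte value for the rlimit.
	 */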
}
#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;
int enableswap = 1;
int swapdebug = 0;
#define SDB_FOLLOW 1
#define SDB_SWAPIN 2
#define SDB_SWAPOUT 4
@ -304,7 +307,7 @@ int swapdebug = 0;
void
faultin(p)
struct proc *p;
struct proc *p;
{
vm_offset_t i;
vm_offset_t ptaddr;
@ -317,22 +320,23 @@ struct proc *p;
map = &p->p_vmspace->vm_map;
/* force the page table encompassing the kernel stack (upages) */
ptaddr = trunc_page((u_int)vtopte(kstack));
ptaddr = trunc_page((u_int) vtopte(kstack));
vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);
/* wire in the UPAGES */
vm_map_pageable(map, (vm_offset_t) kstack,
(vm_offset_t) kstack + UPAGES * NBPG, FALSE);
(vm_offset_t) kstack + UPAGES * NBPG, FALSE);
/* and map them nicely into the kernel pmap */
for (i = 0; i < UPAGES; i++) {
vm_offset_t off = i * NBPG;
vm_offset_t pa = (vm_offset_t)
pmap_extract(&p->p_vmspace->vm_pmap,
(vm_offset_t) kstack + off);
pmap_enter(vm_map_pmap(kernel_map),
((vm_offset_t)p->p_addr) + off,
pa, VM_PROT_READ|VM_PROT_WRITE, 1);
pmap_extract(&p->p_vmspace->vm_pmap,
(vm_offset_t) kstack + off);
pmap_enter(vm_map_pmap(u_map),
((vm_offset_t) p->p_addr) + off,
pa, VM_PROT_READ | VM_PROT_WRITE, 1);
}
/* and let the page table pages go (at least above pmap level) */
@ -343,18 +347,15 @@ struct proc *p;
if (p->p_stat == SRUN)
setrunqueue(p);
p->p_flag |= P_INMEM;
p->p_flag |= P_INMEM;
/* undo the effect of setting SLOCK above */
--p->p_lock;
splx(s);
}
}
int swapinreq;
int percentactive;
/*
* This swapin algorithm attempts to swap-in processes only if there
* is enough space for them. Of course, if a process waits for a long
@ -367,95 +368,45 @@ scheduler()
register int pri;
struct proc *pp;
int ppri;
int lastidle, lastrun;
int curidle, currun;
int forceload;
int percent;
int ntries;
lastidle = 0;
lastrun = 0;
loop:
ntries = 0;
while ((cnt.v_free_count + cnt.v_cache_count) < (cnt.v_free_reserved + UPAGES + 2)) {
VM_WAIT;
tsleep((caddr_t) & proc0, PVM, "schedm", 0);
}
curidle = cp_time[CP_IDLE];
currun = cp_time[CP_USER] + cp_time[CP_SYS] + cp_time[CP_NICE];
percent = (100*(currun-lastrun)) / ( 1 + (currun-lastrun) + (curidle-lastidle));
lastrun = currun;
lastidle = curidle;
if( percent > 100)
percent = 100;
percentactive = percent;
if( percentactive < 25)
forceload = 1;
else
forceload = 0;
loop1:
pp = NULL;
ppri = INT_MIN;
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
if (p->p_stat == SRUN && (p->p_flag & (P_INMEM|P_SWAPPING)) == 0) {
for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
if (p->p_stat == SRUN && (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
int mempri;
pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
mempri = pri > 0 ? pri : 0;
/*
/*
* if this process is higher priority and there is
* enough space, then select this process instead
* of the previous selection.
* enough space, then select this process instead of
* the previous selection.
*/
if (pri > ppri &&
(((cnt.v_free_count + (mempri * (4*PAGE_SIZE) / PAGE_SIZE) >= (p->p_vmspace->vm_swrss)) || (ntries > 0 && forceload)))) {
if (pri > ppri) {
pp = p;
ppri = pri;
}
}
}
if ((pp == NULL) && (ntries == 0) && forceload) {
++ntries;
goto loop1;
}
/*
* Nothing to do, back to sleep
*/
if ((p = pp) == NULL) {
tsleep((caddr_t)&proc0, PVM, "sched", 0);
tsleep((caddr_t) & proc0, PVM, "sched", 0);
goto loop;
}
/*
* We would like to bring someone in. (only if there is space).
*/
/*
printf("swapin: %d, free: %d, res: %d, min: %d\n",
p->p_pid, cnt.v_free_count, cnt.v_free_reserved, cnt.v_free_min);
*/
(void) splhigh();
if ((forceload && (cnt.v_free_count > (cnt.v_free_reserved + UPAGES + 1))) ||
(cnt.v_free_count >= cnt.v_free_min)) {
spl0();
faultin(p);
p->p_swtime = 0;
goto loop;
}
/*
* log the memory shortage
*/
swapinreq += p->p_vmspace->vm_swrss;
/*
* Not enough memory, jab the pageout daemon and wait til the
* coast is clear.
*/
if( cnt.v_free_count < cnt.v_free_min) {
VM_WAIT;
} else {
tsleep((caddr_t)&proc0, PVM, "sched", 0);
}
(void) spl0();
faultin(p);
p->p_swtime = 0;
goto loop;
}
@ -464,6 +415,7 @@ scheduler()
((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)
extern int vm_pageout_free_min;
/*
* Swapout is driven by the pageout daemon. Very simple, we find eligible
* procs and unwire their u-areas. We try to always "swap" at least one
@ -480,98 +432,58 @@ swapout_threads()
int outpri, outpri2;
int tpri;
int didswap = 0;
int swapneeded = swapinreq;
extern int maxslp;
int runnablenow;
runnablenow = 0;
outp = outp2 = NULL;
outpri = outpri2 = INT_MIN;
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
retry:
for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
if (!swappable(p))
continue;
switch (p->p_stat) {
case SRUN:
++runnablenow;
/*
* count the process as being in a runnable state
*/
if ((tpri = p->p_swtime + p->p_nice * 8) > outpri2) {
outp2 = p;
outpri2 = tpri;
}
default:
continue;
case SSLEEP:
case SSTOP:
/*
* do not swapout a realtime process
*/
if (p->p_rtprio.type == RTP_PRIO_REALTIME)
continue;
* do not swapout a realtime process
*/
if (p->p_rtprio.type == RTP_PRIO_REALTIME)
continue;
/*
* do not swapout a process that is waiting for VM datastructures
* there is a possible deadlock.
* do not swapout a process waiting on a critical
* event of some kind
*/
if (!lock_try_write( &p->p_vmspace->vm_map.lock)) {
if ((p->p_priority & 0x7f) < PSOCK)
continue;
/*
* do not swapout a process that is waiting for VM
* datastructures there is a possible deadlock.
*/
if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
continue;
}
vm_map_unlock( &p->p_vmspace->vm_map);
vm_map_unlock(&p->p_vmspace->vm_map);
/*
* If the process has been asleep for awhile and had most
* of its pages taken away already, swap it out.
* If the process has been asleep for awhile and had
* most of its pages taken away already, swap it out.
*/
if (p->p_slptime > maxslp) {
swapout(p);
didswap++;
} else if ((tpri = p->p_slptime + p->p_nice * 8) > outpri &&
(p->p_vmspace->vm_pmap.pm_stats.resident_count <= 6)) {
outp = p;
outpri = tpri ;
goto retry;
}
continue;
}
}
/*
* We swapout only if there are more than two runnable processes or if
* another process needs some space to swapin.
*/
if ((swapinreq || ((percentactive > 90) && (runnablenow > 2))) &&
(((cnt.v_free_count + cnt.v_inactive_count) <= (cnt.v_free_target + cnt.v_inactive_target)) ||
(cnt.v_free_count < cnt.v_free_min))) {
if ((p = outp) == 0) {
p = outp2;
}
/*
* Only swapout processes that have already had most
* of their pages taken away.
*/
if (p && (p->p_vmspace->vm_pmap.pm_stats.resident_count <= 6)) {
swapout(p);
didswap = 1;
}
}
/*
* if we previously had found a process to swapout, and we need to swapout
* more then try again.
*/
#if 0
if( p && swapinreq)
goto swapmore;
#endif
/*
* If we swapped something out, and another process needed memory,
* then wakeup the sched process.
*/
if (didswap) {
if (swapneeded)
wakeup((caddr_t)&proc0);
swapinreq = 0;
}
if (didswap)
wakeup((caddr_t) & proc0);
}
void
@ -585,11 +497,7 @@ swapout(p)
* remember the process resident count
*/
p->p_vmspace->vm_swrss =
p->p_vmspace->vm_pmap.pm_stats.resident_count;
/*
* and decrement the amount of needed space
*/
swapinreq -= min(swapinreq, p->p_vmspace->vm_pmap.pm_stats.resident_count);
p->p_vmspace->vm_pmap.pm_stats.resident_count;
(void) splhigh();
p->p_flag &= ~P_INMEM;
@ -598,12 +506,14 @@ swapout(p)
(void) spl0();
p->p_flag |= P_SWAPPING;
/* let the upages be paged */
pmap_remove(vm_map_pmap(kernel_map),
(vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);
/*
* let the upages be paged
*/
pmap_remove(vm_map_pmap(u_map),
(vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);
vm_map_pageable(map, (vm_offset_t) kstack,
(vm_offset_t) kstack + UPAGES * NBPG, TRUE);
(vm_offset_t) kstack + UPAGES * NBPG, TRUE);
p->p_flag &= ~P_SWAPPING;
p->p_swtime = 0;
@ -630,7 +540,7 @@ void
thread_block(char *msg)
{
if (curproc->p_thread)
tsleep((caddr_t)curproc->p_thread, PVM, msg, 0);
tsleep((caddr_t) curproc->p_thread, PVM, msg, 0);
}
@ -644,7 +554,7 @@ thread_sleep_(event, lock, wmesg)
curproc->p_thread = event;
simple_unlock(lock);
if (curproc->p_thread) {
tsleep((caddr_t)event, PVM, wmesg, 0);
tsleep((caddr_t) event, PVM, wmesg, 0);
}
}
@ -653,7 +563,7 @@ void
thread_wakeup(event)
int event;
{
wakeup((caddr_t)event);
wakeup((caddr_t) event);
}
#endif
@ -663,16 +573,17 @@ thread_wakeup(event)
int indent = 0;
#include <machine/stdarg.h> /* see subr_prf.c */
#include <machine/stdarg.h> /* see subr_prf.c */
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
iprintf(const char *fmt,...)
#else
iprintf(fmt /* , va_alist */)
iprintf(fmt /* , va_alist */ )
char *fmt;
/* va_dcl */
/* va_dcl */
#endif
{
register int i;
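
The reworked scheduler() above drops the old forceload/percentactive heuristics: once v_free_count + v_cache_count climbs back above the reserve, it simply faults in the highest-priority swapped-out runnable process. A minimal standalone sketch of that selection rule follows; the struct, field, and function names are simplified illustrations, not the kernel's struct proc.

	#include <limits.h>
	#include <stddef.h>

	struct proc_sketch {
		int swtime;			/* seconds swapped out */
		int slptime;			/* seconds asleep */
		int nice;			/* nice value */
		int runnable_swapped_out;	/* SRUN, neither P_INMEM nor P_SWAPPING */
		struct proc_sketch *next;
	};

	/* Pick the process with the largest swtime + slptime - 8 * nice. */
	static struct proc_sketch *
	pick_swapin_candidate(struct proc_sketch *allproc)
	{
		struct proc_sketch *p, *pp = NULL;
		int pri, ppri = INT_MIN;

		for (p = allproc; p != NULL; p = p->next) {
			if (!p->runnable_swapped_out)
				continue;
			pri = p->swtime + p->slptime - p->nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
		return (pp);	/* NULL: nothing to swap in, go back to sleep */
	}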

vm_inherit.h

@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_inherit.h,v 1.2 1994/08/02 07:55:20 davidg Exp $
*/
/*
@ -82,4 +82,4 @@
#define VM_INHERIT_DEFAULT VM_INHERIT_COPY
#endif /* _VM_INHERIT_ */
#endif /* _VM_INHERIT_ */

vm_init.c

@ -1,5 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -41,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -62,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_init.c,v 1.3 1994/08/02 07:55:21 davidg Exp $
* $Id: vm_init.c,v 1.4 1994/10/09 01:52:09 phk Exp $
*/
/*
@ -86,13 +85,12 @@
void
vm_mem_init()
{
extern vm_offset_t avail_start, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
extern vm_offset_t avail_start, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
/*
* Initializes resident memory structures.
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
* Initializes resident memory structures. From here on, all physical
* memory is accounted for, and we use only virtual addresses.
*/
vm_set_page_size();

vm_kern.c

@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.6 1994/08/07 14:53:26 davidg Exp $
* $Id: vm_kern.c,v 1.7 1994/08/18 22:36:02 wollman Exp $
*/
/*
@ -78,14 +78,16 @@
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
vm_map_t buffer_map;
vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t mb_map;
vm_map_t io_map;
vm_map_t clean_map;
vm_map_t pager_map;
vm_map_t phys_map;
vm_map_t buffer_map;
vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t mb_map;
vm_map_t io_map;
vm_map_t clean_map;
vm_map_t pager_map;
vm_map_t phys_map;
vm_map_t exec_map;
vm_map_t u_map;
/*
* kmem_alloc_pageable:
@ -94,12 +96,13 @@ vm_map_t phys_map;
* map must be "kernel_map" below.
*/
vm_offset_t kmem_alloc_pageable(map, size)
vm_map_t map;
register vm_size_t size;
vm_offset_t
kmem_alloc_pageable(map, size)
vm_map_t map;
register vm_size_t size;
{
vm_offset_t addr;
register int result;
vm_offset_t addr;
register int result;
#if 0
if (map != kernel_map)
@ -110,38 +113,37 @@ vm_offset_t kmem_alloc_pageable(map, size)
addr = vm_map_min(map);
result = vm_map_find(map, NULL, (vm_offset_t) 0,
&addr, size, TRUE);
&addr, size, TRUE);
if (result != KERN_SUCCESS) {
return(0);
return (0);
}
return(addr);
return (addr);
}
/*
* Allocate wired-down memory in the kernel's address map
* or a submap.
*/
vm_offset_t kmem_alloc(map, size)
register vm_map_t map;
register vm_size_t size;
vm_offset_t
kmem_alloc(map, size)
register vm_map_t map;
register vm_size_t size;
{
vm_offset_t addr;
register vm_offset_t offset;
vm_offset_t i;
vm_offset_t addr;
register vm_offset_t offset;
vm_offset_t i;
size = round_page(size);
/*
* Use the kernel object for wired-down kernel pages.
* Assume that no region of the kernel object is
* referenced more than once.
* Use the kernel object for wired-down kernel pages. Assume that no
* region of the kernel object is referenced more than once.
*/
/*
* Locate sufficient space in the map. This will give us the
* final virtual address for the new memory, and thus will tell
* us the offset within the kernel map.
* Locate sufficient space in the map. This will give us the final
* virtual address for the new memory, and thus will tell us the
* offset within the kernel map.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr)) {
@ -154,56 +156,50 @@ vm_offset_t kmem_alloc(map, size)
vm_map_unlock(map);
/*
* Guarantee that there are pages already in this object
* before calling vm_map_pageable. This is to prevent the
* following scenario:
*
* 1) Threads have swapped out, so that there is a
* pager for the kernel_object.
* 2) The kmsg zone is empty, and so we are kmem_allocing
* a new page for it.
* 3) vm_map_pageable calls vm_fault; there is no page,
* but there is a pager, so we call
* pager_data_request. But the kmsg zone is empty,
* so we must kmem_alloc.
* 4) goto 1
* 5) Even if the kmsg zone is not empty: when we get
* the data back from the pager, it will be (very
* stale) non-zero data. kmem_alloc is defined to
* return zero-filled memory.
*
* We're intentionally not activating the pages we allocate
* to prevent a race with page-out. vm_map_pageable will wire
* the pages.
* Guarantee that there are pages already in this object before
* calling vm_map_pageable. This is to prevent the following
* scenario:
*
* 1) Threads have swapped out, so that there is a pager for the
* kernel_object. 2) The kmsg zone is empty, and so we are
* kmem_allocing a new page for it. 3) vm_map_pageable calls vm_fault;
* there is no page, but there is a pager, so we call
* pager_data_request. But the kmsg zone is empty, so we must
* kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
* we get the data back from the pager, it will be (very stale)
* non-zero data. kmem_alloc is defined to return zero-filled memory.
*
* We're intentionally not activating the pages we allocate to prevent a
* race with page-out. vm_map_pageable will wire the pages.
*/
vm_object_lock(kernel_object);
for (i = 0 ; i < size; i+= PAGE_SIZE) {
vm_page_t mem;
for (i = 0; i < size; i += PAGE_SIZE) {
vm_page_t mem;
while ((mem = vm_page_alloc(kernel_object, offset+i)) == NULL) {
while ((mem = vm_page_alloc(kernel_object, offset + i, 0)) == NULL) {
vm_object_unlock(kernel_object);
VM_WAIT;
vm_object_lock(kernel_object);
}
vm_page_zero_fill(mem);
mem->flags &= ~PG_BUSY;
mem->valid |= VM_PAGE_BITS_ALL;
}
vm_object_unlock(kernel_object);
/*
* And finally, mark the data as non-pageable.
* And finally, mark the data as non-pageable.
*/
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
/*
* Try to coalesce the map
* Try to coalesce the map
*/
vm_map_simplify(map, addr);
return(addr);
return (addr);
}
/*
@ -213,10 +209,11 @@ vm_offset_t kmem_alloc(map, size)
* with kmem_alloc, and return the physical pages
* associated with that region.
*/
void kmem_free(map, addr, size)
vm_map_t map;
register vm_offset_t addr;
vm_size_t size;
void
kmem_free(map, addr, size)
vm_map_t map;
register vm_offset_t addr;
vm_size_t size;
{
(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
@ -234,20 +231,21 @@ void kmem_free(map, addr, size)
* min, max Returned endpoints of map
* pageable Can the region be paged
*/
vm_map_t kmem_suballoc(parent, min, max, size, pageable)
register vm_map_t parent;
vm_offset_t *min, *max;
register vm_size_t size;
boolean_t pageable;
vm_map_t
kmem_suballoc(parent, min, max, size, pageable)
register vm_map_t parent;
vm_offset_t *min, *max;
register vm_size_t size;
boolean_t pageable;
{
register int ret;
vm_map_t result;
register int ret;
vm_map_t result;
size = round_page(size);
*min = (vm_offset_t) vm_map_min(parent);
ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
min, size, TRUE);
min, size, TRUE);
if (ret != KERN_SUCCESS) {
printf("kmem_suballoc: bad status return of %d.\n", ret);
panic("kmem_suballoc");
@ -259,7 +257,7 @@ vm_map_t kmem_suballoc(parent, min, max, size, pageable)
panic("kmem_suballoc: cannot create submap");
if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
panic("kmem_suballoc: unable to change range to submap");
return(result);
return (result);
}
/*
@ -280,14 +278,14 @@ vm_map_t kmem_suballoc(parent, min, max, size, pageable)
*/
vm_offset_t
kmem_malloc(map, size, canwait)
register vm_map_t map;
register vm_size_t size;
boolean_t canwait;
register vm_map_t map;
register vm_size_t size;
boolean_t canwait;
{
register vm_offset_t offset, i;
vm_map_entry_t entry;
vm_offset_t addr;
vm_page_t m;
register vm_offset_t offset, i;
vm_map_entry_t entry;
vm_offset_t addr;
vm_page_t m;
if (map != kmem_map && map != mb_map)
panic("kern_malloc_alloc: map != {kmem,mb}_map");
@ -296,15 +294,15 @@ kmem_malloc(map, size, canwait)
addr = vm_map_min(map);
/*
* Locate sufficient space in the map. This will give us the
* final virtual address for the new memory, and thus will tell
* us the offset within the kernel map.
* Locate sufficient space in the map. This will give us the final
* virtual address for the new memory, and thus will tell us the
* offset within the kernel map.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr)) {
vm_map_unlock(map);
#if 0
if (canwait) /* XXX should wait */
if (canwait) /* XXX should wait */
panic("kmem_malloc: %s too small",
map == kmem_map ? "kmem_map" : "mb_map");
#endif
@ -317,29 +315,28 @@ kmem_malloc(map, size, canwait)
vm_map_insert(map, kmem_object, offset, addr, addr + size);
/*
* If we can wait, just mark the range as wired
* (will fault pages as necessary).
* If we can wait, just mark the range as wired (will fault pages as
* necessary).
*/
if (canwait) {
vm_map_unlock(map);
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
FALSE);
FALSE);
vm_map_simplify(map, addr);
return(addr);
return (addr);
}
/*
* If we cannot wait then we must allocate all memory up front,
* pulling it off the active queue to prevent pageout.
*/
vm_object_lock(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_alloc(kmem_object, offset + i);
m = vm_page_alloc(kmem_object, offset + i, 1);
/*
* Ran out of space, free everything up and return.
* Don't need to lock page queues here as we know
* that the pages we got aren't on any queues.
* Ran out of space, free everything up and return. Don't need
* to lock page queues here as we know that the pages we got
* aren't on any queues.
*/
if (m == NULL) {
while (i != 0) {
@ -350,20 +347,21 @@ kmem_malloc(map, size, canwait)
vm_object_unlock(kmem_object);
vm_map_delete(map, addr, addr + size);
vm_map_unlock(map);
return(0);
return (0);
}
#if 0
vm_page_zero_fill(m);
#endif
m->flags &= ~PG_BUSY;
m->valid |= VM_PAGE_BITS_ALL;
}
vm_object_unlock(kmem_object);
/*
* Mark map entry as non-pageable.
* Assert: vm_map_insert() will never be able to extend the previous
* entry so there will be a new entry exactly corresponding to this
* address range and it will have wired_count == 0.
* Mark map entry as non-pageable. Assert: vm_map_insert() will never
* be able to extend the previous entry so there will be a new entry
* exactly corresponding to this address range and it will have
* wired_count == 0.
*/
if (!vm_map_lookup_entry(map, addr, &entry) ||
entry->start != addr || entry->end != addr + size ||
@ -372,20 +370,20 @@ kmem_malloc(map, size, canwait)
entry->wired_count++;
/*
* Loop thru pages, entering them in the pmap.
* (We cannot add them to the wired count without
* wrapping the vm_page_queue_lock in splimp...)
* Loop thru pages, entering them in the pmap. (We cannot add them to
* the wired count without wrapping the vm_page_queue_lock in
* splimp...)
*/
for (i = 0; i < size; i += PAGE_SIZE) {
vm_object_lock(kmem_object);
m = vm_page_lookup(kmem_object, offset + i);
vm_object_unlock(kmem_object);
pmap_kenter( addr + i, VM_PAGE_TO_PHYS(m));
pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
}
vm_map_unlock(map);
vm_map_simplify(map, addr);
return(addr);
return (addr);
}
/*
@ -395,18 +393,19 @@ kmem_malloc(map, size, canwait)
* has no room, the caller sleeps waiting for more memory in the submap.
*
*/
vm_offset_t kmem_alloc_wait(map, size)
vm_map_t map;
vm_size_t size;
vm_offset_t
kmem_alloc_wait(map, size)
vm_map_t map;
vm_size_t size;
{
vm_offset_t addr;
vm_offset_t addr;
size = round_page(size);
for (;;) {
/*
* To make this work for more than one map,
* use the map's lock to lock out sleepers/wakers.
* To make this work for more than one map, use the map's lock
* to lock out sleepers/wakers.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr) == 0)
@ -416,11 +415,11 @@ vm_offset_t kmem_alloc_wait(map, size)
vm_map_unlock(map);
return (0);
}
assert_wait((int)map, TRUE);
assert_wait((int) map, TRUE);
vm_map_unlock(map);
thread_block("kmaw");
}
vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size);
vm_map_unlock(map);
return (addr);
}
@ -431,14 +430,15 @@ vm_offset_t kmem_alloc_wait(map, size)
* Returns memory to a submap of the kernel, and wakes up any threads
* waiting for memory in that map.
*/
void kmem_free_wakeup(map, addr, size)
vm_map_t map;
vm_offset_t addr;
vm_size_t size;
void
kmem_free_wakeup(map, addr, size)
vm_map_t map;
vm_offset_t addr;
vm_size_t size;
{
vm_map_lock(map);
(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
thread_wakeup((int)map);
thread_wakeup((int) map);
vm_map_unlock(map);
}
@ -448,7 +448,8 @@ void kmem_free_wakeup(map, addr, size)
* map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
* the range between `start' and `end' as free.
*/
void kmem_init(start, end)
void
kmem_init(start, end)
vm_offset_t start, end;
{
register vm_map_t m;
@ -457,7 +458,7 @@ void kmem_init(start, end)
vm_map_lock(m);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
kernel_map = m;
(void) vm_map_insert(m, NULL, (vm_offset_t)0,
(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
VM_MIN_KERNEL_ADDRESS, start);
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);

vm_kern.h

@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,20 +61,24 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.h,v 1.3 1994/08/02 07:55:23 davidg Exp $
* $Id: vm_kern.h,v 1.4 1994/08/18 22:36:03 wollman Exp $
*/
#ifndef _VM_VM_KERN_H_
#define _VM_VM_KERN_H_ 1
/* Kernel memory management definitions. */
extern vm_map_t buffer_map;
extern vm_map_t kernel_map;
extern vm_map_t kmem_map;
extern vm_map_t mb_map;
extern vm_map_t io_map;
extern vm_map_t clean_map;
extern vm_map_t pager_map;
extern vm_map_t phys_map;
extern vm_map_t buffer_map;
extern vm_map_t kernel_map;
extern vm_map_t kmem_map;
extern vm_map_t mb_map;
extern vm_map_t io_map;
extern vm_map_t clean_map;
extern vm_map_t pager_map;
extern vm_map_t phys_map;
extern vm_map_t exec_map;
extern vm_map_t u_map;
#endif /* _VM_VM_KERN_H_ */
extern vm_offset_t kernel_vm_end;
#endif /* _VM_VM_KERN_H_ */
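
The two submaps added to this list, exec_map and u_map, are created elsewhere in this commit with kmem_suballoc(), whose interface appears in vm_kern.c above. A hedged kernel-context sketch of that call pattern (assumes the usual VM headers are in scope; the size and the pageable flag are illustrative placeholders, not the values the kernel actually uses):

	/* Sketch only: carve a dedicated submap out of kernel_map. */
	static vm_map_t
	make_example_submap(void)
	{
		vm_offset_t minaddr, maxaddr;

		/* 16 pages and TRUE (pageable) are placeholders. */
		return (kmem_suballoc(kernel_map, &minaddr, &maxaddr,
		    (vm_size_t) (16 * NBPG), TRUE));
	}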

File diff suppressed because it is too large

vm_map.h

@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_map.h,v 1.3 1994/08/02 07:55:26 davidg Exp $
*/
/*
@ -86,9 +86,9 @@
*/
union vm_map_object {
struct vm_object *vm_object; /* object object */
struct vm_map *share_map; /* share map */
struct vm_map *sub_map; /* belongs to another map */
struct vm_object *vm_object; /* object object */
struct vm_map *share_map; /* share map */
struct vm_map *sub_map; /* belongs to another map */
};
/*
@ -98,22 +98,22 @@ union vm_map_object {
* Also included is control information for virtual copy operations.
*/
struct vm_map_entry {
struct vm_map_entry *prev; /* previous entry */
struct vm_map_entry *next; /* next entry */
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
boolean_t is_a_map:1, /* Is "object" a map? */
is_sub_map:1, /* Is "object" a submap? */
/* Only in sharing maps: */
copy_on_write:1,/* is data copy-on-write */
needs_copy:1; /* does object need to be copied */
/* Only in task maps: */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
struct vm_map_entry *prev; /* previous entry */
struct vm_map_entry *next; /* next entry */
vm_offset_t start; /* start address */
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
boolean_t is_a_map:1, /* Is "object" a map? */
is_sub_map:1, /* Is "object" a submap? */
/* Only in sharing maps: */
copy_on_write:1, /* is data copy-on-write */
needs_copy:1; /* does object need to be copied */
/* Only in task maps: */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
};
/*
@ -123,19 +123,19 @@ struct vm_map_entry {
* insertion, or removal.
*/
struct vm_map {
struct pmap * pmap; /* Physical map */
lock_data_t lock; /* Lock for map data */
struct vm_map_entry header; /* List of entries */
int nentries; /* Number of entries */
vm_size_t size; /* virtual size */
boolean_t is_main_map; /* Am I a main map? */
int ref_count; /* Reference count */
simple_lock_data_t ref_lock; /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
simple_lock_data_t hint_lock; /* lock for hint storage */
vm_map_entry_t first_free; /* First free space hint */
boolean_t entries_pageable; /* map entries pageable?? */
unsigned int timestamp; /* Version number */
struct pmap *pmap; /* Physical map */
lock_data_t lock; /* Lock for map data */
struct vm_map_entry header; /* List of entries */
int nentries; /* Number of entries */
vm_size_t size; /* virtual size */
boolean_t is_main_map; /* Am I a main map? */
int ref_count; /* Reference count */
simple_lock_data_t ref_lock; /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
simple_lock_data_t hint_lock; /* lock for hint storage */
vm_map_entry_t first_free; /* First free space hint */
boolean_t entries_pageable; /* map entries pageable?? */
unsigned int timestamp; /* Version number */
#define min_offset header.start
#define max_offset header.end
};
@ -150,9 +150,9 @@ struct vm_map {
* does not include a reference for the imbedded share_map.]
*/
typedef struct {
int main_timestamp;
vm_map_t share_map;
int share_timestamp;
int main_timestamp;
vm_map_t share_map;
int share_timestamp;
} vm_map_version_t;
/*
@ -181,50 +181,36 @@ typedef struct {
#define MAX_KMAPENT 128
#ifdef KERNEL
boolean_t vm_map_check_protection __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_prot_t));
int vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t,
vm_size_t, vm_offset_t, boolean_t, boolean_t));
void vm_map_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
int vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, boolean_t, boolean_t));
void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t));
struct pmap;
vm_map_t vm_map_create __P((struct pmap *,
vm_offset_t, vm_offset_t, boolean_t));
void vm_map_deallocate __P((vm_map_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
vm_map_entry_t vm_map_entry_create __P((vm_map_t));
void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
int vm_map_find __P((vm_map_t, vm_object_t,
vm_offset_t, vm_offset_t *, vm_size_t, boolean_t));
int vm_map_findspace __P((vm_map_t,
vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *,
vm_offset_t, vm_offset_t, boolean_t));
int vm_map_insert __P((vm_map_t,
vm_object_t, vm_offset_t, vm_offset_t, vm_offset_t));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t,
vm_map_entry_t *, vm_object_t *, vm_offset_t *, vm_prot_t *,
boolean_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t,
vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t,
vm_offset_t, vm_offset_t, boolean_t));
int vm_map_clean __P((vm_map_t,
vm_offset_t, vm_offset_t, boolean_t, boolean_t));
void vm_map_print __P((vm_map_t, boolean_t));
int vm_map_protect __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_map_reference __P((vm_map_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_simplify __P((vm_map_t, vm_offset_t));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_map_t));
vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t, boolean_t));
void vm_map_deallocate __P((vm_map_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
vm_map_entry_t vm_map_entry_create __P((vm_map_t));
void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_offset_t, vm_offset_t *, vm_size_t, boolean_t));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_offset_t, vm_offset_t, vm_offset_t));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
vm_offset_t *, vm_prot_t *, boolean_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
void vm_map_print __P((vm_map_t, boolean_t));
int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_map_reference __P((vm_map_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_simplify __P((vm_map_t, vm_offset_t));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
#endif
#endif /* _VM_MAP_ */
#endif /* _VM_MAP_ */
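
As a usage note for the prototypes above: vm_map_check_protection() is the primitive that kernacc() and useracc() in vm_glue.c build on. A hedged kernel-context sketch of the same pattern; the helper name is made up for illustration.

	/* Sketch: is [addr, addr + len) readable in the given map? */
	static int
	range_is_readable(vm_map_t map, vm_offset_t addr, vm_size_t len)
	{
		vm_offset_t saddr = trunc_page(addr);
		vm_offset_t eaddr = round_page(addr + len);

		return (vm_map_check_protection(map, saddr, eaddr, VM_PROT_READ) == TRUE);
	}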

vm_meter.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
* $Id: vm_meter.c,v 1.3 1994/08/02 07:55:27 davidg Exp $
* $Id: vm_meter.c,v 1.4 1994/09/12 11:38:31 davidg Exp $
*/
#include <sys/param.h>
@ -41,9 +41,9 @@
#include <vm/vm.h>
#include <sys/sysctl.h>
struct loadavg averunnable; /* load average, of runnable procs */
struct loadavg averunnable; /* load average, of runnable procs */
int maxslp = MAXSLP;
int maxslp = MAXSLP;
void
vmmeter()
@ -51,15 +51,15 @@ vmmeter()
if (time.tv_sec % 5 == 0)
loadav(&averunnable);
if (proc0.p_slptime > maxslp/2)
wakeup((caddr_t)&proc0);
if (proc0.p_slptime > maxslp / 2)
wakeup((caddr_t) & proc0);
}
/*
* Constants for averages over 1, 5, and 15 minutes
* when sampling at 5 second intervals.
*/
fixpt_t cexp[3] = {
fixpt_t cexp[3] = {
0.9200444146293232 * FSCALE, /* exp(-1/12) */
0.9834714538216174 * FSCALE, /* exp(-1/60) */
0.9944598480048967 * FSCALE, /* exp(-1/180) */
@ -76,7 +76,7 @@ loadav(avg)
register int i, nrun;
register struct proc *p;
for (nrun = 0, p = (struct proc *)allproc; p != NULL; p = p->p_next) {
for (nrun = 0, p = (struct proc *) allproc; p != NULL; p = p->p_next) {
switch (p->p_stat) {
case SSLEEP:
if (p->p_priority > PZERO || p->p_slptime != 0)
@ -89,7 +89,7 @@ loadav(avg)
}
for (i = 0; i < 3; i++)
avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
}
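/*
 * Worked example of the update above: cexp[0] = exp(-1/12) ~= 0.9200, so one
 * 5-second step of the 1-minute average with a previous load of 1.00 and
 * nrun = 3 gives 1.00 * 0.9200 + 3 * (1 - 0.9200) ~= 1.16; the expression in
 * the loop computes the same thing in fixed point, scaled by FSCALE.
 */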
/*
@ -109,17 +109,32 @@ vm_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
/* all sysctl names at this level are terminal */
if (namelen != 1)
return (ENOTDIR); /* overloaded */
return (ENOTDIR); /* overloaded */
switch (name[0]) {
case VM_LOADAVG:
averunnable.fscale = FSCALE;
return (sysctl_rdstruct(oldp, oldlenp, newp, &averunnable,
sizeof(averunnable)));
sizeof(averunnable)));
case VM_METER:
vmtotal(&vmtotals);
return (sysctl_rdstruct(oldp, oldlenp, newp, &vmtotals,
sizeof(vmtotals)));
sizeof(vmtotals)));
case VM_V_FREE_MIN:
return (sysctl_int(oldp, oldlenp, newp, newlen, &cnt.v_free_min));
case VM_V_FREE_TARGET:
return (sysctl_int(oldp, oldlenp, newp, newlen, &cnt.v_free_target));
case VM_V_FREE_RESERVED:
return (sysctl_int(oldp, oldlenp, newp, newlen, &cnt.v_free_reserved));
case VM_V_INACTIVE_TARGET:
return (sysctl_int(oldp, oldlenp, newp, newlen, &cnt.v_inactive_target));
case VM_V_CACHE_MIN:
return (sysctl_int(oldp, oldlenp, newp, newlen, &cnt.v_cache_min));
case VM_V_CACHE_MAX:
return (sysctl_int(oldp, oldlenp, newp, newlen, &cnt.v_cache_max));
case VM_V_PAGEOUT_FREE_MIN:
return (sysctl_int(oldp, oldlenp, newp, newlen, &cnt.v_pageout_free_min));
default:
return (EOPNOTSUPP);
}
@ -135,7 +150,7 @@ vmtotal(totalp)
register struct vmtotal *totalp;
{
register struct proc *p;
register vm_map_entry_t entry;
register vm_map_entry_t entry;
register vm_object_t object;
register vm_map_t map;
int paging;
@ -146,14 +161,14 @@ vmtotal(totalp)
*/
simple_lock(&vm_object_list_lock);
for (object = vm_object_list.tqh_first;
object != NULL;
object = object->object_list.tqe_next)
object != NULL;
object = object->object_list.tqe_next)
object->flags &= ~OBJ_ACTIVE;
simple_unlock(&vm_object_list_lock);
/*
* Calculate process statistics.
*/
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
if (p->p_flag & P_SYSTEM)
continue;
switch (p->p_stat) {
@ -188,7 +203,7 @@ vmtotal(totalp)
*/
paging = 0;
for (map = &p->p_vmspace->vm_map, entry = map->header.next;
entry != &map->header; entry = entry->next) {
entry != &map->header; entry = entry->next) {
if (entry->is_a_map || entry->is_sub_map ||
entry->object.vm_object == NULL)
continue;
@ -203,8 +218,8 @@ vmtotal(totalp)
*/
simple_lock(&vm_object_list_lock);
for (object = vm_object_list.tqh_first;
object != NULL;
object = object->object_list.tqe_next) {
object != NULL;
object = object->object_list.tqe_next) {
totalp->t_vm += num_pages(object->size);
totalp->t_rm += object->resident_page_count;
if (object->flags & OBJ_ACTIVE) {
@ -221,5 +236,5 @@ vmtotal(totalp)
}
}
}
totalp->t_free = cnt.v_free_count;
totalp->t_free = cnt.v_free_count + cnt.v_cache_count;
}
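
The new cases above export the pageout daemon's free/cache thresholds through the vm sysctl tree. A hedged userland sketch of reading one of them with sysctl(3); the header providing the VM_V_* second-level names is assumed here to be <vm/vm_param.h>.

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <vm/vm_param.h>	/* assumed location of VM_V_FREE_MIN */
	#include <stdio.h>

	int
	main(void)
	{
		int mib[2], v_free_min;
		size_t len = sizeof(v_free_min);

		mib[0] = CTL_VM;
		mib[1] = VM_V_FREE_MIN;
		if (sysctl(mib, 2, &v_free_min, &len, NULL, 0) == -1) {
			perror("sysctl");
			return (1);
		}
		printf("vm.v_free_min: %d pages\n", v_free_min);
		return (0);
	}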

vm_mmap.c

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.6 1994/09/02 15:06:51 davidg Exp $
* $Id: vm_mmap.c,v 1.7 1994/10/09 01:52:11 phk Exp $
*/
/*
@ -63,6 +63,7 @@
#ifdef DEBUG
int mmapdebug = 0;
#define MDB_FOLLOW 0x01
#define MDB_SYNC 0x02
#define MDB_MAPIT 0x04
@ -71,8 +72,9 @@ int mmapdebug = 0;
void pmap_object_init_pt();
struct sbrk_args {
int incr;
int incr;
};
/* ARGSUSED */
int
sbrk(p, uap, retval)
@ -86,8 +88,9 @@ sbrk(p, uap, retval)
}
struct sstk_args {
int incr;
int incr;
};
/* ARGSUSED */
int
sstk(p, uap, retval)
@ -102,8 +105,9 @@ sstk(p, uap, retval)
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
struct getpagesize_args {
int dummy;
int dummy;
};
/* ARGSUSED */
int
ogetpagesize(p, uap, retval)
@ -115,16 +119,16 @@ ogetpagesize(p, uap, retval)
*retval = PAGE_SIZE;
return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */
#endif /* COMPAT_43 || COMPAT_SUNOS */
struct mmap_args {
caddr_t addr;
size_t len;
int prot;
int flags;
int fd;
long pad;
off_t pos;
caddr_t addr;
size_t len;
int prot;
int flags;
int fd;
long pad;
off_t pos;
};
int
@ -147,21 +151,21 @@ mmap(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n",
p->p_pid, uap->addr, uap->len, prot,
flags, uap->fd, (vm_offset_t)uap->pos);
p->p_pid, uap->addr, uap->len, prot,
flags, uap->fd, (vm_offset_t) uap->pos);
#endif
/*
* Address (if FIXED) must be page aligned.
* Size is implicitly rounded to a page boundary.
* Address (if FIXED) must be page aligned. Size is implicitly rounded
* to a page boundary.
*/
addr = (vm_offset_t) uap->addr;
if (((flags & MAP_FIXED) && (addr & PAGE_MASK)) ||
(ssize_t)uap->len < 0 || ((flags & MAP_ANON) && uap->fd != -1))
(ssize_t) uap->len < 0 || ((flags & MAP_ANON) && uap->fd != -1))
return (EINVAL);
size = (vm_size_t) round_page(uap->len);
/*
* Check for illegal addresses. Watch out for address wrap...
* Note that VM_*_ADDRESS are not constants due to casts (argh).
* Check for illegal addresses. Watch out for address wrap... Note
* that VM_*_ADDRESS are not constants due to casts (argh).
*/
if (flags & MAP_FIXED) {
if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
@ -174,11 +178,10 @@ mmap(p, uap, retval)
return (EINVAL);
}
/*
* XXX if no hint provided for a non-fixed mapping place it after
* the end of the largest possible heap.
*
* There should really be a pmap call to determine a reasonable
* location.
* XXX if no hint provided for a non-fixed mapping place it after the
* end of the largest possible heap.
*
* There should really be a pmap call to determine a reasonable location.
*/
if (addr == 0 && (flags & MAP_FIXED) == 0)
addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
@ -190,20 +193,20 @@ mmap(p, uap, retval)
maxprot = VM_PROT_ALL;
} else {
/*
* Mapping file, get fp for validation.
* Obtain vnode and make sure it is of appropriate type.
* Mapping file, get fp for validation. Obtain vnode and make
* sure it is of appropriate type.
*/
if (((unsigned)uap->fd) >= fdp->fd_nfiles ||
if (((unsigned) uap->fd) >= fdp->fd_nfiles ||
(fp = fdp->fd_ofiles[uap->fd]) == NULL)
return (EBADF);
if (fp->f_type != DTYPE_VNODE)
return (EINVAL);
vp = (struct vnode *)fp->f_data;
vp = (struct vnode *) fp->f_data;
if (vp->v_type != VREG && vp->v_type != VCHR)
return (EINVAL);
/*
* XXX hack to handle use of /dev/zero to map anon
* memory (ala SunOS).
* XXX hack to handle use of /dev/zero to map anon memory (ala
* SunOS).
*/
if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
handle = NULL;
@ -216,8 +219,8 @@ mmap(p, uap, retval)
* writability if mapping is shared; in this case,
* current and max prot are dictated by the open file.
* XXX use the vnode instead? Problem is: what
* credentials do we use for determination?
* What if proc does a setuid?
* credentials do we use for determination? What if
* proc does a setuid?
*/
maxprot = VM_PROT_EXECUTE; /* ??? */
if (fp->f_flag & FREAD)
@ -231,24 +234,24 @@ mmap(p, uap, retval)
return (EACCES);
} else
maxprot |= VM_PROT_WRITE;
handle = (caddr_t)vp;
handle = (caddr_t) vp;
}
}
error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
flags, handle, (vm_offset_t)uap->pos);
flags, handle, (vm_offset_t) uap->pos);
if (error == 0)
*retval = (int)addr;
*retval = (int) addr;
return (error);
}
#ifdef COMPAT_43
struct ommap_args {
caddr_t addr;
int len;
int prot;
int flags;
int fd;
long pos;
caddr_t addr;
int len;
int prot;
int flags;
int fd;
long pos;
};
int
ommap(p, uap, retval)
@ -261,12 +264,13 @@ ommap(p, uap, retval)
0,
PROT_EXEC,
PROT_WRITE,
PROT_EXEC|PROT_WRITE,
PROT_EXEC | PROT_WRITE,
PROT_READ,
PROT_EXEC|PROT_READ,
PROT_WRITE|PROT_READ,
PROT_EXEC|PROT_WRITE|PROT_READ,
PROT_EXEC | PROT_READ,
PROT_WRITE | PROT_READ,
PROT_EXEC | PROT_WRITE | PROT_READ,
};
#define OMAP_ANON 0x0002
#define OMAP_COPY 0x0020
#define OMAP_SHARED 0x0010
@ -275,7 +279,7 @@ ommap(p, uap, retval)
nargs.addr = uap->addr;
nargs.len = uap->len;
nargs.prot = cvtbsdprot[uap->prot&0x7];
nargs.prot = cvtbsdprot[uap->prot & 0x7];
nargs.flags = 0;
if (uap->flags & OMAP_ANON)
nargs.flags |= MAP_ANON;
@ -293,12 +297,12 @@ ommap(p, uap, retval)
nargs.pos = uap->pos;
return (mmap(p, &nargs, retval));
}
#endif /* COMPAT_43 */
#endif /* COMPAT_43 */
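
For reference against the argument checks in mmap() above (page-aligned address only when MAP_FIXED is given, non-negative length, and fd == -1 whenever MAP_ANON is set), a hedged userland sketch of a conforming anonymous mapping request; MAP_FAILED is the modern spelling of the (caddr_t)-1 error return.

	#include <sys/types.h>
	#include <sys/mman.h>
	#include <stdio.h>

	int
	main(void)
	{
		size_t len = 4 * 4096;	/* rounded to whole pages by the kernel anyway */
		void *p;

		/* MAP_ANON requires fd == -1 (see the EINVAL check in mmap() above). */
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return (1);
		}
		printf("anonymous mapping at %p\n", p);
		return (0);
	}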
struct msync_args {
caddr_t addr;
int len;
caddr_t addr;
int len;
};
int
msync(p, uap, retval)
@ -313,22 +317,21 @@ msync(p, uap, retval)
boolean_t syncio, invalidate;
#ifdef DEBUG
if (mmapdebug & (MDB_FOLLOW|MDB_SYNC))
if (mmapdebug & (MDB_FOLLOW | MDB_SYNC))
printf("msync(%d): addr %x len %x\n",
p->p_pid, uap->addr, uap->len);
p->p_pid, uap->addr, uap->len);
#endif
if (((int)uap->addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
if (((int) uap->addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
return (EINVAL);
map = &p->p_vmspace->vm_map;
addr = (vm_offset_t)uap->addr;
size = (vm_size_t)uap->len;
addr = (vm_offset_t) uap->addr;
size = (vm_size_t) uap->len;
/*
* XXX Gak! If size is zero we are supposed to sync "all modified
* pages with the region containing addr". Unfortunately, we
* don't really keep track of individual mmaps so we approximate
* by flushing the range of the map entry containing addr.
* This can be incorrect if the region splits or is coalesced
* with a neighbor.
* pages with the region containing addr". Unfortunately, we don't
* really keep track of individual mmaps so we approximate by flushing
* the range of the map entry containing addr. This can be incorrect
* if the region splits or is coalesced with a neighbor.
*/
if (size == 0) {
vm_map_entry_t entry;
@ -344,23 +347,23 @@ msync(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_SYNC)
printf("msync: cleaning/flushing address range [%x-%x)\n",
addr, addr+size);
addr, addr + size);
#endif
/*
* Could pass this in as a third flag argument to implement
* Sun's MS_ASYNC.
* Could pass this in as a third flag argument to implement Sun's
* MS_ASYNC.
*/
syncio = TRUE;
/*
* XXX bummer, gotta flush all cached pages to ensure
* consistency with the file system cache. Otherwise, we could
* pass this in to implement Sun's MS_INVALIDATE.
* XXX bummer, gotta flush all cached pages to ensure consistency with
* the file system cache. Otherwise, we could pass this in to
* implement Sun's MS_INVALIDATE.
*/
invalidate = TRUE;
/*
* Clean the pages and interpret the return value.
*/
rv = vm_map_clean(map, addr, addr+size, syncio, invalidate);
rv = vm_map_clean(map, addr, addr + size, syncio, invalidate);
switch (rv) {
case KERN_SUCCESS:
break;
@ -375,8 +378,8 @@ msync(p, uap, retval)
}
struct munmap_args {
caddr_t addr;
int len;
caddr_t addr;
int len;
};
int
munmap(p, uap, retval)
@ -391,18 +394,18 @@ munmap(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("munmap(%d): addr %x len %x\n",
p->p_pid, uap->addr, uap->len);
p->p_pid, uap->addr, uap->len);
#endif
addr = (vm_offset_t) uap->addr;
if ((addr & PAGE_MASK) || uap->len < 0)
return(EINVAL);
return (EINVAL);
size = (vm_size_t) round_page(uap->len);
if (size == 0)
return(0);
return (0);
/*
* Check for illegal addresses. Watch out for address wrap...
* Note that VM_*_ADDRESS are not constants due to casts (argh).
* Check for illegal addresses. Watch out for address wrap... Note
* that VM_*_ADDRESS are not constants due to casts (argh).
*/
if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
return (EINVAL);
@ -417,10 +420,10 @@ munmap(p, uap, retval)
* Make sure entire range is allocated.
*/
if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
return(EINVAL);
return (EINVAL);
/* returns nothing but KERN_SUCCESS anyway */
(void) vm_map_remove(map, addr, addr+size);
return(0);
(void) vm_map_remove(map, addr, addr + size);
return (0);
}
void
@ -440,9 +443,9 @@ munmapfd(p, fd)
}
struct mprotect_args {
caddr_t addr;
int len;
int prot;
caddr_t addr;
int len;
int prot;
};
int
mprotect(p, uap, retval)
@ -457,17 +460,17 @@ mprotect(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("mprotect(%d): addr %x len %x prot %d\n",
p->p_pid, uap->addr, uap->len, uap->prot);
p->p_pid, uap->addr, uap->len, uap->prot);
#endif
addr = (vm_offset_t)uap->addr;
addr = (vm_offset_t) uap->addr;
if ((addr & PAGE_MASK) || uap->len < 0)
return(EINVAL);
size = (vm_size_t)uap->len;
return (EINVAL);
size = (vm_size_t) uap->len;
prot = uap->prot & VM_PROT_ALL;
switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot,
FALSE)) {
switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
FALSE)) {
case KERN_SUCCESS:
return (0);
case KERN_PROTECTION_FAILURE:
@ -477,10 +480,11 @@ mprotect(p, uap, retval)
}
struct madvise_args {
caddr_t addr;
int len;
int behav;
caddr_t addr;
int len;
int behav;
};
/* ARGSUSED */
int
madvise(p, uap, retval)
@ -494,10 +498,11 @@ madvise(p, uap, retval)
}
struct mincore_args {
caddr_t addr;
int len;
char *vec;
caddr_t addr;
int len;
char *vec;
};
/* ARGSUSED */
int
mincore(p, uap, retval)
@ -511,8 +516,8 @@ mincore(p, uap, retval)
}
struct mlock_args {
caddr_t addr;
size_t len;
caddr_t addr;
size_t len;
};
int
mlock(p, uap, retval)
@ -528,12 +533,12 @@ mlock(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("mlock(%d): addr %x len %x\n",
p->p_pid, uap->addr, uap->len);
p->p_pid, uap->addr, uap->len);
#endif
addr = (vm_offset_t)uap->addr;
addr = (vm_offset_t) uap->addr;
if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
return (EINVAL);
size = round_page((vm_size_t)uap->len);
size = round_page((vm_size_t) uap->len);
if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
return (EAGAIN);
#ifdef pmap_wired_count
@ -546,13 +551,13 @@ mlock(p, uap, retval)
return (error);
#endif
error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE);
error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
struct munlock_args {
caddr_t addr;
size_t len;
caddr_t addr;
size_t len;
};
int
munlock(p, uap, retval)
@ -567,9 +572,9 @@ munlock(p, uap, retval)
#ifdef DEBUG
if (mmapdebug & MDB_FOLLOW)
printf("munlock(%d): addr %x len %x\n",
p->p_pid, uap->addr, uap->len);
p->p_pid, uap->addr, uap->len);
#endif
addr = (vm_offset_t)uap->addr;
addr = (vm_offset_t) uap->addr;
if ((addr & PAGE_MASK) || uap->addr + uap->len < uap->addr)
return (EINVAL);
#ifndef pmap_wired_count
@ -577,9 +582,9 @@ munlock(p, uap, retval)
if (error)
return (error);
#endif
size = round_page((vm_size_t)uap->len);
size = round_page((vm_size_t) uap->len);
error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE);
error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
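
The mlock()/munlock() paths above round the request to page boundaries, reject unaligned or wrapping ranges, and (for mlock) check the global wired-page budget before calling vm_map_pageable(). A minimal stand-alone sketch of that validation arithmetic, assuming a 4 KB page size and made-up wire-count limits rather than the kernel's:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define PAGE_SIZE	4096UL			/* assumed; the kernel derives this at runtime */
#define PAGE_MASK	(PAGE_SIZE - 1)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
#define atop(x)		((x) >> 12)		/* bytes to pages for the assumed 4 KB page */

/* Hypothetical stand-ins for cnt.v_wire_count and vm_page_max_wired. */
static unsigned long wire_count = 1000;
static unsigned long max_wired = 4096;

static int
mlock_check(uintptr_t addr, size_t len)
{
	size_t size;

	if ((addr & PAGE_MASK) || addr + len < addr)	/* unaligned or address wrap */
		return (EINVAL);
	size = round_page(len);
	if (atop(size) + wire_count > max_wired)	/* would exceed the wired budget */
		return (EAGAIN);
	return (0);
}

int
main(void)
{
	printf("aligned 64K request: %d\n", mlock_check(0x10000, 65536));
	printf("unaligned request:   %d\n", mlock_check(0x10001, 65536));
	return (0);
}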
@ -613,21 +618,21 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
*addr = round_page(*addr);
} else {
fitit = FALSE;
(void)vm_deallocate(map, *addr, size);
(void) vm_deallocate(map, *addr, size);
}
/*
* Lookup/allocate pager. All except an unnamed anonymous lookup
* gain a reference to ensure continued existence of the object.
* (XXX the exception is to appease the pageout daemon)
* Lookup/allocate pager. All except an unnamed anonymous lookup gain
* a reference to ensure continued existence of the object. (XXX the
* exception is to appease the pageout daemon)
*/
if (flags & MAP_ANON)
type = PG_DFLT;
else {
vp = (struct vnode *)handle;
vp = (struct vnode *) handle;
if (vp->v_type == VCHR) {
type = PG_DEVICE;
handle = (caddr_t)vp->v_rdev;
handle = (caddr_t) vp->v_rdev;
} else
type = PG_VNODE;
}
@ -638,6 +643,9 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
* Find object and release extra reference gained by lookup
*/
object = vm_object_lookup(pager);
if (handle && object == NULL) {
panic("vm_mmap: vm_object_lookup failed");
}
vm_object_deallocate(object);
/*
@ -645,7 +653,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
*/
if (flags & MAP_ANON) {
rv = vm_allocate_with_pager(map, addr, size, fitit,
pager, foff, TRUE);
pager, foff, TRUE);
if (rv != KERN_SUCCESS) {
if (handle == NULL)
vm_pager_deallocate(pager);
@ -654,34 +662,32 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
goto out;
}
/*
* Don't cache anonymous objects.
* Loses the reference gained by vm_pager_allocate.
* Note that object will be NULL when handle == NULL,
* this is ok since vm_allocate_with_pager has made
* sure that these objects are uncached.
* Don't cache anonymous objects. Loses the reference gained
* by vm_pager_allocate. Note that object will be NULL when
* handle == NULL, this is ok since vm_allocate_with_pager has
* made sure that these objects are uncached.
*/
(void) pager_cache(object, FALSE);
#ifdef DEBUG
if (mmapdebug & MDB_MAPIT)
printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
curproc->p_pid, *addr, size, pager);
curproc->p_pid, *addr, size, pager);
#endif
}
/*
* Must be a mapped file.
* Distinguish between character special and regular files.
* Must be a mapped file. Distinguish between character special and
* regular files.
*/
else if (vp->v_type == VCHR) {
rv = vm_allocate_with_pager(map, addr, size, fitit,
pager, foff, FALSE);
pager, foff, FALSE);
/*
* Uncache the object and lose the reference gained
* by vm_pager_allocate(). If the call to
* vm_allocate_with_pager() was successful, then we
* gained an additional reference ensuring the object
* will continue to exist. If the call failed then
* the deallocate call below will terminate the
* object which is fine.
* Uncache the object and lose the reference gained by
* vm_pager_allocate(). If the call to
* vm_allocate_with_pager() was successful, then we gained an
* additional reference ensuring the object will continue to
* exist. If the call failed then the deallocate call below
* will terminate the object which is fine.
*/
(void) pager_cache(object, FALSE);
if (rv != KERN_SUCCESS)
@ -694,23 +700,23 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
#ifdef DEBUG
if (object == NULL)
printf("vm_mmap: no object: vp %x, pager %x\n",
vp, pager);
vp, pager);
#endif
/*
* Map it directly.
* Allows modifications to go out to the vnode.
* Map it directly. Allows modifications to go out to the
* vnode.
*/
if (flags & MAP_SHARED) {
rv = vm_allocate_with_pager(map, addr, size,
fitit, pager,
foff, FALSE);
fitit, pager,
foff, FALSE);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
goto out;
}
/*
* Don't cache the object. This is the easiest way
* of ensuring that data gets back to the filesystem
* Don't cache the object. This is the easiest way of
* ensuring that data gets back to the filesystem
* because vnode_pager_deallocate() will fsync the
* vnode. pager_cache() will lose the extra ref.
*/
@ -719,43 +725,42 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
else
vm_object_deallocate(object);
if( map->pmap)
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
if (map->pmap)
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
}
/*
* Copy-on-write of file. Two flavors.
* MAP_COPY is true COW, you essentially get a snapshot of
* the region at the time of mapping. MAP_PRIVATE means only
* that your changes are not reflected back to the object.
* Changes made by others will be seen.
* Copy-on-write of file. Two flavors. MAP_COPY is true COW,
* you essentially get a snapshot of the region at the time of
* mapping. MAP_PRIVATE means only that your changes are not
* reflected back to the object. Changes made by others will
* be seen.
*/
else {
vm_map_t tmap;
vm_offset_t off;
/* locate and allocate the target address space */
rv = vm_map_find(map, NULL, (vm_offset_t)0,
addr, size, fitit);
rv = vm_map_find(map, NULL, (vm_offset_t) 0,
addr, size, fitit);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
goto out;
}
tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
VM_MIN_ADDRESS+size, TRUE);
VM_MIN_ADDRESS + size, TRUE);
off = VM_MIN_ADDRESS;
rv = vm_allocate_with_pager(tmap, &off, size,
TRUE, pager,
foff, FALSE);
TRUE, pager,
foff, FALSE);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
vm_map_deallocate(tmap);
goto out;
}
/*
* (XXX)
* MAP_PRIVATE implies that we see changes made by
* others. To ensure that, we need to guarantee that
* no copy object is created (otherwise original
* (XXX) MAP_PRIVATE implies that we see changes made
* by others. To ensure that, we need to guarantee
* that no copy object is created (otherwise original
* pages would be pushed to the copy object and we
* would never see changes made by others). We
* totally sleeze it right now by marking the object
@ -764,13 +769,12 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
if ((flags & MAP_COPY) == 0)
object->flags |= OBJ_INTERNAL;
rv = vm_map_copy(map, tmap, *addr, size, off,
FALSE, FALSE);
FALSE, FALSE);
object->flags &= ~OBJ_INTERNAL;
/*
* (XXX)
* My oh my, this only gets worse...
* Force creation of a shadow object so that
* vm_map_fork will do the right thing.
* (XXX) My oh my, this only gets worse... Force
* creation of a shadow object so that vm_map_fork
* will do the right thing.
*/
if ((flags & MAP_COPY) == 0) {
vm_map_t tmap;
@ -782,19 +786,18 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
tmap = map;
vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
&tentry, &tobject, &toffset,
&tprot, &twired, &tsu);
&tentry, &tobject, &toffset,
&tprot, &twired, &tsu);
vm_map_lookup_done(tmap, tentry);
}
/*
* (XXX)
* Map copy code cannot detect sharing unless a
* (XXX) Map copy code cannot detect sharing unless a
* sharing map is involved. So we cheat and write
* protect everything ourselves.
*/
vm_object_pmap_copy(object, foff, foff + size);
if( map->pmap)
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
if (map->pmap)
pmap_object_init_pt(map->pmap, *addr, object, foff, size);
vm_object_deallocate(object);
vm_map_deallocate(tmap);
if (rv != KERN_SUCCESS)
@ -803,18 +806,18 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
#ifdef DEBUG
if (mmapdebug & MDB_MAPIT)
printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
curproc->p_pid, *addr, size, pager);
curproc->p_pid, *addr, size, pager);
#endif
}
/*
* Correct protection (default is VM_PROT_ALL).
* If maxprot is different than prot, we must set both explicitly.
* Correct protection (default is VM_PROT_ALL). If maxprot is
* different than prot, we must set both explicitly.
*/
rv = KERN_SUCCESS;
if (maxprot != VM_PROT_ALL)
rv = vm_map_protect(map, *addr, *addr+size, maxprot, TRUE);
rv = vm_map_protect(map, *addr, *addr + size, maxprot, TRUE);
if (rv == KERN_SUCCESS && prot != maxprot)
rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE);
rv = vm_map_protect(map, *addr, *addr + size, prot, FALSE);
if (rv != KERN_SUCCESS) {
(void) vm_deallocate(map, *addr, size);
goto out;
@ -823,7 +826,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
* Shared memory is also shared with children.
*/
if (flags & MAP_SHARED) {
rv = vm_map_inherit(map, *addr, *addr+size, VM_INHERIT_SHARE);
rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
if (rv != KERN_SUCCESS) {
(void) vm_deallocate(map, *addr, size);
goto out;

File diff suppressed because it is too large


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.2 1994/08/02 07:55:31 davidg Exp $
* $Id: vm_object.h,v 1.3 1994/11/06 05:07:52 davidg Exp $
*/
/*
@ -81,97 +81,131 @@
*/
struct vm_object {
struct pglist memq; /* Resident memory */
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
u_short flags; /* see below */
u_short paging_in_progress; /* Paging (in or out) so
don't collapse or destroy */
simple_lock_data_t Lock; /* Synchronization */
int ref_count; /* How many refs?? */
vm_size_t size; /* Object size */
int resident_page_count;
/* number of resident pages */
struct vm_object *copy; /* Object that holds copies of
my changed pages */
vm_pager_t pager; /* Where to get data */
vm_offset_t paging_offset; /* Offset into paging space */
struct vm_object *shadow; /* My shadow */
vm_offset_t shadow_offset; /* Offset in shadow */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
TAILQ_ENTRY(vm_object) reverse_shadow_list; /* chain of objects that are shadowed */
TAILQ_HEAD(rslist, vm_object) reverse_shadow_head; /* objects that this is a shadow for */
struct pglist memq; /* Resident memory */
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
u_short flags; /* see below */
u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
int ref_count; /* How many refs?? */
struct {
int recursion; /* object locking */
struct proc *proc; /* process owned */
} lock;
vm_size_t size; /* Object size */
int resident_page_count;
/* number of resident pages */
struct vm_object *copy; /* Object that holds copies of my changed pages */
vm_pager_t pager; /* Where to get data */
vm_offset_t paging_offset; /* Offset into paging space */
struct vm_object *shadow; /* My shadow */
vm_offset_t shadow_offset; /* Offset in shadow */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
TAILQ_ENTRY(vm_object) reverse_shadow_list; /* chain of objects that are shadowed */
TAILQ_HEAD(rslist, vm_object) reverse_shadow_head; /* objects that this is a shadow for */
};
/*
* Flags
*/
#define OBJ_CANPERSIST 0x0001 /* allow to persist */
#define OBJ_INTERNAL 0x0002 /* internally created object */
#define OBJ_ACTIVE 0x0004 /* used to mark active objects */
#define OBJ_CANPERSIST 0x0001 /* allow to persist */
#define OBJ_INTERNAL 0x0002 /* internally created object */
#define OBJ_ACTIVE 0x0004 /* used to mark active objects */
#define OBJ_DEAD 0x0008 /* used to mark dead objects during rundown */
#define OBJ_ILOCKED 0x0010 /* lock from modification */
#define OBJ_ILOCKWT 0x0020 /* wait for lock from modification */
TAILQ_HEAD(vm_object_hash_head, vm_object_hash_entry);
struct vm_object_hash_entry {
TAILQ_ENTRY(vm_object_hash_entry) hash_links; /* hash chain links */
vm_object_t object; /* object represented */
TAILQ_ENTRY(vm_object_hash_entry) hash_links; /* hash chain links */
vm_object_t object; /* object represented */
};
typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
#ifdef KERNEL
TAILQ_HEAD(object_q, vm_object);
struct object_q vm_object_cached_list; /* list of objects persisting */
int vm_object_cached; /* size of cached list */
simple_lock_data_t vm_cache_lock; /* lock for object cache */
struct object_q vm_object_cached_list; /* list of objects persisting */
int vm_object_cached; /* size of cached list */
simple_lock_data_t vm_cache_lock; /* lock for object cache */
struct object_q vm_object_list; /* list of allocated objects */
long vm_object_count; /* count of all objects */
simple_lock_data_t vm_object_list_lock;
/* lock for object list and count */
struct object_q vm_object_list; /* list of allocated objects */
long vm_object_count; /* count of all objects */
simple_lock_data_t vm_object_list_lock;
vm_object_t kernel_object; /* the single kernel object */
vm_object_t kmem_object;
/* lock for object list and count */
vm_object_t kernel_object; /* the single kernel object */
vm_object_t kmem_object;
#define vm_object_cache_lock() simple_lock(&vm_cache_lock)
#define vm_object_cache_unlock() simple_unlock(&vm_cache_lock)
#endif /* KERNEL */
#endif /* KERNEL */
#define vm_object_sleep(event, object, interruptible) \
thread_sleep((event), &(object)->Lock, (interruptible))
#if 0
#define vm_object_lock_init(object) simple_lock_init(&(object)->Lock)
#define vm_object_lock(object) simple_lock(&(object)->Lock)
#define vm_object_unlock(object) simple_unlock(&(object)->Lock)
#define vm_object_lock_try(object) simple_lock_try(&(object)->Lock)
#define vm_object_sleep(event, object, interruptible) \
thread_sleep((event), &(object)->Lock, (interruptible))
#endif
#define vm_object_lock_init(object) (object->flags &= ~OBJ_ILOCKED, object->lock.recursion = 0, object->lock.proc = 0)
static __inline void
vm_object_lock(vm_object_t obj)
{
if (obj->flags & OBJ_ILOCKED) {
++obj->lock.recursion;
return;
}
obj->flags |= OBJ_ILOCKED;
obj->lock.recursion = 1;
}
static __inline void
vm_object_unlock(vm_object_t obj)
{
--obj->lock.recursion;
if (obj->lock.recursion != 0)
return;
obj->flags &= ~OBJ_ILOCKED;
}
static __inline int
vm_object_lock_try(vm_object_t obj)
{
if (obj->flags & OBJ_ILOCKED) {
++obj->lock.recursion;
return 1;
}
obj->flags |= OBJ_ILOCKED;
obj->lock.recursion = 1;
return 1;
}
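
The new object lock above replaces the simple_lock with an OBJ_ILOCKED flag plus a recursion counter, so the same context can re-acquire the lock it already holds. A stand-alone sketch of that counting scheme (flag value and field names taken from the header; the single-threaded test harness is illustrative only):

#include <stdio.h>

#define OBJ_ILOCKED	0x0010

struct obj {
	unsigned short flags;
	int recursion;
};

static void
obj_lock(struct obj *o)
{
	if (o->flags & OBJ_ILOCKED) {	/* already held: just count the re-entry */
		++o->recursion;
		return;
	}
	o->flags |= OBJ_ILOCKED;
	o->recursion = 1;
}

static void
obj_unlock(struct obj *o)
{
	if (--o->recursion != 0)	/* still held by an outer frame */
		return;
	o->flags &= ~OBJ_ILOCKED;
}

int
main(void)
{
	struct obj o = { 0, 0 };

	obj_lock(&o);
	obj_lock(&o);			/* recursive acquire */
	obj_unlock(&o);
	printf("still locked: %d\n", (o.flags & OBJ_ILOCKED) != 0);	/* 1 */
	obj_unlock(&o);
	printf("still locked: %d\n", (o.flags & OBJ_ILOCKED) != 0);	/* 0 */
	return (0);
}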
#ifdef KERNEL
vm_object_t vm_object_allocate __P((vm_size_t));
void vm_object_cache_clear __P((void));
void vm_object_cache_trim __P((void));
boolean_t vm_object_coalesce __P((vm_object_t, vm_object_t,
vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t));
void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_offset_t, vm_size_t,
vm_object_t *, vm_offset_t *, boolean_t *));
void vm_object_deactivate_pages __P((vm_object_t));
void vm_object_deallocate __P((vm_object_t));
void vm_object_enter __P((vm_object_t, vm_pager_t));
void vm_object_init __P((vm_size_t));
vm_object_t vm_object_lookup __P((vm_pager_t));
boolean_t vm_object_page_clean __P((vm_object_t,
vm_offset_t, vm_offset_t, boolean_t, boolean_t));
void vm_object_page_remove __P((vm_object_t,
vm_offset_t, vm_offset_t));
void vm_object_pmap_copy __P((vm_object_t,
vm_offset_t, vm_offset_t));
void vm_object_pmap_remove __P((vm_object_t,
vm_offset_t, vm_offset_t));
void vm_object_print __P((vm_object_t, boolean_t));
void vm_object_reference __P((vm_object_t));
void vm_object_remove __P((vm_pager_t));
void vm_object_setpager __P((vm_object_t,
vm_pager_t, vm_offset_t, boolean_t));
void vm_object_shadow __P((vm_object_t *,
vm_offset_t *, vm_size_t));
void vm_object_terminate __P((vm_object_t));
vm_object_t vm_object_allocate __P((vm_size_t));
void vm_object_cache_clear __P((void));
void vm_object_cache_trim __P((void));
boolean_t vm_object_coalesce __P((vm_object_t, vm_object_t, vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t));
void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_offset_t, vm_size_t, vm_object_t *, vm_offset_t *, boolean_t *));
void vm_object_deactivate_pages __P((vm_object_t));
void vm_object_deallocate __P((vm_object_t));
void vm_object_enter __P((vm_object_t, vm_pager_t));
void vm_object_init __P((vm_size_t));
vm_object_t vm_object_lookup __P((vm_pager_t));
boolean_t vm_object_page_clean __P((vm_object_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
void vm_object_page_remove __P((vm_object_t, vm_offset_t, vm_offset_t));
void vm_object_pmap_copy __P((vm_object_t, vm_offset_t, vm_offset_t));
void vm_object_pmap_remove __P((vm_object_t, vm_offset_t, vm_offset_t));
void vm_object_print __P((vm_object_t, boolean_t));
void vm_object_reference __P((vm_object_t));
void vm_object_remove __P((vm_pager_t));
void vm_object_setpager __P((vm_object_t, vm_pager_t, vm_offset_t, boolean_t));
void vm_object_shadow __P((vm_object_t *, vm_offset_t *, vm_size_t));
void vm_object_terminate __P((vm_object_t));
#endif
#endif /* _VM_OBJECT_ */
#endif /* _VM_OBJECT_ */

File diff suppressed because it is too large


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_page.h,v 1.9 1994/10/21 01:19:28 wollman Exp $
* $Id: vm_page.h,v 1.10 1994/11/14 08:19:08 bde Exp $
*/
/*
@ -99,21 +99,22 @@
TAILQ_HEAD(pglist, vm_page);
struct vm_page {
TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
* queue or free list (P) */
TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO queue or free list (P) */
TAILQ_ENTRY(vm_page) hashq; /* hash table links (O) */
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */
vm_object_t object; /* which object am I in (O,P)*/
vm_offset_t offset; /* offset into object (O,P) */
vm_object_t object; /* which object am I in (O,P) */
vm_offset_t offset; /* offset into object (O,P) */
vm_offset_t phys_addr; /* physical address of page */
u_short wire_count; /* wired down maps refs (P) */
u_short flags; /* see below */
short hold_count; /* page hold count */
u_short act_count; /* page usage count */
u_short busy; /* page busy count */
vm_offset_t phys_addr; /* physical address of page */
u_short wire_count; /* wired down maps refs (P) */
u_short flags; /* see below */
short hold_count; /* page hold count */
u_short act_count; /* page usage count */
u_short bmapped; /* number of buffers mapped */
u_short busy; /* page busy count */
u_short valid; /* map of valid DEV_BSIZE chunks */
u_short dirty; /* map of dirty DEV_BSIZE chunks */
};
/*
@ -123,7 +124,7 @@ struct vm_page {
*/
#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
#define PG_ACTIVE 0x0002 /* page is in active list (P) */
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P)*/
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P) */
#define PG_CLEAN 0x0008 /* page has not been modified */
#define PG_BUSY 0x0010 /* page is in transit (O) */
#define PG_WANTED 0x0020 /* someone is waiting for page (O) */
@ -135,7 +136,7 @@ struct vm_page {
#define PG_DIRTY 0x0800 /* client flag to set when dirty */
#define PG_REFERENCED 0x1000 /* page has been referenced */
#define PG_VMIO 0x2000 /* VMIO flag */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_CACHE 0x4000 /* On VMIO cache */
#define PG_FREE 0x8000 /* page is in free list */
#if VM_PAGE_DEBUG
@ -147,15 +148,15 @@ struct vm_page {
(PG_ACTIVE | PG_INACTIVE))) \
panic("vm_page_check: not valid!"); \
}
#else /* VM_PAGE_DEBUG */
#else /* VM_PAGE_DEBUG */
#define VM_PAGE_CHECK(mem)
#endif /* VM_PAGE_DEBUG */
#endif /* VM_PAGE_DEBUG */
#ifdef KERNEL
/*
* Each pageable resident page falls into one of three lists:
*
* free
* free
* Available for allocation now.
* inactive
* Not referenced in any map, but still has an
@ -168,26 +169,21 @@ struct vm_page {
* ordered, in LRU-like fashion.
*/
extern
struct pglist vm_page_queue_free; /* memory free queue */
extern
struct pglist vm_page_queue_active; /* active memory queue */
extern
struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern struct pglist vm_page_queue_free; /* memory free queue */
extern struct pglist vm_page_queue_active; /* active memory queue */
extern struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern struct pglist vm_page_queue_cache; /* cache memory queue */
extern
vm_page_t vm_page_array; /* First resident page in table */
extern
long first_page; /* first physical page number */
/* ... represented in vm_page_array */
extern
long last_page; /* last physical page number */
/* ... represented in vm_page_array */
/* [INCLUSIVE] */
extern
vm_offset_t first_phys_addr; /* physical address for first_page */
extern
vm_offset_t last_phys_addr; /* physical address for last_page */
extern vm_page_t vm_page_array; /* First resident page in table */
extern long first_page; /* first physical page number */
/* ... represented in vm_page_array */
extern long last_page; /* last physical page number */
/* ... represented in vm_page_array */
/* [INCLUSIVE] */
extern vm_offset_t first_phys_addr; /* physical address for first_page */
extern vm_offset_t last_phys_addr; /* physical address for last_page */
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
@ -197,11 +193,8 @@ vm_offset_t last_phys_addr; /* physical address for last_page */
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[atop(pa) - first_page ])
extern
simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
page queues */
extern /* lock on free page queue */
simple_lock_data_t vm_page_queue_free_lock;
extern simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive page queues */
extern simple_lock_data_t vm_page_queue_free_lock; /* lock on free page queue */
/*
* Functions implemented as macros
@ -231,21 +224,41 @@ simple_lock_data_t vm_page_queue_free_lock;
(mem)->wire_count = 0; \
(mem)->hold_count = 0; \
(mem)->act_count = 0; \
(mem)->busy = 0; \
(mem)->valid = 0; \
(mem)->dirty = 0; \
(mem)->bmapped = 0; \
}
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif
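
The new valid and dirty fields in struct vm_page track one bit per DEV_BSIZE chunk of the page, which is why VM_PAGE_BITS_ALL is 0xff for 4 KB pages (eight 512-byte chunks) and 0xffff for 8 KB pages. A small sketch of how a byte range within a page maps onto such a bitmask, assuming DEV_BSIZE is 512 and a 4 KB page; the helper name is made up for illustration:

#include <stdio.h>

#define DEV_BSIZE	512
#define PAGE_SIZE	4096		/* assumed */
#define VM_PAGE_BITS_ALL 0xff		/* PAGE_SIZE / DEV_BSIZE = 8 bits */

/* Bitmask covering the DEV_BSIZE chunks touched by [base, base+size) within a page. */
static unsigned short
page_bits(int base, int size)
{
	int first = base / DEV_BSIZE;
	int last = (base + size - 1) / DEV_BSIZE;
	unsigned short bits = 0;
	int i;

	for (i = first; i <= last; i++)
		bits |= 1 << i;
	return (bits);
}

int
main(void)
{
	printf("whole page: 0x%02x\n", page_bits(0, PAGE_SIZE));	/* 0xff */
	printf("one sector: 0x%02x\n", page_bits(1024, 512));		/* 0x04 */
	printf("straddling: 0x%02x\n", page_bits(500, 100));		/* 0x03 */
	return (0);
}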
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t, int));
void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
void vm_page_set_dirty __P((vm_page_t, int, int));
void vm_page_set_clean __P((vm_page_t, int, int));
int vm_page_is_clean __P((vm_page_t, int, int));
void vm_page_set_valid __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
/*
@ -268,13 +281,13 @@ static __inline void
vm_page_unhold(vm_page_t mem)
{
#ifdef DIAGNOSTIC
if( --mem->hold_count < 0)
if (--mem->hold_count < 0)
panic("vm_page_unhold: hold count < 0!!!");
#else
--mem->hold_count;
#endif
}
#endif /* KERNEL */
#endif /* KERNEL */
#endif /* !_VM_PAGE_ */
#endif /* !_VM_PAGE_ */

File diff suppressed because it is too large


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Author: Avadis Tevanian, Jr.
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.h,v 1.5 1994/08/21 07:19:45 paul Exp $
* $Id: vm_pageout.h,v 1.6 1994/10/09 01:52:16 phk Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@ -75,8 +75,8 @@
* Exported data structures.
*/
extern int vm_pages_needed; /* should be some "event" structure */
simple_lock_data_t vm_pages_needed_lock;
extern int vm_pages_needed; /* should be some "event" structure */
simple_lock_data_t vm_pages_needed_lock;
extern int vm_pageout_pages_needed;
#define VM_PAGEOUT_ASYNC 0
@ -93,26 +93,30 @@ extern int vm_pageout_pages_needed;
#define VM_WAIT vm_wait()
inline static void vm_wait() {
inline static void
vm_wait()
{
int s;
s = splhigh();
if (curproc == pageproc) {
vm_pageout_pages_needed = 1;
tsleep((caddr_t) &vm_pageout_pages_needed, PSWP, "vmwait", 0);
tsleep((caddr_t) & vm_pageout_pages_needed, PSWP, "vmwait", 0);
vm_pageout_pages_needed = 0;
} else {
wakeup((caddr_t) &vm_pages_needed);
tsleep((caddr_t) &cnt.v_free_count, PVM, "vmwait", 0);
wakeup((caddr_t) & vm_pages_needed);
tsleep((caddr_t) & cnt.v_free_count, PVM, "vmwait", 0);
}
splx(s);
}
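
vm_wait() above distinguishes the pageout daemon from ordinary processes: the daemon sleeps on its private vm_pageout_pages_needed channel, while everyone else wakes the daemon and sleeps until free pages appear. A stand-alone restatement of that control flow with stubbed sleep/wakeup primitives, purely illustrative (the real code runs at splhigh() and uses tsleep()/wakeup()):

#include <stdio.h>

/* Stubs standing in for tsleep()/wakeup(); they only trace the flow here. */
static void ksleep(const char *chan) { printf("sleep on %s\n", chan); }
static void kwakeup(const char *chan) { printf("wakeup %s\n", chan); }

static int curproc_is_pageproc;
static int vm_pageout_pages_needed;

static void
vm_wait_sketch(void)
{
	if (curproc_is_pageproc) {
		/* The daemon itself is short of pages: flag it and wait. */
		vm_pageout_pages_needed = 1;
		ksleep("vm_pageout_pages_needed");
		vm_pageout_pages_needed = 0;
	} else {
		/* Ordinary process: kick the daemon, then wait for free pages. */
		kwakeup("vm_pages_needed");
		ksleep("cnt.v_free_count");
	}
}

int
main(void)
{
	vm_wait_sketch();		/* as a normal process */
	curproc_is_pageproc = 1;
	vm_wait_sketch();		/* as the pageout daemon */
	return (0);
}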
#ifdef KERNEL
int vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
int vm_pageout_clean __P((vm_page_t, int));
int vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
int vm_pageout_clean __P((vm_page_t, int));
#endif
#endif


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.9 1994/12/19 00:02:56 davidg Exp $
* $Id: vm_pager.c,v 1.10 1994/12/23 04:56:51 davidg Exp $
*/
/*
@ -89,7 +89,7 @@ struct pagerops *pagertab[] = {
&vnodepagerops, /* PG_VNODE */
&devicepagerops, /* PG_DEV */
};
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);
int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
struct pagerops *dfltpagerops = NULL; /* default pager */
@ -120,7 +120,7 @@ vm_pager_init()
*/
for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
if (pgops)
(*(*pgops)->pgo_init)();
(*(*pgops)->pgo_init) ();
if (dfltpagerops == NULL)
panic("no default pager");
}
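
vm_pager_init() walks pagertab[] and calls each pager's pgo_init hook; the rest of vm_pager.c dispatches through the same ops table (pgo_alloc, pgo_getpages, and so on). A minimal stand-alone model of that ops-table dispatch, with invented pager names, just to show the pattern:

#include <stdio.h>

struct pagerops {
	void (*pgo_init)(void);
};

static void swap_init(void)   { printf("swap pager init\n"); }
static void vnode_init(void)  { printf("vnode pager init\n"); }
static void device_init(void) { printf("device pager init\n"); }

static struct pagerops swappagerops   = { swap_init };
static struct pagerops vnodepagerops  = { vnode_init };
static struct pagerops devicepagerops = { device_init };

/* Table indexed by pager type, modelled on pagertab[] above. */
static struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

int
main(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (*pgops)
			(*(*pgops)->pgo_init)();
	return (0);
}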
@ -130,6 +130,7 @@ vm_pager_bufferinit()
{
struct buf *bp;
int i;
bp = swbuf;
/*
* Now set up swap and physical I/O buffer headers.
@ -143,8 +144,8 @@ vm_pager_bufferinit()
bp->b_vnbufs.le_next = NOLIST;
bp->b_actf = NULL;
swapbkva = kmem_alloc_pageable( pager_map, nswbuf * MAXPHYS);
if( !swapbkva)
swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
if (!swapbkva)
panic("Not enough pager_map VM space for physical buffers");
}
@ -165,34 +166,34 @@ vm_pager_allocate(type, handle, size, prot, off)
ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
if (ops)
return ((*ops->pgo_alloc)(handle, size, prot, off));
return ((*ops->pgo_alloc) (handle, size, prot, off));
return (NULL);
}
void
vm_pager_deallocate(pager)
vm_pager_t pager;
vm_pager_t pager;
{
if (pager == NULL)
panic("vm_pager_deallocate: null pager");
(*pager->pg_ops->pgo_dealloc)(pager);
(*pager->pg_ops->pgo_dealloc) (pager);
}
int
vm_pager_get_pages(pager, m, count, reqpage, sync)
vm_pager_t pager;
vm_page_t *m;
int count;
int reqpage;
boolean_t sync;
vm_pager_t pager;
vm_page_t *m;
int count;
int reqpage;
boolean_t sync;
{
int i;
if (pager == NULL) {
for (i=0;i<count;i++) {
if( i != reqpage) {
for (i = 0; i < count; i++) {
if (i != reqpage) {
PAGE_WAKEUP(m[i]);
vm_page_free(m[i]);
}
@ -200,35 +201,34 @@ vm_pager_get_pages(pager, m, count, reqpage, sync)
vm_page_zero_fill(m[reqpage]);
return VM_PAGER_OK;
}
if( pager->pg_ops->pgo_getpages == 0) {
for(i=0;i<count;i++) {
if( i != reqpage) {
if (pager->pg_ops->pgo_getpages == 0) {
for (i = 0; i < count; i++) {
if (i != reqpage) {
PAGE_WAKEUP(m[i]);
vm_page_free(m[i]);
}
}
return(VM_PAGER_GET(pager, m[reqpage], sync));
return (VM_PAGER_GET(pager, m[reqpage], sync));
} else {
return(VM_PAGER_GET_MULTI(pager, m, count, reqpage, sync));
return (VM_PAGER_GET_MULTI(pager, m, count, reqpage, sync));
}
}
int
vm_pager_put_pages(pager, m, count, sync, rtvals)
vm_pager_t pager;
vm_page_t *m;
int count;
boolean_t sync;
int *rtvals;
vm_pager_t pager;
vm_page_t *m;
int count;
boolean_t sync;
int *rtvals;
{
int i;
if( pager->pg_ops->pgo_putpages)
return(VM_PAGER_PUT_MULTI(pager, m, count, sync, rtvals));
if (pager->pg_ops->pgo_putpages)
return (VM_PAGER_PUT_MULTI(pager, m, count, sync, rtvals));
else {
for(i=0;i<count;i++) {
rtvals[i] = VM_PAGER_PUT( pager, m[i], sync);
for (i = 0; i < count; i++) {
rtvals[i] = VM_PAGER_PUT(pager, m[i], sync);
}
return rtvals[0];
}
@ -236,12 +236,12 @@ vm_pager_put_pages(pager, m, count, sync, rtvals)
boolean_t
vm_pager_has_page(pager, offset)
vm_pager_t pager;
vm_offset_t offset;
vm_pager_t pager;
vm_offset_t offset;
{
if (pager == NULL)
panic("vm_pager_has_page: null pager");
return ((*pager->pg_ops->pgo_haspage)(pager, offset));
return ((*pager->pg_ops->pgo_haspage) (pager, offset));
}
/*
@ -255,37 +255,37 @@ vm_pager_sync()
for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
if (pgops)
(*(*pgops)->pgo_putpage)(NULL, NULL, 0);
(*(*pgops)->pgo_putpage) (NULL, NULL, 0);
}
#if 0
void
vm_pager_cluster(pager, offset, loff, hoff)
vm_pager_t pager;
vm_offset_t offset;
vm_offset_t *loff;
vm_offset_t *hoff;
vm_pager_t pager;
vm_offset_t offset;
vm_offset_t *loff;
vm_offset_t *hoff;
{
if (pager == NULL)
panic("vm_pager_cluster: null pager");
return ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
return ((*pager->pg_ops->pgo_cluster) (pager, offset, loff, hoff));
}
#endif
vm_offset_t
vm_pager_map_page(m)
vm_page_t m;
vm_page_t m;
{
vm_offset_t kva;
kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
return(kva);
return (kva);
}
void
vm_pager_unmap_page(kva)
vm_offset_t kva;
vm_offset_t kva;
{
pmap_kremove(kva);
kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
@ -293,11 +293,11 @@ vm_pager_unmap_page(kva)
vm_page_t
vm_pager_atop(kva)
vm_offset_t kva;
vm_offset_t kva;
{
vm_offset_t pa;
pa = pmap_kextract( kva);
pa = pmap_kextract(kva);
if (pa == 0)
panic("vm_pager_atop");
return (PHYS_TO_VM_PAGE(pa));
@ -322,8 +322,8 @@ vm_pager_lookup(pglist, handle)
*/
int
pager_cache(object, should_cache)
vm_object_t object;
boolean_t should_cache;
vm_object_t object;
boolean_t should_cache;
{
if (object == NULL)
return (KERN_INVALID_ARGUMENT);
@ -343,10 +343,11 @@ pager_cache(object, should_cache)
}
/*
* allocate a physical buffer
* allocate a physical buffer
*/
struct buf *
getpbuf() {
getpbuf()
{
int s;
struct buf *bp;
@ -354,7 +355,7 @@ getpbuf() {
/* get a bp from the swap buffer header pool */
while ((bp = bswlist.tqh_first) == NULL) {
bswneeded = 1;
tsleep((caddr_t)&bswneeded, PVM, "wswbuf", 0);
tsleep((caddr_t) & bswneeded, PVM, "wswbuf", 0);
}
TAILQ_REMOVE(&bswlist, bp, b_freelist);
splx(s);
@ -362,7 +363,7 @@ getpbuf() {
bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
bp->b_data = (caddr_t) (MAXPHYS * (bp-swbuf)) + swapbkva;
bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
bp->b_vnbufs.le_next = NOLIST;
return bp;
}
@ -371,7 +372,8 @@ getpbuf() {
* allocate a physical buffer, if one is available
*/
struct buf *
trypbuf() {
trypbuf()
{
int s;
struct buf *bp;
@ -386,7 +388,7 @@ trypbuf() {
bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
bp->b_data = (caddr_t) (MAXPHYS * (bp-swbuf)) + swapbkva;
bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
bp->b_vnbufs.le_next = NOLIST;
return bp;
}
@ -410,18 +412,17 @@ relpbuf(bp)
crfree(bp->b_wcred);
bp->b_wcred = NOCRED;
}
if (bp->b_vp)
brelvp(bp);
pbrelvp(bp);
if (bp->b_flags & B_WANTED)
wakeup((caddr_t)bp);
wakeup((caddr_t) bp);
TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
if (bswneeded) {
bswneeded = 0;
wakeup((caddr_t)&bswlist);
wakeup((caddr_t) & bswlist);
}
splx(s);
}
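
getpbuf()/trypbuf() above hand out physical buffer headers from the swbuf[] pool and point b_data at a fixed MAXPHYS-sized window of the pager_map KVA reserved in vm_pager_bufferinit(). The address arithmetic, isolated into a runnable sketch; the MAXPHYS, pool-size, and base-address values are assumptions (in the kernel, swapbkva comes from kmem_alloc_pageable() and nswbuf is patchable):

#include <stdio.h>

#define MAXPHYS	(64 * 1024)	/* assumed maximum transfer size */
#define NSWBUF	128		/* assumed pool size */

struct buf { unsigned long b_data; };	/* KVA modelled as a plain number here */

static struct buf swbuf[NSWBUF];
static unsigned long swapbkva = 0x10000000UL;	/* pretend result of kmem_alloc_pageable() */

int
main(void)
{
	struct buf *bp;

	/* Mirrors: bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva; */
	for (bp = swbuf; bp < &swbuf[NSWBUF]; bp++)
		bp->b_data = swapbkva + MAXPHYS * (bp - swbuf);

	printf("buf 0 kva: %#lx\n", swbuf[0].b_data);
	printf("buf 1 kva: %#lx\n", swbuf[1].b_data);	/* exactly MAXPHYS later */
	return (0);
}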


@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
* $Id: vm_pager.h,v 1.3 1994/08/02 07:55:36 davidg Exp $
* $Id: vm_pager.h,v 1.4 1994/10/09 01:52:17 phk Exp $
*/
/*
@ -50,13 +50,12 @@
TAILQ_HEAD(pagerlst, pager_struct);
struct pager_struct {
struct pager_struct {
TAILQ_ENTRY(pager_struct) pg_list; /* links for list management */
caddr_t pg_handle; /* ext. handle (vp, dev, fp) */
int pg_type; /* type of pager */
int pg_flags; /* flags */
struct pagerops *pg_ops; /* pager operations */
void *pg_data; /* private pager data */
caddr_t pg_handle; /* ext. handle (vp, dev, fp) */
int pg_type; /* type of pager */
struct pagerops *pg_ops; /* pager operations */
void *pg_data; /* private pager data */
};
/* pager types */
@ -69,23 +68,15 @@ struct pager_struct {
#define PG_CLUSTERGET 1
#define PG_CLUSTERPUT 2
struct pagerops {
void (*pgo_init) /* Initialize pager. */
__P((void));
vm_pager_t (*pgo_alloc) /* Allocate pager. */
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
void (*pgo_dealloc) /* Disassociate. */
__P((vm_pager_t));
int (*pgo_getpage)
__P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_getpages) /* Get (read) page. */
__P((vm_pager_t, vm_page_t *, int, int, boolean_t));
int (*pgo_putpage)
__P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_putpages) /* Put (write) page. */
__P((vm_pager_t, vm_page_t *, int, boolean_t, int *));
boolean_t (*pgo_haspage) /* Does pager have page? */
__P((vm_pager_t, vm_offset_t));
struct pagerops {
void (*pgo_init) __P((void)); /* Initialize pager. */
vm_pager_t(*pgo_alloc) __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t)); /* Allocate pager. */
void (*pgo_dealloc) __P((vm_pager_t)); /* Disassociate. */
int (*pgo_getpage) __P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_getpages) __P((vm_pager_t, vm_page_t *, int, int, boolean_t)); /* Get (read) page. */
int (*pgo_putpage) __P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_putpages) __P((vm_pager_t, vm_page_t *, int, boolean_t, int *)); /* Put (write) page. */
boolean_t(*pgo_haspage) __P((vm_pager_t, vm_offset_t)); /* Does pager have page? */
};
#define VM_PAGER_ALLOC(h, s, p, o) (*(pg)->pg_ops->pgo_alloc)(h, s, p, o)
@ -115,24 +106,19 @@ struct pagerops {
#ifdef KERNEL
extern struct pagerops *dfltpagerops;
vm_pager_t vm_pager_allocate
__P((int, caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
vm_page_t vm_pager_atop __P((vm_offset_t));
void vm_pager_deallocate __P((vm_pager_t));
int vm_pager_get_pages
__P((vm_pager_t, vm_page_t *, int, int, boolean_t));
boolean_t vm_pager_has_page __P((vm_pager_t, vm_offset_t));
void vm_pager_init __P((void));
vm_pager_t vm_pager_lookup __P((struct pagerlst *, caddr_t));
vm_offset_t vm_pager_map_pages __P((vm_page_t *, int, boolean_t));
vm_offset_t vm_pager_map_page __P((vm_page_t));
int vm_pager_put_pages
__P((vm_pager_t, vm_page_t *, int, boolean_t, int *));
void vm_pager_sync __P((void));
void vm_pager_unmap_pages __P((vm_offset_t, int));
void vm_pager_unmap_page __P((vm_offset_t));
#define vm_pager_cancluster(p, b) ((p)->pg_flags & (b))
vm_pager_t vm_pager_allocate __P((int, caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
vm_page_t vm_pager_atop __P((vm_offset_t));
void vm_pager_deallocate __P((vm_pager_t));
int vm_pager_get_pages __P((vm_pager_t, vm_page_t *, int, int, boolean_t));
boolean_t vm_pager_has_page __P((vm_pager_t, vm_offset_t));
void vm_pager_init __P((void));
vm_pager_t vm_pager_lookup __P((struct pagerlst *, caddr_t));
vm_offset_t vm_pager_map_pages __P((vm_page_t *, int, boolean_t));
vm_offset_t vm_pager_map_page __P((vm_page_t));
int vm_pager_put_pages __P((vm_pager_t, vm_page_t *, int, boolean_t, int *));
void vm_pager_sync __P((void));
void vm_pager_unmap_pages __P((vm_offset_t, int));
void vm_pager_unmap_page __P((vm_offset_t));
/*
* XXX compat with old interface
@ -154,4 +140,4 @@ void vm_pager_unmap_page __P((vm_offset_t));
})
#endif
#endif /* _VM_PAGER_ */
#endif /* _VM_PAGER_ */


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_param.h,v 1.3 1994/08/02 07:55:37 davidg Exp $
*/
/*
@ -76,7 +76,8 @@
/*
* This belongs in types.h, but breaks too many existing programs.
*/
typedef int boolean_t;
typedef int boolean_t;
#define TRUE 1
#define FALSE 0
@ -94,36 +95,51 @@ typedef int boolean_t;
* we can easily make them constant if we so desire.
*/
#ifndef PAGE_SIZE
#define PAGE_SIZE cnt.v_page_size /* size of page */
#define PAGE_SIZE cnt.v_page_size /* size of page */
#endif
#ifndef PAGE_MASK
#define PAGE_MASK page_mask /* size of page - 1 */
#define PAGE_MASK page_mask /* size of page - 1 */
#endif
#ifndef PAGE_SHIFT
#define PAGE_SHIFT page_shift /* bits to shift for pages */
#define PAGE_SHIFT page_shift /* bits to shift for pages */
#endif
#endif
#ifdef KERNEL
extern vm_size_t page_mask;
extern int page_shift;
extern vm_size_t page_mask;
extern int page_shift;
#endif
/*
* CTL_VM identifiers
*/
#define VM_METER 1 /* struct vmmeter */
#define VM_LOADAVG 2 /* struct loadavg */
#define VM_MAXID 3 /* number of valid vm ids */
#define VM_METER 1 /* struct vmmeter */
#define VM_LOADAVG 2 /* struct loadavg */
#define VM_V_FREE_MIN 3 /* cnt.v_free_min */
#define VM_V_FREE_TARGET 4 /* cnt.v_free_target */
#define VM_V_FREE_RESERVED 5 /* cnt.v_free_reserved */
#define VM_V_INACTIVE_TARGET 6 /* cnt.v_inactive_target */
#define VM_V_CACHE_MIN 7 /* cnt.v_cache_max */
#define VM_V_CACHE_MAX 8 /* cnt.v_cache_min */
#define VM_V_PAGEOUT_FREE_MIN 9 /* cnt.v_pageout_free_min */
#define VM_MAXID 10 /* number of valid vm ids */
#define CTL_VM_NAMES { \
{ 0, 0 }, \
{ "vmmeter", CTLTYPE_STRUCT }, \
{ "loadavg", CTLTYPE_STRUCT }, \
{ "v_free_min", CTLTYPE_INT }, \
{ "v_free_target", CTLTYPE_INT }, \
{ "v_free_reserved", CTLTYPE_INT }, \
{ "v_inactive_target", CTLTYPE_INT }, \
{ "v_cache_min", CTLTYPE_INT }, \
{ "v_cache_max", CTLTYPE_INT }, \
{ "v_pageout_free_min", CTLTYPE_INT}, \
}
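
The new VM_V_* identifiers extend the CTL_VM sysctl namespace so the pageout thresholds become tunable. Presumably they are read (and, where permitted, written) through the usual two-level MIB; a hedged user-space example, assuming the standard sysctl(3) interface of the era:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_VM, VM_V_FREE_MIN };
	int value;
	size_t len = sizeof(value);

	if (sysctl(mib, 2, &value, &len, NULL, 0) == -1) {
		perror("sysctl vm.v_free_min");
		return (1);
	}
	printf("vm.v_free_min = %d\n", value);
	return (0);
}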
/*
/*
* Return values from the VM routines.
*/
#define KERN_SUCCESS 0
@ -142,50 +158,12 @@ extern int page_shift;
* No rounding is used.
*/
#ifdef KERNEL
#if 0
#ifndef atop
#define atop(x) (((unsigned)(x)) >> PAGE_SHIFT)
#endif
#ifndef ptoa
#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT))
#endif
/*
* Round off or truncate to the nearest page. These will work
* for either addresses or counts (i.e., 1 byte rounds to 1 page).
*/
#ifndef round_page
#define round_page(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
#endif
#ifndef trunc_page
#define trunc_page(x) \
((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
#endif
#ifndef num_pages
#define num_pages(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) >> PAGE_SHIFT))
#endif
#endif
#define num_pages(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) >> PAGE_SHIFT))
extern vm_size_t mem_size; /* size of physical memory (bytes) */
extern vm_offset_t first_addr; /* first physical page */
extern vm_offset_t last_addr; /* last physical page */
#else
#if 0
/* out-of-kernel versions of round_page and trunc_page */
#define round_page(x) \
((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * vm_page_size)
#define trunc_page(x) \
((((vm_offset_t)(x)) / vm_page_size) * vm_page_size)
#endif
#endif /* KERNEL */
#endif /* ASSEMBLER */
#endif /* _VM_PARAM_ */
extern vm_size_t mem_size; /* size of physical memory (bytes) */
extern vm_offset_t first_addr; /* first physical page */
extern vm_offset_t last_addr; /* last physical page */
#endif /* KERNEL */
#endif /* ASSEMBLER */
#endif /* _VM_PARAM_ */


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_prot.h,v 1.3 1994/08/02 07:55:38 davidg Exp $
*/
/*
@ -77,7 +77,7 @@
* vm_prot_t VM protection values.
*/
typedef u_char vm_prot_t;
typedef u_char vm_prot_t;
/*
* Protection values, defined as bits within the vm_prot_t type
@ -101,4 +101,4 @@ typedef u_char vm_prot_t;
#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
#endif /* _VM_PROT_ */
#endif /* _VM_PROT_ */


@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
* $Id: vm_swap.c,v 1.11 1994/10/22 17:53:35 phk Exp $
* $Id: vm_swap.c,v 1.12 1994/11/22 08:47:20 davidg Exp $
*/
#include <sys/param.h>
@ -51,15 +51,18 @@
* Indirect driver for multi-controller paging.
*/
int nswap, nswdev;
int vm_swap_size;
int nswap, nswdev;
int vm_swap_size;
#ifdef SEQSWAP
int niswdev; /* number of interleaved swap devices */
int niswap; /* size of interleaved swap area */
int niswdev; /* number of interleaved swap devices */
int niswap; /* size of interleaved swap area */
#endif
int bswneeded;
vm_offset_t swapbkva; /* swap buffers kva */
/*
* Set up swap devices.
* Initialize linked list of free swap
@ -75,12 +78,12 @@ swapinit()
int error;
/*
* Count swap devices, and adjust total swap space available.
* Some of the space will not be countable until later (dynamically
* Count swap devices, and adjust total swap space available. Some of
* the space will not be countable until later (dynamically
* configurable devices) and some of the counted space will not be
* available until a swapon() system call is issued, both usually
* happen when the system goes multi-user.
*
*
* If using NFS for swap, swdevt[0] will already be bdevvp'd. XXX
*/
#ifdef SEQSWAP
@ -104,7 +107,7 @@ swapinit()
/*
* The remainder must be sequential
*/
for ( ; swp->sw_dev != NODEV; swp++) {
for (; swp->sw_dev != NODEV; swp++) {
if ((swp->sw_flags & SW_SEQUENTIAL) == 0)
panic("binit: mis-ordered swap devices");
nswdev++;
@ -136,13 +139,13 @@ swapinit()
panic("swapvp");
#endif
/*
* If there is no swap configured, tell the user. We don't automatically
* activate any swapspaces in the kernel; the user must explicitly use
* swapon to enable swapping on a device.
* If there is no swap configured, tell the user. We don't
* automatically activate any swapspaces in the kernel; the user must
* explicitly use swapon to enable swapping on a device.
*/
if (nswap == 0)
printf("WARNING: no swap space found\n");
for (swp = swdevt; ;swp++) {
for (swp = swdevt;; swp++) {
if (swp->sw_dev == NODEV) {
if (swp->sw_vp == NULL)
break;
@ -151,8 +154,8 @@ swapinit()
error = swfree(p, swp - swdevt);
if (error) {
printf(
"Couldn't enable swapspace %d, error = %d",
swp-swdevt,error);
"Couldn't enable swapspace %d, error = %d",
swp - swdevt, error);
}
}
}
@ -168,10 +171,9 @@ swstrategy(bp)
#ifdef GENERIC
/*
* A mini-root gets copied into the front of the swap
* and we run over top of the swap area just long
* enough for us to do a mkfs and restor of the real
* root (sure beats rewriting standalone restor).
* A mini-root gets copied into the front of the swap and we run over
* top of the swap area just long enough for us to do a mkfs and
* restor of the real root (sure beats rewriting standalone restor).
*/
#define MINIROOTSIZE 4096
if (rootdev == dumpdev)
@ -189,7 +191,7 @@ swstrategy(bp)
if (bp->b_blkno < niswap) {
if (niswdev > 1) {
off = bp->b_blkno % dmmax;
if (off+sz > dmmax) {
if (off + sz > dmmax) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
biodone(bp);
@ -198,7 +200,7 @@ swstrategy(bp)
seg = bp->b_blkno / dmmax;
index = seg % niswdev;
seg /= niswdev;
bp->b_blkno = seg*dmmax + off;
bp->b_blkno = seg * dmmax + off;
} else
index = 0;
} else {
@ -206,16 +208,16 @@ swstrategy(bp)
bp->b_blkno -= niswap;
for (index = niswdev, swp = &swdevt[niswdev];
swp->sw_dev != NODEV;
swp++, index++) {
swp->sw_dev != NODEV;
swp++, index++) {
if (bp->b_blkno < swp->sw_nblks)
break;
bp->b_blkno -= swp->sw_nblks;
}
if (swp->sw_dev == NODEV ||
bp->b_blkno+sz > swp->sw_nblks) {
bp->b_blkno + sz > swp->sw_nblks) {
bp->b_error = swp->sw_dev == NODEV ?
ENODEV : EINVAL;
ENODEV : EINVAL;
bp->b_flags |= B_ERROR;
biodone(bp);
return;
@ -223,7 +225,7 @@ swstrategy(bp)
}
#else
off = bp->b_blkno % dmmax;
if (off+sz > dmmax) {
if (off + sz > dmmax) {
bp->b_error = EINVAL;
bp->b_flags |= B_ERROR;
biodone(bp);
@ -232,7 +234,7 @@ swstrategy(bp)
seg = bp->b_blkno / dmmax;
index = seg % nswdev;
seg /= nswdev;
bp->b_blkno = seg*dmmax + off;
bp->b_blkno = seg * dmmax + off;
#endif
} else
index = 0;
@ -252,13 +254,13 @@ swstrategy(bp)
vp->v_numoutput--;
if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
vp->v_flag &= ~VBWAIT;
wakeup((caddr_t)&vp->v_numoutput);
wakeup((caddr_t) & vp->v_numoutput);
}
}
sp->sw_vp->v_numoutput++;
}
if (bp->b_vp != NULL)
brelvp(bp);
pbrelvp(bp);
bp->b_vp = sp->sw_vp;
VOP_STRATEGY(bp);
}
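
In the non-sequential case above, swstrategy() stripes swap across nswdev devices in dmmax-block stripes: the offset inside a stripe stays put, while the stripe number is split into a device index and a per-device stripe number. That arithmetic, isolated into a runnable sketch (the dmmax and nswdev values are made up):

#include <stdio.h>

#define DMMAX	2048	/* assumed stripe size in blocks */
#define NSWDEV	3	/* assumed number of interleaved swap devices */

/* Translate an interleaved swap block number into (device index, device-local block). */
static void
swap_stripe(long blkno, int *index, long *devblk)
{
	long off = blkno % DMMAX;	/* offset inside the stripe */
	long seg = blkno / DMMAX;	/* global stripe number */

	*index = seg % NSWDEV;		/* which device owns this stripe */
	seg /= NSWDEV;			/* stripe number on that device */
	*devblk = seg * DMMAX + off;
}

int
main(void)
{
	long blknos[] = { 0, 2047, 2048, 6144, 10000 };
	int i;

	for (i = 0; i < 5; i++) {
		int index;
		long devblk;

		swap_stripe(blknos[i], &index, &devblk);
		printf("blk %5ld -> dev %d, blk %ld\n", blknos[i], index, devblk);
	}
	return (0);
}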
@ -269,8 +271,9 @@ swstrategy(bp)
* if already swapping on this device.
*/
struct swapon_args {
char *name;
char *name;
};
/* ARGSUSED */
int
swapon(p, uap, retval)
@ -296,7 +299,7 @@ swapon(p, uap, retval)
vrele(vp);
return (ENOTBLK);
}
dev = (dev_t)vp->v_rdev;
dev = (dev_t) vp->v_rdev;
if (major(dev) >= nblkdev) {
vrele(vp);
return (ENXIO);
@ -318,11 +321,11 @@ swapon(p, uap, retval)
#ifdef SEQSWAP
/*
* If we have reached a non-freed sequential device without
* finding what we are looking for, it is an error.
* That is because all interleaved devices must come first
* and sequential devices must be freed in order.
* finding what we are looking for, it is an error. That is
* because all interleaved devices must come first and
* sequential devices must be freed in order.
*/
if ((sp->sw_flags & (SW_SEQUENTIAL|SW_FREED)) == SW_SEQUENTIAL)
if ((sp->sw_flags & (SW_SEQUENTIAL | SW_FREED)) == SW_SEQUENTIAL)
break;
#endif
}
@ -351,22 +354,22 @@ swfree(p, index)
sp = &swdevt[index];
vp = sp->sw_vp;
error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p);
error = VOP_OPEN(vp, FREAD | FWRITE, p->p_ucred, p);
if (error)
return (error);
sp->sw_flags |= SW_FREED;
nblks = sp->sw_nblks;
/*
* Some devices may not exist until after boot time.
* If so, their nblk count will be 0.
* Some devices may not exist until after boot time. If so, their nblk
* count will be 0.
*/
if (nblks <= 0) {
int perdev;
dev_t dev = sp->sw_dev;
if (bdevsw[major(dev)].d_psize == 0 ||
(nblks = (*bdevsw[major(dev)].d_psize)(dev)) == -1) {
(void) VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
(nblks = (*bdevsw[major(dev)].d_psize) (dev)) == -1) {
(void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
sp->sw_flags &= ~SW_FREED;
return (ENXIO);
}
@ -388,7 +391,7 @@ swfree(p, index)
sp->sw_nblks = nblks;
}
if (nblks == 0) {
(void) VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
(void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
sp->sw_flags &= ~SW_FREED;
return (0); /* XXX error? */
}
@ -399,26 +402,26 @@ swfree(p, index)
blk = niswap;
for (swp = &swdevt[niswdev]; swp != sp; swp++)
blk += swp->sw_nblks;
rlist_free(&swaplist, blk, blk + nblks - 1);
rlist_free(&swaplist, blk, blk + nblks - 1);
vm_swap_size += nblks;
return (0);
}
#endif
for (dvbase = dmmax; dvbase < nblks; dvbase += dmmax) {
blk = nblks - dvbase;
#ifdef SEQSWAP
if ((vsbase = index*dmmax + dvbase*niswdev) >= niswap)
if ((vsbase = index * dmmax + dvbase * niswdev) >= niswap)
panic("swfree");
#else
if ((vsbase = index*dmmax + dvbase*nswdev) >= nswap)
if ((vsbase = index * dmmax + dvbase * nswdev) >= nswap)
panic("swfree");
#endif
if (blk > dmmax)
blk = dmmax;
/* XXX -- we need to exclude the first cluster as above */
/* but for now, this will work fine... */
rlist_free(&swaplist, vsbase, vsbase + blk - 1);
rlist_free(&swaplist, vsbase, vsbase + blk - 1);
vm_swap_size += blk;
}
return (0);


@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
*
* @(#)vm_unix.c 8.1 (Berkeley) 6/11/93
* $Id$
* $Id: vm_unix.c,v 1.3 1994/08/02 07:55:41 davidg Exp $
*/
/*
@ -54,7 +54,7 @@
extern int swap_pager_full;
struct obreak_args {
char *nsiz;
char *nsiz;
};
/* ARGSUSED */
@ -69,34 +69,34 @@ obreak(p, uap, retval)
int rv;
register int diff;
old = (vm_offset_t)vm->vm_daddr;
old = (vm_offset_t) vm->vm_daddr;
new = round_page(uap->nsiz);
if ((int)(new - old) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
return(ENOMEM);
if ((int) (new - old) > p->p_rlimit[RLIMIT_DATA].rlim_cur)
return (ENOMEM);
old = round_page(old + ctob(vm->vm_dsize));
diff = new - old;
if (diff > 0) {
if (swap_pager_full) {
return(ENOMEM);
return (ENOMEM);
}
rv = vm_allocate(&vm->vm_map, &old, diff, FALSE);
if (rv != KERN_SUCCESS) {
return(ENOMEM);
return (ENOMEM);
}
vm->vm_dsize += btoc(diff);
} else if (diff < 0) {
diff = -diff;
rv = vm_deallocate(&vm->vm_map, new, diff);
if (rv != KERN_SUCCESS) {
return(ENOMEM);
return (ENOMEM);
}
vm->vm_dsize -= btoc(diff);
}
return(0);
return (0);
}
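
obreak() above implements brk(): it rounds the requested break to a page boundary, checks the growth against RLIMIT_DATA, and then either vm_allocate()s the new space or vm_deallocate()s the shrink, adjusting vm_dsize in clicks (btoc). The bookkeeping in isolation, with an assumed 4 KB page size and a made-up data limit standing in for the rlimit:

#include <stdio.h>
#include <errno.h>

#define PAGE_SIZE	4096UL		/* assumed */
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define btoc(x)		(((x) + PAGE_SIZE - 1) / PAGE_SIZE)	/* bytes to clicks; 1 click == 1 page here */

static unsigned long daddr = 0x100000;	/* pretend start of the data segment */
static unsigned long dsize;		/* current data size, in clicks */
static unsigned long data_limit = 8UL * 1024 * 1024;	/* stand-in for RLIMIT_DATA */

static int
obreak_sketch(unsigned long nsiz)
{
	unsigned long old, new;
	long diff;

	old = daddr;
	new = round_page(nsiz);
	if ((long)(new - old) > (long)data_limit)
		return (ENOMEM);
	old = round_page(old + dsize * PAGE_SIZE);
	diff = (long)(new - old);
	if (diff > 0)
		dsize += btoc(diff);	/* grow: the kernel vm_allocate()s [old, old+diff) */
	else if (diff < 0)
		dsize -= btoc(-diff);	/* shrink: the kernel vm_deallocate()s [new, old) */
	return (0);
}

int
main(void)
{
	obreak_sketch(daddr + 3 * PAGE_SIZE + 100);
	printf("dsize after grow:   %lu pages\n", dsize);	/* 4 */
	obreak_sketch(daddr + PAGE_SIZE);
	printf("dsize after shrink: %lu pages\n", dsize);	/* 1 */
	return (0);
}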
struct ovadvise_args {
int anom;
int anom;
};
/* ARGSUSED */


@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_user.c,v 1.3 1994/08/02 07:55:42 davidg Exp $
* $Id: vm_user.c,v 1.4 1994/10/15 10:28:47 davidg Exp $
*/
/*
@@ -74,7 +74,7 @@
#include <vm/vm.h>
simple_lock_data_t vm_alloc_lock; /* XXX */
simple_lock_data_t vm_alloc_lock; /* XXX */
#ifdef MACHVMCOMPAT
/*
@@ -87,6 +87,7 @@ struct svm_allocate_args {
vm_size_t size;
boolean_t anywhere;
};
/* ARGSUSED */
int
svm_allocate(p, uap, retval)
@@ -97,17 +98,17 @@ svm_allocate(p, uap, retval)
vm_offset_t addr;
int rv;
uap->map = p->p_map; /* XXX */
uap->map = p->p_map; /* XXX */
if (copyin((caddr_t)uap->addr, (caddr_t)&addr, sizeof (addr)))
if (copyin((caddr_t) uap->addr, (caddr_t) & addr, sizeof(addr)))
rv = KERN_INVALID_ARGUMENT;
else
rv = vm_allocate(uap->map, &addr, uap->size, uap->anywhere);
if (rv == KERN_SUCCESS) {
if (copyout((caddr_t)&addr, (caddr_t)uap->addr, sizeof(addr)))
if (copyout((caddr_t) & addr, (caddr_t) uap->addr, sizeof(addr)))
rv = KERN_INVALID_ARGUMENT;
}
return((int)rv);
return ((int) rv);
}
struct svm_deallocate_args {
@@ -115,6 +116,7 @@ struct svm_deallocate_args {
vm_offset_t addr;
vm_size_t size;
};
/* ARGSUSED */
int
svm_deallocate(p, uap, retval)
@@ -124,9 +126,9 @@ svm_deallocate(p, uap, retval)
{
int rv;
uap->map = p->p_map; /* XXX */
uap->map = p->p_map; /* XXX */
rv = vm_deallocate(uap->map, uap->addr, uap->size);
return((int)rv);
return ((int) rv);
}
struct svm_inherit_args {
@@ -135,6 +137,7 @@ struct svm_inherit_args {
vm_size_t size;
vm_inherit_t inherit;
};
/* ARGSUSED */
int
svm_inherit(p, uap, retval)
@@ -144,9 +147,9 @@ svm_inherit(p, uap, retval)
{
int rv;
uap->map = p->p_map; /* XXX */
uap->map = p->p_map; /* XXX */
rv = vm_inherit(uap->map, uap->addr, uap->size, uap->inherit);
return((int)rv);
return ((int) rv);
}
struct svm_protect_args {
@@ -156,6 +159,7 @@ struct svm_protect_args {
boolean_t setmax;
vm_prot_t prot;
};
/* ARGSUSED */
int
svm_protect(p, uap, retval)
@@ -165,9 +169,9 @@ svm_protect(p, uap, retval)
{
int rv;
uap->map = p->p_map; /* XXX */
uap->map = p->p_map; /* XXX */
rv = vm_protect(uap->map, uap->addr, uap->size, uap->setmax, uap->prot);
return((int)rv);
return ((int) rv);
}
#endif
@@ -177,15 +181,15 @@ svm_protect(p, uap, retval)
*/
int
vm_inherit(map, start, size, new_inheritance)
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
vm_inherit_t new_inheritance;
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
vm_inherit_t new_inheritance;
{
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
return (KERN_INVALID_ARGUMENT);
return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
return (vm_map_inherit(map, trunc_page(start), round_page(start + size), new_inheritance));
}
/*
@@ -195,16 +199,16 @@ vm_inherit(map, start, size, new_inheritance)
int
vm_protect(map, start, size, set_maximum, new_protection)
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
boolean_t set_maximum;
vm_prot_t new_protection;
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
boolean_t set_maximum;
vm_prot_t new_protection;
{
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
return (KERN_INVALID_ARGUMENT);
return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
return (vm_map_protect(map, trunc_page(start), round_page(start + size), new_protection, set_maximum));
}
/*
@@ -213,20 +217,19 @@ vm_protect(map, start, size, set_maximum, new_protection)
*/
int
vm_allocate(map, addr, size, anywhere)
register vm_map_t map;
register vm_offset_t *addr;
register vm_size_t size;
boolean_t anywhere;
register vm_map_t map;
register vm_offset_t *addr;
register vm_size_t size;
boolean_t anywhere;
{
int result;
int result;
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
return (KERN_INVALID_ARGUMENT);
if (size == 0) {
*addr = 0;
return(KERN_SUCCESS);
return (KERN_SUCCESS);
}
if (anywhere)
*addr = vm_map_min(map);
else
@@ -235,7 +238,7 @@ vm_allocate(map, addr, size, anywhere)
result = vm_map_find(map, NULL, (vm_offset_t) 0, addr, size, anywhere);
return(result);
return (result);
}
/*
@@ -244,17 +247,17 @@ vm_allocate(map, addr, size, anywhere)
*/
int
vm_deallocate(map, start, size)
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
{
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
return (KERN_INVALID_ARGUMENT);
if (size == (vm_offset_t) 0)
return(KERN_SUCCESS);
return (KERN_SUCCESS);
return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
return (vm_map_remove(map, trunc_page(start), round_page(start + size)));
}
#if 1
@@ -263,27 +266,26 @@ vm_deallocate(map, start, size)
*/
int
vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
register vm_map_t map;
register vm_offset_t *addr;
register vm_size_t size;
boolean_t anywhere;
vm_pager_t pager;
vm_offset_t poffset;
boolean_t internal;
register vm_map_t map;
register vm_offset_t *addr;
register vm_size_t size;
boolean_t anywhere;
vm_pager_t pager;
vm_offset_t poffset;
boolean_t internal;
{
register vm_object_t object;
register int result;
register vm_object_t object;
register int result;
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
return (KERN_INVALID_ARGUMENT);
*addr = trunc_page(*addr);
size = round_page(size);
/*
* Lookup the pager/paging-space in the object cache.
* If it's not there, then create a new object and cache
* it.
* Lookup the pager/paging-space in the object cache. If it's not
* there, then create a new object and cache it.
*/
object = vm_object_lookup(pager);
if (object == NULL) {
@@ -291,8 +293,8 @@ vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
/*
* From Mike Hibler: "unnamed anonymous objects should never
* be on the hash list ... For now you can just change
* vm_allocate_with_pager to not do vm_object_enter if this
* is an internal object ..."
* vm_allocate_with_pager to not do vm_object_enter if this is
* an internal object ..."
*/
if (!internal)
vm_object_enter(object, pager);
@@ -309,6 +311,6 @@ vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
vm_object_deallocate(object);
else if (pager != NULL)
vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
return(result);
return (result);
}
#endif
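
The exported wrappers in this file all follow one shape: reject a NULL map, treat a zero-length request as trivially successful, and otherwise pass a page-aligned range down to the vm_map layer. A standalone sketch of that guard pattern; the types, the KERN_* values and the stub standing in for the real vm_map call are invented for the example, and page rounding is omitted for brevity.

#include <stdio.h>

/*
 * Illustrative sketch of the guard pattern shared by the vm_user.c
 * wrappers.  Types, constants and the stub below are stand-ins.
 */
#define KERN_SUCCESS		0
#define KERN_INVALID_ARGUMENT	4	/* invented value for the sketch */

struct vm_map { int vm_dummy; };	/* stand-in for the real map */
typedef struct vm_map *vm_map_t;
typedef unsigned long vm_offset_t;
typedef unsigned long vm_size_t;

static int
vm_map_remove_stub(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	(void)map;			/* the sketch keeps no map state */
	printf("would remove 0x%lx..0x%lx\n", start, end);
	return (KERN_SUCCESS);
}

static int
deallocate_like(vm_map_t map, vm_offset_t start, vm_size_t size)
{
	if (map == NULL)
		return (KERN_INVALID_ARGUMENT);
	if (size == 0)
		return (KERN_SUCCESS);
	return (vm_map_remove_stub(map, start, start + size));
}

int
main(void)
{
	struct vm_map m;

	printf("NULL map  -> %d\n", deallocate_like(NULL, 0x1000, 0x2000));
	printf("valid map -> %d\n", deallocate_like(&m, 0x1000, 0x2000));
	return (0);
}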

File diff suppressed because it is too large

View File

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vnode_pager.h 8.1 (Berkeley) 6/11/93
* $Id$
* $Id: vnode_pager.h,v 1.3 1994/08/02 07:55:43 davidg Exp $
*/
#ifndef _VNODE_PAGER_
@@ -46,12 +46,12 @@
* VNODE pager private data.
*/
struct vnpager {
int vnp_flags; /* flags */
struct vnode *vnp_vp; /* vnode */
vm_size_t vnp_size; /* vnode current size */
int vnp_flags; /* flags */
struct vnode *vnp_vp; /* vnode */
vm_size_t vnp_size; /* vnode current size */
};
typedef struct vnpager *vn_pager_t;
typedef struct vnpager *vn_pager_t;
#define VN_PAGER_NULL ((vn_pager_t)0)
#endif /* _VNODE_PAGER_ */
#endif /* _VNODE_PAGER_ */
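
The vnode pager's private state is just this small record; vnp_size tracks the vnode's current length so the pager knows how much of the object the file actually backs. A standalone sketch with placeholder types substituted for the kernel's struct vnode and vm_size_t (illustrative only).

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative stand-in for the vnpager layout; struct vnode and
 * vm_size_t are replaced with placeholders for the sketch.
 */
typedef unsigned long vm_size_t;
struct vnode;				/* opaque placeholder */

struct vnpager_sketch {
	int		vnp_flags;	/* flags */
	struct vnode	*vnp_vp;	/* backing vnode */
	vm_size_t	vnp_size;	/* vnode current size */
};

int
main(void)
{
	struct vnpager_sketch *vnp = calloc(1, sizeof(*vnp));

	if (vnp == NULL)
		return (1);
	vnp->vnp_size = 8192;		/* e.g. updated as the file grows */
	printf("pager covers %lu bytes of the vnode\n",
	    (unsigned long)vnp->vnp_size);
	free(vnp);
	return (0);
}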