- Remove a number of extra newlines that do not belong here according to style(9)
- Minor space adjustment in cases where we have "( ", " )", if(), return(), while(), for(), etc.
- Add /* SYMBOL */ after a few #endifs.

Reviewed by:	alc

parent b4f198c344
commit a128794977
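The patterns being fixed are easiest to see side by side. The header below is a hypothetical sketch of the style(9) rules this commit applies; the file and identifier names are invented and do not come from the diff itself.

/*
 * style_example.h - invented illustration of the style(9) cleanups in this
 * commit; none of these identifiers appear in the real diff.
 */
#ifndef _STYLE_EXAMPLE_H_
#define _STYLE_EXAMPLE_H_

/*
 * No blank line is left between a block comment and the code it describes,
 * which is what the extra-newline removals in the hunks below do.
 */
static __inline int
style_max(int a, int b)
{
	/* Was: "if( a > b )" and "return(a);" -- missing space after the
	 * keyword, stray padding inside the parentheses. */
	if (a > b)
		return (a);
	return (b);
}

#endif /* _STYLE_EXAMPLE_H_ */	/* was a bare #endif before the cleanup */

The hunks that follow are these same substitutions applied across the VM code.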
@@ -93,7 +93,6 @@ default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
@@ -108,7 +107,6 @@ default_pager_dealloc(object)
@@ -125,7 +123,6 @@ default_pager_getpages(object, m, count, reqpage)
@@ -149,7 +146,6 @@ default_pager_putpages(object, m, c, sync, rtvals)

@@ -71,7 +71,6 @@
@@ -87,7 +86,6 @@ typedef struct pmap_statistics *pmap_statistics_t;
@@ -147,7 +145,5 @@ void pmap_activate __P((struct thread *td));

@@ -109,7 +109,6 @@
@@ -156,7 +155,6 @@ vm_zone_t swap_zone;
@@ -188,7 +186,6 @@ static void waitchainbuf(struct bio *bp, int count, int done);
@@ -204,14 +201,12 @@ static void swp_pager_async_iodone __P((struct buf *bp));
@@ -229,7 +224,6 @@ static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
@@ -254,7 +248,6 @@ swp_sizecheck()
@@ -271,7 +264,6 @@ swap_pager_init()
@@ -282,7 +274,6 @@ swap_pager_init()
@@ -310,7 +301,6 @@ swap_pager_swap_init()
@@ -326,12 +316,10 @@ swap_pager_swap_init()
@@ -348,7 +336,6 @@ swap_pager_swap_init()
@@ -363,12 +350,9 @@ swap_pager_swap_init()
@@ -388,7 +372,6 @@ swap_pager_swap_init()
@@ -439,7 +422,6 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
@@ -493,7 +475,6 @@ swap_pager_dealloc(object)
@@ -514,7 +495,7 @@ swp_pager_getswapspace(npages)
-return(blk);
+return (blk);
@@ -531,7 +512,6 @@ swp_pager_getswapspace(npages)
@@ -561,7 +541,6 @@ swp_pager_freeswapspace(blk, npages)
@@ -583,7 +562,6 @@ swap_pager_freespace(object, start, size)
@@ -601,7 +579,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
-return(-1);
+return (-1);
@@ -613,7 +591,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
-return(0);
+return (0);
@@ -642,7 +620,6 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
@@ -660,7 +637,6 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
@@ -682,7 +658,6 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
@@ -692,7 +667,6 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
@@ -726,7 +700,6 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
@@ -753,7 +726,6 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
@@ -767,7 +739,6 @@ swap_pager_haspage(object, pindex, before, after)
@@ -783,7 +754,6 @@ swap_pager_haspage(object, pindex, before, after)
@@ -802,7 +772,6 @@ swap_pager_haspage(object, pindex, before, after)
@@ -837,7 +806,6 @@ swap_pager_haspage(object, pindex, before, after)
@@ -862,7 +830,6 @@ swap_pager_unswapped(m)
@@ -884,7 +851,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
@@ -899,7 +865,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
@@ -942,7 +907,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
@@ -966,7 +930,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
@@ -992,7 +955,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
@@ -1010,7 +972,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
@@ -1033,7 +994,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
@@ -1069,7 +1029,6 @@ swap_pager_getpages(object, m, count, reqpage)
@@ -1098,7 +1057,6 @@ swap_pager_getpages(object, m, count, reqpage)
@@ -1114,14 +1072,12 @@ swap_pager_getpages(object, m, count, reqpage)
-return(VM_PAGER_FAIL);
+return (VM_PAGER_FAIL);
@@ -1130,7 +1086,6 @@ swap_pager_getpages(object, m, count, reqpage)
@@ -1162,7 +1117,6 @@ swap_pager_getpages(object, m, count, reqpage)
@@ -1185,9 +1139,7 @@ swap_pager_getpages(object, m, count, reqpage)
@@ -1200,19 +1152,17 @@ swap_pager_getpages(object, m, count, reqpage)
-* mreq is left bussied after completion, but all the other pages
+* mreq is left busied after completion, but all the other pages
-return(VM_PAGER_ERROR);
+return (VM_PAGER_ERROR);
-return(VM_PAGER_OK);
+return (VM_PAGER_OK);
@@ -1245,7 +1195,6 @@ swap_pager_getpages(object, m, count, reqpage)
@@ -1271,7 +1220,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1284,7 +1232,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1322,7 +1269,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1332,7 +1278,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1374,7 +1319,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1426,7 +1370,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1443,7 +1386,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1454,19 +1396,15 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1480,7 +1418,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
@@ -1508,7 +1445,6 @@ swp_pager_sync_iodone(bp)
@@ -1518,13 +1454,11 @@ swp_pager_async_iodone(bp)
@@ -1539,7 +1473,6 @@ swp_pager_async_iodone(bp)
@@ -1557,7 +1490,6 @@ swp_pager_async_iodone(bp)
@@ -1570,7 +1502,6 @@ swp_pager_async_iodone(bp)
@@ -1593,10 +1524,8 @@ swp_pager_async_iodone(bp)
@@ -1639,7 +1568,6 @@ swp_pager_async_iodone(bp)
@@ -1677,14 +1605,12 @@ swp_pager_async_iodone(bp)
@@ -1721,7 +1647,6 @@ swp_pager_async_iodone(bp)
@@ -1730,7 +1655,6 @@ swp_pager_hash(vm_object_t object, vm_pindex_t index)
@@ -1739,7 +1663,7 @@ swp_pager_hash(vm_object_t object, vm_pindex_t index)
-return(pswap);
+return (pswap);
@@ -1755,7 +1679,6 @@ swp_pager_hash(vm_object_t object, vm_pindex_t index)
@@ -1769,7 +1692,6 @@ swp_pager_meta_build(
@@ -1796,7 +1718,6 @@ swp_pager_meta_build(
@@ -1825,7 +1746,6 @@ retry:
@@ -1836,7 +1756,6 @@ retry:
@@ -1854,7 +1773,6 @@ retry:
@@ -1900,7 +1818,6 @@ swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
@@ -1960,7 +1877,6 @@ swp_pager_meta_free_all(vm_object_t object)
@@ -1976,9 +1892,8 @@ swp_pager_meta_ctl(
-return(SWAPBLK_NONE);
+return (SWAPBLK_NONE);
@@ -2002,7 +1917,7 @@ swp_pager_meta_ctl(
-return(r1);
+return (r1);
@@ -2022,7 +1937,6 @@ swp_pager_meta_ctl(
@@ -2060,7 +1974,6 @@ vm_pager_chain_iodone(struct buf *nbp)
@@ -2086,7 +1999,7 @@ getchainbuf(struct bio *bp, struct vnode *vp, int flags)
-return(nbp);
+return (nbp);

@@ -66,9 +66,7 @@
@@ -106,6 +104,5 @@ void swap_pager_page_removed __P((vm_page_t, vm_object_t));

@@ -111,7 +111,7 @@ typedef int boolean_t;
-#endif
+#endif /* _KERNEL */

@@ -57,7 +57,7 @@ int obreak __P((struct thread *, void *, int *));
-#endif
+#endif /* TYPEDEF_FOR_UAP */
@@ -97,7 +97,5 @@ void vsunlock __P((caddr_t, u_int));

@@ -72,7 +72,6 @@
@@ -162,7 +161,6 @@ _unlock_things(struct faultstate *fs, int dealloc)
@@ -294,15 +292,12 @@ RetryFault:;
@@ -311,7 +306,6 @@ RetryFault:;
@@ -338,8 +332,8 @@ RetryFault:;
@@ -357,7 +351,6 @@ RetryFault:;
@@ -371,7 +364,6 @@ RetryFault:;
@@ -403,7 +395,6 @@ readrest:
@@ -441,12 +432,12 @@ readrest:
-for(tmppindex = fs.first_pindex - 1;
+for (tmppindex = fs.first_pindex - 1;
-mt = vm_page_lookup( fs.first_object, tmppindex);
+mt = vm_page_lookup(fs.first_object, tmppindex);
@@ -514,7 +505,7 @@ readrest:
-if(!fs.m) {
+if (!fs.m) {
@@ -535,7 +526,6 @@ readrest:
@@ -575,7 +565,6 @@ readrest:
@@ -626,12 +615,10 @@ readrest:
@@ -709,13 +696,11 @@ readrest:
@@ -730,7 +715,6 @@ readrest:
@@ -747,7 +731,6 @@ readrest:
@@ -776,7 +759,6 @@ readrest:
@@ -845,28 +827,22 @@ readrest:
@@ -876,7 +852,6 @@ readrest:
@@ -899,10 +874,8 @@ readrest:
@@ -928,14 +901,12 @@ vm_fault_wire(map, start, end)
@@ -973,7 +944,6 @@ vm_fault_user_wire(map, start, end)
@@ -1012,7 +982,6 @@ vm_fault_unwire(map, start, end)
@@ -1025,7 +994,6 @@ vm_fault_unwire(map, start, end)
@@ -1041,7 +1009,6 @@ vm_fault_unwire(map, start, end)
@@ -1112,7 +1079,6 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
@@ -1173,7 +1139,6 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
@@ -1214,8 +1179,8 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
-for ( tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
+for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
-if (vm_page_lookup( object, tpindex)) {
+if (vm_page_lookup(object, tpindex)) {
@@ -1223,7 +1188,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
-for(i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
+for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
@@ -1256,7 +1221,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
-for( ; tpindex < endpindex; i++, tpindex++) {
+for (; tpindex < endpindex; i++, tpindex++) {

@@ -99,7 +99,6 @@ extern int maxslp;
@@ -413,7 +412,6 @@ loop:

@@ -90,7 +90,6 @@
@@ -100,7 +99,6 @@ SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL)

@@ -97,7 +97,6 @@ vm_map_t buffer_map=0;
@@ -123,7 +122,6 @@ kmem_alloc_pageable(map, size)
@@ -199,7 +197,6 @@ kmem_alloc(map, size)
@@ -215,7 +212,6 @@ kmem_alloc(map, size)
@@ -443,7 +439,6 @@ bad:
@@ -504,7 +499,6 @@ kmem_free_wakeup(map, addr, size)

@@ -191,7 +191,7 @@ vm_init2(void)
-vmspace_dofree( struct vmspace *vm)
+vmspace_dofree(struct vmspace *vm)
@@ -260,7 +260,7 @@ vmspace_swap_count(struct vmspace *vmspace)
-return(count);
+return (count);
@@ -320,7 +320,7 @@ _vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
-return(_vm_map_lock_upgrade(map, curthread));
+return (_vm_map_lock_upgrade(map, curthread));
@@ -349,19 +349,19 @@ vm_map_clear_recursive(vm_map_t map)
-return(map->min_offset);
+return (map->min_offset);
-return(map->max_offset);
+return (map->max_offset);
-return(map->pmap);
+return (map->pmap);
@@ -454,7 +454,7 @@ vm_map_entry_create(vm_map_t map)
-return(new_entry);
+return (new_entry);
@@ -524,7 +524,6 @@ vm_map_lookup_entry(
@@ -556,7 +555,6 @@ vm_map_lookup_entry(
@@ -564,7 +562,6 @@ vm_map_lookup_entry(
@@ -605,7 +602,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
@@ -614,7 +610,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
@@ -623,7 +618,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
@@ -698,7 +692,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
@@ -716,7 +709,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
@@ -958,7 +950,6 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
@@ -968,7 +959,6 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
@@ -998,7 +988,6 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
@@ -1021,7 +1010,6 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
@@ -1033,7 +1021,6 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
@@ -1145,7 +1132,6 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
@@ -1163,9 +1149,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
@@ -1183,22 +1167,17 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
@@ -1211,7 +1190,6 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
@@ -1230,7 +1208,6 @@ vm_map_madvise(
@@ -1254,7 +1231,6 @@ vm_map_madvise(
@@ -1358,7 +1334,7 @@ vm_map_madvise(
-return(0);
+return (0);
@@ -1593,7 +1569,6 @@ vm_map_pageable(
@@ -1604,9 +1579,7 @@ vm_map_pageable(
@@ -1614,7 +1587,6 @@ vm_map_pageable(
@@ -1889,7 +1861,7 @@ vm_map_clean(
-if (object->size < OFF_TO_IDX( offset + size))
+if (object->size < OFF_TO_IDX(offset + size))
@@ -1984,7 +1956,6 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
@@ -2000,7 +1971,6 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
@@ -2010,7 +1980,6 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
@@ -2123,19 +2092,16 @@ vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
@@ -2389,14 +2355,12 @@ vmspace_fork(struct vmspace *vm1)
@@ -2643,7 +2607,6 @@ Retry:
@@ -2673,7 +2636,6 @@ vmspace_exec(struct proc *p)
@@ -2690,7 +2652,6 @@ vmspace_unshare(struct proc *p)
@@ -2731,28 +2692,23 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
-return(why); \
+return (why); \
@@ -2771,7 +2727,6 @@ RetryLookup:;
@@ -2786,17 +2741,14 @@ RetryLookup:;
@@ -2808,7 +2760,6 @@ RetryLookup:;
@@ -2816,7 +2767,6 @@ RetryLookup:;
@@ -2825,7 +2775,6 @@ RetryLookup:;
@@ -2833,15 +2782,12 @@ RetryLookup:;
@@ -2849,7 +2795,6 @@ RetryLookup:;
@@ -2861,7 +2806,6 @@ RetryLookup:;
@@ -2872,14 +2816,12 @@ RetryLookup:;
@@ -2892,7 +2834,6 @@ RetryLookup:;
@@ -3052,18 +2993,17 @@ vm_uiomove(
@@ -3157,7 +3097,7 @@ vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
-while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
+while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {

@@ -67,7 +67,6 @@
@@ -94,7 +93,6 @@ typedef u_int vm_eflags_t;
@@ -193,7 +191,6 @@ struct vmspace {
@@ -214,7 +211,7 @@ void vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior);
-} while(0)
+} while (0)
@@ -231,7 +228,6 @@ struct pmap *vm_map_pmap(vm_map_t map);
@@ -293,5 +289,5 @@ int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
-#endif
+#endif /* _KERNEL */

@@ -93,7 +93,6 @@ SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
@@ -380,7 +379,6 @@ mmap(td, uap)
@@ -517,7 +515,7 @@ msync(td, uap)
-return(EINVAL);
+return (EINVAL);
@@ -558,7 +556,7 @@ done2:
-return(0);
+return (0);
@@ -594,7 +592,7 @@ munmap(td, uap)
-return(EINVAL);
+return (EINVAL);
@@ -672,7 +670,7 @@ mprotect(td, uap)
-return(EINVAL);
+return (EINVAL);
@@ -716,7 +714,7 @@ minherit(td, uap)
-return(EINVAL);
+return (EINVAL);
@@ -921,7 +919,7 @@ RestartScan:
-error = subyte( vec + lastvecindex, 0);
+error = subyte(vec + lastvecindex, 0);
@@ -932,7 +930,7 @@ RestartScan:
-error = subyte( vec + vecindex, mincoreinfo);
+error = subyte(vec + vecindex, mincoreinfo);
@@ -962,7 +960,7 @@ RestartScan:
-error = subyte( vec + lastvecindex, 0);
+error = subyte(vec + lastvecindex, 0);

|
@ -174,7 +174,7 @@ _vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
object->resident_page_count = 0;
object->shadow_count = 0;
object->pg_color = next_index;
if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
else
incr = size;
@ -308,7 +308,6 @@ vm_object_pip_wait(vm_object_t object, char *waitid)
*
* Returns a new object with the given size.
*/

vm_object_t
vm_object_allocate(objtype_t type, vm_size_t size)
{
@ -589,7 +588,6 @@ vm_object_terminate(vm_object_t object)
*
* The object must be locked.
*/

void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
@ -695,7 +693,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
* stay dirty so do not mess with the page and do not clear the
* object flags.
*/

clearobjflags = 1;

TAILQ_FOREACH(p, &object->memq, listq) {
@ -839,7 +836,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->flags & PG_CLEANCHK) == 0 ||
(tp->busy != 0))
break;
if((tp->queue - tp->pc) == PQ_CACHE) {
if ((tp->queue - tp->pc) == PQ_CACHE) {
vm_page_flag_clear(tp, PG_CLEANCHK);
break;
}
@ -900,7 +897,6 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
* NOTE: If the page is already at VM_PROT_NONE, calling
* vm_page_protect will have no effect.
*/

void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
@ -981,7 +977,6 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
/*
* Locate and adjust resident pages
*/

for (; pindex < end; pindex += 1) {
relookup:
tobject = object;
@ -1076,7 +1071,6 @@ shadowlookup:
* The new object and offset into that object
* are returned in the source parameters.
*/

void
vm_object_shadow(
vm_object_t *object, /* IN/OUT */
@ -1092,7 +1086,6 @@ vm_object_shadow(
/*
* Don't create the new object if the old object isn't shared.
*/

if (source != NULL &&
source->ref_count == 1 &&
source->handle == NULL &&
@ -1128,13 +1121,11 @@ vm_object_shadow(
* Store the offset into the source object, and fix up the offset into
* the new object.
*/

result->backing_object_offset = *offset;

/*
* Return the new things
*/

*offset = 0;
*object = result;
}
@ -1161,7 +1152,6 @@ vm_object_backing_scan(vm_object_t object, int op)
/*
* Initial conditions
*/

if (op & OBSC_TEST_ALL_SHADOWED) {
/*
* We do not want to have to test for the existence of
@ -1174,7 +1164,7 @@ vm_object_backing_scan(vm_object_t object, int op)
*/
if (backing_object->type != OBJT_DEFAULT) {
splx(s);
return(0);
return (0);
}
}
if (op & OBSC_COLLAPSE_WAIT) {
@ -1184,7 +1174,6 @@ vm_object_backing_scan(vm_object_t object, int op)
/*
* Our scan
*/

p = TAILQ_FIRST(&backing_object->memq);
while (p) {
vm_page_t next = TAILQ_NEXT(p, listq);
@ -1201,7 +1190,6 @@ vm_object_backing_scan(vm_object_t object, int op)
* note that we do not busy the backing object's
* page.
*/

if (
p->pindex < backing_offset_index ||
new_pindex >= object->size
@ -1233,7 +1221,6 @@ vm_object_backing_scan(vm_object_t object, int op)
/*
* Check for busy page
*/

if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
vm_page_t pp;

@ -1331,7 +1318,7 @@ vm_object_backing_scan(vm_object_t object, int op)
p = next;
}
splx(s);
return(r);
return (r);
}

@ -1416,19 +1403,16 @@ vm_object_collapse(vm_object_t object)
* vm_object_backing_scan fails the shadowing test in this
* case.
*/

if (backing_object->ref_count == 1) {
/*
* If there is exactly one reference to the backing
* object, we can collapse it into the parent.
*/

vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

/*
* Move the pager from backing_object to object.
*/

if (backing_object->type == OBJT_SWAP) {
vm_object_pip_add(backing_object, 1);

@ -1440,7 +1424,6 @@ vm_object_collapse(vm_object_t object)
* new swapper is able to optimize the
* destroy-source case.
*/

vm_object_pip_add(object, 1);
swap_pager_copy(
backing_object,
@ -1456,7 +1439,6 @@ vm_object_collapse(vm_object_t object)
* backing_object->backing_object moves from within
* backing_object to within object.
*/

TAILQ_REMOVE(
&object->backing_object->shadow_head,
object,
@ -1514,7 +1496,6 @@ vm_object_collapse(vm_object_t object)
* If we do not entirely shadow the backing object,
* there is nothing we can do so we give up.
*/

if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
break;
}
@ -1524,7 +1505,6 @@ vm_object_collapse(vm_object_t object)
* chain. Deallocating backing_object will not remove
* it, since its reference count is at least 2.
*/

TAILQ_REMOVE(
&backing_object->shadow_head,
object,
@ -1611,7 +1591,6 @@ again:
* The busy flags are only cleared at
* interrupt -- minimize the spl transitions
*/

if (vm_page_sleep_busy(p, TRUE, "vmopar"))
goto again;

@ -1714,7 +1693,6 @@ vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex, vm_size_t p
* another object . has a copy elsewhere (any of which mean that the
* pages not mapped to prev_entry may be in use anyway)
*/

if (prev_object->backing_object != NULL) {
return (FALSE);
}
@ -1789,7 +1767,7 @@ _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
tmpe = map->header.next;
entcount = map->nentries;
while (entcount-- && (tmpe != &map->header)) {
if( _vm_object_in_map(map, object, tmpe)) {
if (_vm_object_in_map(map, object, tmpe)) {
return 1;
}
tmpe = tmpe->next;
@ -1799,14 +1777,14 @@ _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
tmpe = tmpm->header.next;
entcount = tmpm->nentries;
while (entcount-- && tmpe != &tmpm->header) {
if( _vm_object_in_map(tmpm, object, tmpe)) {
if (_vm_object_in_map(tmpm, object, tmpe)) {
return 1;
}
tmpe = tmpe->next;
}
} else if ((obj = entry->object.vm_object) != NULL) {
for (; obj; obj = obj->backing_object)
if( obj == object) {
if (obj == object) {
return 1;
}
}
@ -1820,21 +1798,21 @@ vm_object_in_map(vm_object_t object)

/* sx_slock(&allproc_lock); */
LIST_FOREACH(p, &allproc, p_list) {
if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
continue;
if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
/* sx_sunlock(&allproc_lock); */
return 1;
}
}
/* sx_sunlock(&allproc_lock); */
if( _vm_object_in_map( kernel_map, object, 0))
if (_vm_object_in_map(kernel_map, object, 0))
return 1;
if( _vm_object_in_map( kmem_map, object, 0))
if (_vm_object_in_map(kmem_map, object, 0))
return 1;
if( _vm_object_in_map( pager_map, object, 0))
if (_vm_object_in_map(pager_map, object, 0))
return 1;
if( _vm_object_in_map( buffer_map, object, 0))
if (_vm_object_in_map(buffer_map, object, 0))
return 1;
return 0;
}
@ -1949,7 +1927,7 @@ DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
vm_page_t m;

db_printf("new object: %p\n", (void *)object);
if ( nl > 18) {
if (nl > 18) {
c = cngetc();
if (c != ' ')
return;
@ -1967,7 +1945,7 @@ DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
if (rcount) {
db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
(long)fidx, rcount, (long)pa);
if ( nl > 18) {
if (nl > 18) {
c = cngetc();
if (c != ' ')
return;
@ -1997,7 +1975,7 @@ DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
db_printf(" index(%ld)run(%d)pa(0x%lx)",
(long)fidx, rcount, (long)pa);
db_printf("pd(%ld)\n", (long)padiff);
if ( nl > 18) {
if (nl > 18) {
c = cngetc();
if (c != ' ')
return;
@ -2012,7 +1990,7 @@ DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
if (rcount) {
db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
(long)fidx, rcount, (long)pa);
if ( nl > 18) {
if (nl > 18) {
c = cngetc();
if (c != ' ')
return;

@ -123,7 +123,6 @@
* Associated with page of user-allocatable memory is a
* page structure.
*/

static struct vm_page **vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count; /* How big is array? */
static int vm_page_hash_mask; /* Mask for hash function */
@ -160,7 +159,6 @@ vm_set_page_size(void)
* for the object/offset-to-page hash table headers.
* Each page cell is initialized and placed on the free list.
*/

vm_offset_t
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
{
@ -207,7 +205,6 @@ vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
* Initialize the queue headers for the free queue, the active queue
* and the inactive queue.
*/

vm_pageq_init();

/*
@ -255,13 +252,10 @@ vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
* use (taking into account the overhead of a page structure per
* page).
*/

first_page = phys_avail[0] / PAGE_SIZE;

page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
npages = (total - (page_range * sizeof(struct vm_page)) -
(end - new_end)) / PAGE_SIZE;

end = new_end;

/*
@ -317,7 +311,7 @@ vm_page_hash(vm_object_t object, vm_pindex_t pindex)
{
int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

return(i & vm_page_hash_mask);
return (i & vm_page_hash_mask);
}

void
@ -347,7 +341,6 @@ vm_page_busy(vm_page_t m)
*
* wakeup anyone waiting for the page.
*/

void
vm_page_flash(vm_page_t m)
{
@ -364,7 +357,6 @@ vm_page_flash(vm_page_t m)
* page.
*
*/

void
vm_page_wakeup(vm_page_t m)
{
@ -377,7 +369,6 @@ vm_page_wakeup(vm_page_t m)
*
*
*/

void
vm_page_io_start(vm_page_t m)
{
@ -424,7 +415,6 @@ vm_page_unhold(vm_page_t mem)
* protection and therefore can be safely called if the page is already
* at VM_PROT_NONE (it will be a NOP effectively ).
*/

void
vm_page_protect(vm_page_t mem, int prot)
{
@ -507,7 +497,6 @@ vm_page_free_zero(vm_page_t m)
* PG_BUSY to m->busy or vise versa (which would create a timing
* window).
*/

int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
@ -522,17 +511,16 @@ vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
tsleep(m, PVM, msg, 0);
}
splx(s);
return(TRUE);
return (TRUE);
/* not reached */
}
return(FALSE);
return (FALSE);
}
/*
* vm_page_dirty:
*
* make page all dirty
*/

void
vm_page_dirty(vm_page_t m)
{
@ -546,7 +534,6 @@ vm_page_dirty(vm_page_t m)
*
* Set page to not be dirty. Note: does not clear pmap modify bits
*/

void
vm_page_undirty(vm_page_t m)
{
@ -566,7 +553,6 @@ vm_page_undirty(vm_page_t m)
* The object and page must be locked, and must be splhigh.
* This routine may not block.
*/

void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
@ -580,14 +566,12 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
/*
* Record the object/offset pair in this page
*/

m->object = object;
m->pindex = pindex;

/*
* Insert it into the object_object/offset hash table
*/

bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
m->hnext = *bucket;
*bucket = m;
@ -596,14 +580,12 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
/*
* Now link into the object's list of backed pages.
*/

TAILQ_INSERT_TAIL(&object->memq, m, listq);
object->generation++;

/*
* show that the object has one more resident page.
*/

object->resident_page_count++;

/*
@ -626,7 +608,6 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
* The underlying pmap entry (if any) is NOT removed here.
* This routine may not block.
*/

void
vm_page_remove(vm_page_t m)
{
@ -644,7 +625,6 @@ vm_page_remove(vm_page_t m)
/*
* Basically destroy the page.
*/

vm_page_wakeup(m);

object = m->object;
@ -656,7 +636,6 @@ vm_page_remove(vm_page_t m)
* Note: we must NULL-out m->hnext to prevent loops in detached
* buffers with vm_page_lookup().
*/

{
struct vm_page **bucket;

@ -674,13 +653,11 @@ vm_page_remove(vm_page_t m)
/*
* Now remove from the object's list of backed pages.
*/

TAILQ_REMOVE(&object->memq, m, listq);

/*
* And show that the object has one fewer resident page.
*/

object->resident_page_count--;
object->generation++;

@ -702,7 +679,6 @@ vm_page_remove(vm_page_t m)
* This routine may not block.
* This is a critical path routine
*/

vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
@ -713,7 +689,6 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
/*
* Search the hash table for this object/offset pair
*/

retry:
generation = vm_page_bucket_generation;
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
@ -752,7 +727,6 @@ retry:
* or vm_page_dirty() will panic. Dirty pages are not allowed
* on the cache.
*/

void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
@ -806,7 +780,6 @@ vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
* This routine must be called at splvm().
* This routine may not block.
*/

static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
@ -817,7 +790,7 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zer
(pindex + object->pg_color) & PQ_L2_MASK,
prefer_zero
);
return(m);
return (m);
}

/*
@ -838,7 +811,6 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zer
* interrupt (VM_ALLOC_INTERRUPT). We are not allowed to mess with
* the page cache in this case.
*/

vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
@ -853,7 +825,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
/*
* The pager is allowed to eat deeper into the free page list.
*/

if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
page_req = VM_ALLOC_SYSTEM;
};
@ -930,7 +901,6 @@ loop:
/*
* Initialize structure. Only the PG_ZERO flag is inherited.
*/

if (m->flags & PG_ZERO) {
vm_page_zero_count--;
m->flags = PG_ZERO | PG_BUSY;
@ -950,7 +920,6 @@ loop:
* could cause us to block allocating memory). We cannot block
* anywhere.
*/

vm_page_insert(m, object, pindex);

/*
@ -961,7 +930,6 @@ loop:
pagedaemon_wakeup();

splx(s);

return (m);
}

@ -971,7 +939,6 @@ loop:
* Block until free pages are available for allocation
* - Called in various places before memory allocations.
*/

void
vm_wait(void)
{
@ -1001,7 +968,6 @@ vm_wait(void)
* processes will be able to grab memory first. Do not change
* this balance without careful testing first.
*/

void
vm_waitpfault(void)
{
@ -1033,13 +999,10 @@ vm_page_activate(vm_page_t m)

GIANT_REQUIRED;
s = splvm();

if (m->queue != PQ_ACTIVE) {
if ((m->queue - m->pc) == PQ_CACHE)
cnt.v_reactivated++;

vm_pageq_remove(m);

if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
@ -1049,7 +1012,6 @@ vm_page_activate(vm_page_t m)
if (m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
}

splx(s);
}

@ -1124,7 +1086,6 @@ vm_page_free_toq(vm_page_t m)
* callback routine until after we've put the page on the
* appropriate free queue.
*/

vm_pageq_remove_nowakeup(m);
vm_page_remove(m);

@ -1132,7 +1093,6 @@ vm_page_free_toq(vm_page_t m)
* If fictitious remove object association and
* return, otherwise delay object association removal.
*/

if ((m->flags & PG_FICTITIOUS) != 0) {
splx(s);
return;
@ -1153,7 +1113,6 @@ vm_page_free_toq(vm_page_t m)
* If we've exhausted the object's resident pages we want to free
* it up.
*/

if (object &&
(object->type == OBJT_VNODE) &&
((object->flags & OBJ_DEAD) == 0)
@ -1167,12 +1126,11 @@ vm_page_free_toq(vm_page_t m)
/*
* Clear the UNMANAGED flag when freeing an unmanaged page.
*/

if (m->flags & PG_UNMANAGED) {
m->flags &= ~PG_UNMANAGED;
m->flags &= ~PG_UNMANAGED;
} else {
#ifdef __alpha__
pmap_page_is_free(m);
pmap_page_is_free(m);
#endif
}

@ -1189,16 +1147,13 @@ vm_page_free_toq(vm_page_t m)
* Put zero'd pages on the end ( where we look for zero'd pages
* first ) and non-zerod pages at the head.
*/

if (m->flags & PG_ZERO) {
TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
++vm_page_zero_count;
} else {
TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
}

vm_page_free_wakeup();

splx(s);
}

@ -1220,7 +1175,6 @@ vm_page_free_toq(vm_page_t m)
* will eventually be extended to support 4MB unmanaged physical
* mappings.
*/

void
vm_page_unmanage(vm_page_t m)
{
@ -1379,13 +1333,13 @@ vm_page_try_to_cache(vm_page_t m)

if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->flags & (PG_BUSY|PG_UNMANAGED))) {
return(0);
return (0);
}
vm_page_test_dirty(m);
if (m->dirty)
return(0);
return (0);
vm_page_cache(m);
return(1);
return (1);
}

/*
@ -1399,15 +1353,15 @@ vm_page_try_to_free(vm_page_t m)
{
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->flags & (PG_BUSY|PG_UNMANAGED))) {
return(0);
return (0);
}
vm_page_test_dirty(m);
if (m->dirty)
return(0);
return (0);
vm_page_busy(m);
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
return(1);
return (1);
}

/*
@ -1434,7 +1388,6 @@ vm_page_cache(vm_page_t m)
* Remove all pmaps and indicate that the page is not
* writeable or mapped.
*/

vm_page_protect(m, VM_PROT_NONE);
if (m->dirty != 0) {
panic("vm_page_cache: caching a dirty page, pindex: %ld",
@ -1468,7 +1421,6 @@ vm_page_cache(vm_page_t m)
* space from active. The idea is to not force this to happen too
* often.
*/

void
vm_page_dontneed(vm_page_t m)
{
@ -1482,7 +1434,6 @@ vm_page_dontneed(vm_page_t m)
/*
* occassionally leave the page alone
*/

if ((dnw & 0x01F0) == 0 ||
m->queue == PQ_INACTIVE ||
m->queue - m->pc == PQ_CACHE
@ -1565,7 +1516,6 @@ retrylookup:
*
* Inputs are required to range within a page.
*/

__inline int
vm_page_bits(int base, int size)
{
@ -1578,7 +1528,7 @@ vm_page_bits(int base, int size)
);

if (size == 0) /* handle degenerate case */
return(0);
return (0);

first_bit = base >> DEV_BSHIFT;
last_bit = (base + size - 1) >> DEV_BSHIFT;
@ -1614,7 +1564,6 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
* bit is clear, we have to zero out a portion of the
* first block.
*/

if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
(m->valid & (1 << (base >> DEV_BSHIFT))) == 0
) {
@ -1630,9 +1579,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
* valid bit is clear, we have to zero out a portion of
* the last block.
*/

endoff = base + size;

if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
(m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
) {
@ -1654,7 +1601,6 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
* clear dirty bits for DEV_BSIZE chunks that are fully within
* the range.
*/

pagebits = vm_page_bits(base, size);
m->valid |= pagebits;
#if 0 /* NOT YET */
@ -1722,7 +1668,6 @@ vm_page_set_invalid(vm_page_t m, int base, int size)
* Pages are most often semi-valid when the end of a file is mapped
* into memory and the file's size is not page aligned.
*/

void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
@ -1735,7 +1680,6 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
* valid bit may be set ) have already been zerod by
* vm_page_set_validclean().
*/

for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
if (i == (PAGE_SIZE / DEV_BSIZE) ||
(m->valid & (1 << i))
@ -1756,7 +1700,6 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
* as being valid. We can do this if there are no cache consistancy
* issues. e.g. it is ok to do with UFS, but not ok to do with NFS.
*/

if (setvalid)
m->valid = VM_PAGE_BITS_ALL;
}
@ -1770,7 +1713,6 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
*
* May not block.
*/

int
vm_page_is_valid(vm_page_t m, int base, int size)
{
@ -1785,7 +1727,6 @@ vm_page_is_valid(vm_page_t m, int base, int size)
/*
* update dirty bits from pmap/mmu. May not block.
*/

void
vm_page_test_dirty(vm_page_t m)
{

@ -173,7 +173,7 @@ struct vm_page {
#else
#define PQ_CACHESIZE 128
#endif
#endif
#endif /* !defined(PQ_CACHESIZE) */

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31 /* Prime number somewhat less than PQ_HASH_SIZE */
@ -225,7 +225,7 @@ struct vpgqueues {

extern struct vpgqueues vm_page_queues[PQ_COUNT];

#endif
#endif /* !defined(KLD_MODULE) */

/*
* These are the flags defined for vm_page.
@ -256,7 +256,6 @@ extern struct vpgqueues vm_page_queues[PQ_COUNT];
/*
* Misc constants.
*/

#define ACT_DECLINE 1
#define ACT_ADVANCE 3
#define ACT_INIT 5
@ -372,6 +371,5 @@ int vm_page_bits (int, int);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);

#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */

@ -219,7 +219,6 @@ static void vm_pageout_page_stats(void);
* block. Note the careful timing, however, the busy bit isn't set till
* late and we cannot do anything that will mess with the page.
*/

static int
vm_pageout_clean(m)
vm_page_t m;
@ -276,7 +275,6 @@ vm_pageout_clean(m)
* first and attempt to align our cluster, then do a
* forward scan if room remains.
*/

more:
while (ib && pageout_count < vm_pageout_page_count) {
vm_page_t p;
@ -359,7 +357,6 @@ more:
* the parent to do more sophisticated things we may have to change
* the ordering.
*/

int
vm_pageout_flush(mc, count, flags)
vm_page_t *mc;
@ -382,7 +379,6 @@ vm_pageout_flush(mc, count, flags)
* NOTE! mc[i]->dirty may be partial or fragmented due to an
* edge case with file fragments.
*/

for (i = 0; i < count; i++) {
KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
vm_page_io_start(mc[i]);
@ -479,9 +475,9 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
remove_mode = map_remove_only;
if (object->shadow_count > 1)
remove_mode = 1;
/*
* scan the objects entire memory queue
*/
/*
* scan the objects entire memory queue
*/
rcount = object->resident_page_count;
p = TAILQ_FIRST(&object->memq);
while (p && (rcount-- > 0)) {
@ -606,14 +602,13 @@ vm_pageout_map_deactivate_pages(map, desired)
vm_map_unlock(map);
return;
}
#endif
#endif /* !defined(NO_SWAPPING) */

/*
* Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
* to vnode deadlocks. We only do it for OBJT_DEFAULT and OBJT_SWAP objects
* which we know can be trivially freed.
*/

void
vm_pageout_page_free(vm_page_t m) {
vm_object_t object = m->object;
@ -690,12 +685,10 @@ vm_pageout_scan(int pass)
* daemon cannot clean enough pages in the first pass, we let it go
* all out in succeeding passes.
*/

if ((maxlaunder = vm_max_launder) <= 1)
maxlaunder = 1;
if (pass)
maxlaunder = 10000;

rescan0:
addl_page_shortage = addl_page_shortage_init;
maxscan = cnt.v_inactive_count;
@ -727,7 +720,7 @@ rescan0:
continue;
}
/*
* Dont mess with busy pages, keep in the front of the
* Don't mess with busy pages, keep in the front of the
* queue, most likely are being paged out.
*/
if (m->busy || (m->flags & PG_BUSY)) {
@ -972,7 +965,6 @@ rescan0:
* track the per-page activity counter and use it to locate
* deactivation candidates.
*/

pcount = cnt.v_active_count;
m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

@ -1061,7 +1053,6 @@ rescan0:
* are considered basically 'free', moving pages from cache to free
* does not effect other calculations.
*/

while (cnt.v_free_count < cnt.v_free_reserved) {
static int cache_rover = 0;
m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
@ -1305,7 +1296,6 @@ vm_size_t count;
return 1;
}

/*
* vm_pageout is the high level pageout daemon.
*/
@ -1319,7 +1309,6 @@ vm_pageout()
/*
* Initialize some paging parameters.
*/

cnt.v_interrupt_free_min = 2;
if (cnt.v_page_count < 2000)
vm_pageout_page_count = 8;
@ -1367,7 +1356,6 @@ vm_pageout()
vm_pageout_stats_interval = 5;
if (vm_pageout_full_stats_interval == 0)
vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

/*
* Set maximum free per pass
@ -1469,7 +1457,6 @@ vm_daemon()
* scan the processes for exceeding their rlimits or if
* process is swapped out -- deactivate pages
*/

sx_slock(&allproc_lock);
LIST_FOREACH(p, &allproc, p_list) {
vm_pindex_t limit, size;
@ -1515,4 +1502,4 @@ vm_daemon()
sx_sunlock(&allproc_lock);
}
}
#endif
#endif /* !defined(NO_SWAPPING) */

@ -109,7 +109,5 @@ void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
int vm_pageout_flush __P((vm_page_t *, int, int));
void vm_pageout_page_free __P((vm_page_t));

#endif

#endif
#endif /* _VM_VM_PAGEOUT_H_ */

@ -58,7 +58,7 @@ vm_pageq_aquire(int queue)
mtx_lock(&vm_pageq_mtx[queue]);
#endif
}
return(vpq);
return (vpq);
}

void
@ -127,7 +127,6 @@ vm_pageq_add_new_page(vm_offset_t pa)
* This routine must be called at splhigh().
* This routine may not block.
*/

void
vm_pageq_remove_nowakeup(vm_page_t m)
{
@ -150,7 +149,6 @@ vm_pageq_remove_nowakeup(vm_page_t m)
* This routine must be called at splhigh().
* This routine may not block.
*/

void
vm_pageq_remove(vm_page_t m)
{
@ -190,7 +188,6 @@ vm_pageq_remove(vm_page_t m)
* This routine may only be called from the vm_page_list_find() macro
* in vm_page.h
*/

static __inline vm_page_t
_vm_pageq_find(int basequeue, int index)
{
@ -206,18 +203,16 @@ _vm_pageq_find(int basequeue, int index)
* same place. Even though this is not totally optimal, we've already
* blown it by missing the cache case so we do not care.
*/

for(i = PQ_L2_SIZE / 2; i > 0; --i) {
for (i = PQ_L2_SIZE / 2; i > 0; --i) {
if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
break;

if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
break;
}
return(m);
return (m);
}

#endif
#endif /* PQ_L2_SIZE > 1 */

vm_page_t
vm_pageq_find(int basequeue, int index, boolean_t prefer_zero)
@ -242,6 +237,6 @@ vm_pageq_find(int basequeue, int index, boolean_t prefer_zero)
m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
}
#endif
return(m);
return (m);
}

@ -272,7 +272,6 @@ vm_pager_deallocate(object)
* called with no specific spl
* Execute strategy routine directly to pager.
*/

void
vm_pager_strategy(vm_object_t object, struct bio *bp)
{
@ -310,7 +309,6 @@ vm_pager_sync()
if (pgops && ((*pgops)->pgo_sync != NULL))
(*(*pgops)->pgo_sync) ();
}

#endif

vm_offset_t

@ -83,7 +83,6 @@ struct pagerops {
#define VM_PAGER_PUT_INVAL 0x2

#ifdef _KERNEL

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_VMPGDATA);
#endif
@ -114,7 +113,6 @@ void vm_pager_strategy __P((vm_object_t object, struct bio *bp));
* ( or into VM space somewhere ). If the pagein was successful, we
* must fully validate it.
*/

static __inline int
vm_pager_get_pages(
vm_object_t object,
@ -130,7 +128,7 @@ vm_pager_get_pages(
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
}
return(r);
return (r);
}

static __inline void
@ -156,7 +154,6 @@ vm_pager_put_pages(
*
* This routine does not have to be called at any particular spl.
*/

static __inline boolean_t
vm_pager_has_page(
vm_object_t object,
@ -179,7 +176,6 @@ vm_pager_has_page(
*
* This function may not block.
*/

static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
@ -188,6 +184,5 @@ vm_pager_page_unswapped(vm_page_t m)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
}

#endif

#endif /* _KERNEL */
#endif /* _VM_PAGER_ */

@ -82,7 +82,6 @@ struct vnode *swapdev_vp;
*
* The bp is expected to be locked and *not* B_DONE on call.
*/

static int
swapdev_strategy(ap)
struct vop_strategy_args /* {

@ -229,7 +229,7 @@ vnode_pager_haspage(object, pindex, before, after)
after, before);
if (err)
return TRUE;
if ( bn == -1)
if (bn == -1)
return FALSE;
if (pagesperblock > 0) {
poff = pindex - (reqblock * pagesperblock);
@ -393,7 +393,7 @@ vnode_pager_addr(vp, address, run)
rtaddress = -1;
else {
rtaddress = block + voffset / DEV_BSIZE;
if( run) {
if (run) {
*run += 1;
*run *= bsize/PAGE_SIZE;
*run -= voffset/PAGE_SIZE;
@ -681,7 +681,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* clean up and return. Otherwise we have to re-read the
* media.
*/

if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
for (i = 0; i < count; i++) {
if (i != reqpage)
@ -694,12 +693,12 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
/*
* here on direct device I/O
*/

firstaddr = -1;

/*
* calculate the run that includes the required page
*/
for(first = 0, i = 0; i < count; i = runend) {
for (first = 0, i = 0; i < count; i = runend) {
firstaddr = vnode_pager_addr(vp,
IDX_TO_OFF(m[i]->pindex), &runpg);
if (firstaddr == -1) {
@ -920,7 +919,6 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
/*
* Call device-specific putpages function
*/

vp = object->handle;
if (vp->v_type != VREG)
mp = NULL;

@ -55,6 +55,5 @@ int vnode_pager_generic_getpages __P((struct vnode *vp, vm_page_t *m,
int vnode_pager_generic_putpages __P((struct vnode *vp, vm_page_t *m,
int count, boolean_t sync,
int *rtvals));
#endif

#endif /* _KERNEL */
#endif /* _VNODE_PAGER_ */