freebsd-dev/sys/vm/vm_pager.c
John Dyson 8f9110f6a1 This mega-commit is meant to fix numerous interrelated problems.  Some
bitrot and incorrect assumptions had accumulated in the vfs_bio code.  These
problems have manifested themselves most severely on NFS-type filesystems,
but they can still affect local filesystems under certain circumstances.
Most of the problems involve mmap consistency, and as a side effect they
broke the vfs.ioopt code.  This code might have been committed separately,
but almost everything is interrelated.

1)	Allow (pmap_object_init_pt) prefaulting of buffer-busy pages that
	are fully valid.
2)	Rather than deactivating erroneously read initial (header) pages in
	kern_exec, we now free them.
3)	Fix the rundown of non-VMIO buffers that are in an inconsistent
	(missing vp) state.
4)	Fix the disassociation of pages from buffers in brelse.  The previous
	code had rotted and was faulty in a couple of important circumstances.
5)	Remove a gratuitous buffer wakeup in vfs_vmio_release.
6)	Remove a crufty and currently unused cluster mechanism for VBLK
	files in vfs_bio_awrite.  When the code is functional, I'll add back
	a cleaner version.
7)	The page busy count wakeups associated with the buffer cache usage were
	incorrectly cleaned up in a previous commit by me.  Revert to the
	original, correct version, but with a cleaner implementation.
8)	The cluster read code now tries to keep data associated with buffers
	more aggressively (without breaking the heuristics) when it is presumed
	that the read data (buffers) will be soon needed.
9)	Change filesystem lockmgr locks so that they use LK_NOPAUSE.
	Delay-loop waiting is not useful for filesystem locks, due to the
	length of the time intervals.
10)	Correct and clean-up spec_getpages.
11)	Implement a fully functional nfs_getpages, nfs_putpages.
12)	Fix nfs_write so that modifications are coherent with the NFS data on
	the server disk (at least as well as NFS seems to allow.)
13)	Properly support MS_INVALIDATE on NFS.
14)	Properly pass down MS_INVALIDATE to lower levels of the VM code from
	vm_map_clean.
15)	Better support the notion of pages being busy but valid, so that
	fewer in-transit waits occur.  (use p->busy more for pageouts instead
	of PG_BUSY.)  Since the page is fully valid, it is still usable for
	reads.
16)	It is possible (in error) for cached pages to be busy.  Make the
	page allocation code handle that case correctly.  (It should probably
	be a printf or panic, but I want the system to handle coding errors
	robustly.  I'll probably add a printf.)
17)	Correct the design and usage of vm_page_sleep.  It didn't handle
	consistency problems very well, so make the design a little less
	lofty.  After vm_page_sleep, if it ever blocked, it is still important
	to relookup the page (if the object generation count changed), and
	verify its status (always); see the sketch after this change list.
18)	In vm_pageout.c, vm_pageout_clean had rotted, so clean that up.
19)	Push the page busy for writes and VM_PROT_READ into vm_pageout_flush.
20)	Fix vm_pager_put_pages and its descendants to support an int flag
	instead of a boolean, so that we can pass down the invalidate bit.
1998-03-07 21:37:31 +00:00
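
A minimal sketch of the relookup discipline from item 17, under stated
assumptions: the vm_page_sleep() return convention (nonzero if it blocked),
the "retry" label, and the locals are illustrative, not the committed API.

	gen = object->generation;
	if (vm_page_sleep(m, "pgwait", &m->busy)) {
		/* We blocked: the object may have changed underneath us. */
		if (gen != object->generation)
			m = vm_page_lookup(object, pindex);
		/* Always re-verify the page's status after any sleep. */
		if (m == NULL || (m->flags & PG_BUSY) || m->busy)
			goto retry;
	}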

/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.35 1998/02/23 08:22:40 dyson Exp $
*/
/*
* Paging space routine stubs. Emulates a matchmaker-like interface
* for builtin pagers.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

static struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
};

static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
/*
* Kernel address space for mapping pages.
* Used by pagers where KVAs are needed for IO.
*
* XXX needs to be large enough to support the number of pending async
* cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
* (MAXPHYS == 64k) if you want to get the most efficiency.
*/
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;
static vm_offset_t swapbkva;	/* swap buffers kva */

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_init != NULL))
			(*(*pgops)->pgo_init) ();
}

void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
	}

	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}
/*
* Allocate an instance of a pager of the given type.
* Size, protection and offset parameters are passed in for pagers that
* need to perform page-level validation (e.g. the device pager).
*/
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_size_t size, vm_prot_t prot,
    vm_ooffset_t off)
{
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}

void
vm_pager_deallocate(object)
	vm_object_t object;
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

int
vm_pager_get_pages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	return ((*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage));
}

int
vm_pager_put_pages(object, m, count, flags, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int flags;
	int *rtvals;
{
	return ((*pagertab[object->type]->pgo_putpages)(object, m, count, flags, rtvals));
}

boolean_t
vm_pager_has_page(object, offset, before, after)
	vm_object_t object;
	vm_pindex_t offset;
	int *before;
	int *after;
{
	return ((*pagertab[object->type]->pgo_haspage) (object, offset, before, after));
}

/*
 * Called by pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging operations.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	return (kva);
}

void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}

vm_object_t
vm_pager_object_lookup(pg_list, handle)
	register struct pagerlst *pg_list;
	void *handle;
{
	register vm_object_t object;

	for (object = TAILQ_FIRST(pg_list); object != NULL; object = TAILQ_NEXT(object, pager_object_list))
		if (object->handle == handle)
			return (object);
	return (NULL);
}

/*
 * initialize a physical buffer
 */
static void
initpbuf(struct buf *bp)
{
	bzero(bp, sizeof *bp);
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = QUEUE_NONE;
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_kvabase = bp->b_data;
	bp->b_kvasize = MAXPHYS;
	bp->b_vnbufs.le_next = NOLIST;
}

/*
 * allocate a physical buffer
 */
struct buf *
getpbuf()
{
	int s;
	struct buf *bp;

	s = splvm();
	/* get a bp from the swap buffer header pool */
	while ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
		bswneeded = 1;
		tsleep(&bswneeded, PVM, "wswbuf", 0);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	initpbuf(bp);

	return bp;
}

/*
 * allocate a physical buffer, if one is available
 */
struct buf *
trypbuf()
{
	int s;
	struct buf *bp;

	s = splvm();
	if ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
		splx(s);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	initpbuf(bp);

	return bp;
}

/*
 * release a physical buffer
 */
void
relpbuf(bp)
	struct buf *bp;
{
	int s;

	s = splvm();

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	if (bp->b_vp)
		pbrelvp(bp);

	if (bp->b_flags & B_WANTED)
		wakeup(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	splx(s);
}