Move bogus_page declaration to vm_page.h and initialization to vm_page.c.

Reviewed by:	kib
Author:	glebius
Date:	2017-01-04 22:27:19 +0000
commit 01e1e94c27, parent 0b16bdd83f
7 changed files with 13 additions and 24 deletions

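For context before the per-file diffs: bogus_page is a single, permanently wired placeholder page. When the buffer cache issues a device read into a buf whose pages are already partially valid, it temporarily swaps the fully valid pages for bogus_page so the transfer scribbles on a throwaway page instead of on good data, then looks the real pages up again at completion. A minimal sketch of that consumer-side pattern, with hypothetical function names and locking elided (the real logic lives in vfs_bio.c and is not part of this diff):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Before a read: shield fully valid pages behind the sentinel. */
static void
example_shield_valid_pages(struct buf *bp)
{
	int i;

	for (i = 0; i < bp->b_npages; i++)
		if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
			bp->b_pages[i] = bogus_page;
}

/* After completion: replace each sentinel with the real page. */
static void
example_restore_pages(struct buf *bp, vm_object_t obj)
{
	int i;

	for (i = 0; i < bp->b_npages; i++)
		if (bp->b_pages[i] == bogus_page)
			bp->b_pages[i] = vm_page_lookup(obj,
			    OFF_TO_IDX(bp->b_offset) + i);
}
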
sys/dev/agp/agp_i810.c

@@ -1932,8 +1932,6 @@ DRIVER_MODULE(agp_i810, vgapci, agp_i810_driver, agp_devclass, 0, 0);
 MODULE_DEPEND(agp_i810, agp, 1, 1, 1);
 MODULE_DEPEND(agp_i810, pci, 1, 1, 1);

-extern vm_page_t bogus_page;
-
 void
 agp_intel_gtt_clear_range(device_t dev, u_int first_entry, u_int num_entries)
 {

sys/kern/kern_sendfile.c

@@ -62,8 +62,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <vm/vm_pager.h>

-extern vm_page_t bogus_page;
-
 /*
  * Structure describing a single sendfile(2) I/O, which may consist of
  * several underlying pager I/Os.

sys/kern/vfs_bio.c

@@ -287,15 +287,6 @@ static int bufspace_request;
  */
 static int bd_speedupreq;

-/*
- * bogus page -- for I/O to/from partially complete buffers
- * this is a temporary solution to the problem, but it is not
- * really that bad.  it would be better to split the buffer
- * for input in the case of buffers partially already in memory,
- * but the code is intricate enough already.
- */
-vm_page_t bogus_page;
-
 /*
  * Synchronization (sleep/wakeup) variable for active buffer space requests.
  * Set when wait starts, cleared prior to wakeup().

@@ -1115,9 +1106,6 @@ bufinit(void)
 	hifreebuffers = (3 * lofreebuffers) / 2;
 	numfreebuffers = nbuf;

-	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
-
 	/* Setup the kva and free list allocators. */
 	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
 	buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),

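The allocation deleted from bufinit() above reappears verbatim in vm_page_init() below. The flag combination is what makes the page usable as a permanent sentinel: VM_ALLOC_NOOBJ allocates the page without attaching it to any VM object (hence the NULL object and pindex 0), and VM_ALLOC_WIRED keeps the page daemon from reclaiming it. A hedged sketch of allocating a page the same way (hypothetical helper; the NULL check and panic are an assumed policy, the commit itself does not check):

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper: allocate a wired page owned by no VM object,
 * exactly as bogus_page is allocated in this commit.
 */
static vm_page_t
example_alloc_sentinel_page(void)
{
	vm_page_t m;

	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
	    VM_ALLOC_WIRED);
	if (m == NULL)
		panic("could not allocate sentinel page");
	return (m);
}
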
sys/kern/vfs_cluster.c

@@ -81,9 +81,6 @@ static int read_min = 1;
 SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
     "Cluster read min block count");

-/* Page expended to mark partially backed buffers */
-extern vm_page_t bogus_page;
-
 /*
  * Read data to a buf, including read-ahead if we find this to be beneficial.
  * cluster_read replaces bread.

sys/vm/vm_page.c

@@ -131,6 +131,12 @@ struct mtx_padalign vm_page_queue_free_mtx;

 struct mtx_padalign pa_lock[PA_LOCK_COUNT];

+/*
+ * bogus page -- for I/O to/from partially complete buffers,
+ * or for paging into sparsely invalid regions.
+ */
+vm_page_t bogus_page;
+
 vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;

@@ -158,7 +164,7 @@ static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
 static void vm_page_free_wakeup(void);
-static void vm_page_init_fakepg(void *dummy);
+static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
 	    vm_pindex_t pindex, vm_page_t mpred);
 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,

@@ -166,14 +172,16 @@ static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
 static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
     vm_paddr_t high);

-SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
+SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

 static void
-vm_page_init_fakepg(void *dummy)
+vm_page_init(void *dummy)
 {

 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
+	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
+	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
 }

 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */

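Moving the allocation into vm_page_init() also moves it earlier in boot: SYSINIT hooks run sorted by (subsystem, order), and SI_SUB_VM is one of the earliest stages, well before the CPU-startup stage from which bufinit() is reached, so bogus_page exists by the time the buffer cache wants it. A generic illustration of the registration shape used above (hypothetical hook name):

#include <sys/param.h>
#include <sys/kernel.h>

/* Hypothetical hook at the same boot stage as vm_page_init(). */
static void
example_vm_stage_init(void *dummy __unused)
{
	/* Runs once, during the SI_SUB_VM stage of SYSINIT processing. */
}
SYSINIT(example_vm_stage, SI_SUB_VM, SI_ORDER_SECOND, example_vm_stage_init,
    NULL);
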
sys/vm/vm_page.h

@@ -243,6 +243,8 @@ extern struct vm_domain vm_dom[MAXMEMDOM];
 #define	vm_pagequeue_unlock(pq)	mtx_unlock(&(pq)->pq_mutex)

 #ifdef _KERNEL
+extern vm_page_t bogus_page;
+
 static __inline void
 vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
 {

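With the extern centralized here, the four scattered per-file declarations removed elsewhere in this commit become unnecessary: any kernel file that includes vm/vm_page.h now sees bogus_page. A minimal sketch of a post-commit consumer (hypothetical file; the check mirrors how completion paths recognize the sentinel):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>	/* now declares extern vm_page_t bogus_page */

/*
 * Hypothetical completion-path check: a page equal to bogus_page is a
 * placeholder only, never real file data, and must be looked up again.
 */
static __inline int
example_is_placeholder(vm_page_t m)
{

	return (m == bogus_page);
}
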
sys/vm/vm_pager.c

@@ -84,8 +84,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_pager.h>
 #include <vm/vm_extern.h>

-extern vm_page_t bogus_page;
-
 int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

 struct buf *swbuf;