The demise of vm_pager_map_page() in revision 1.93 of vm/vm_pager.c permits
the reduction of the pager map's size by 8M bytes.  In other words, eight
megabytes of largely wasted KVA are returned to the kernel map for use
elsewhere.
Author: Alan Cox
Date:   2004-04-08 19:08:49 +00:00
commit 41f1b2c460
parent 10d1d31e8f
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=128038

3 changed files with 2 additions and 6 deletions
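Note: the following is only an illustrative sketch of the KVA arithmetic behind this change, not FreeBSD source.  Before the commit, both clean_map and the pager_map carved out of it reserved an extra PAGER_MAP_SIZE (8 MB) of KVA on top of the buffer and pbuf space; afterwards, pager_map is sized purely by nswbuf * MAXPHYS.  In the sketch, nbuf and nswbuf are hypothetical tuning values, BKVASIZE is assumed to be 16 KB, and MAXPHYS is taken as 64 KB per the comment in vm_pager.c below.

/*
 * Illustration only (not FreeBSD source): the KVA arithmetic behind this
 * commit.  PAGER_MAP_SIZE is the constant removed below; BKVASIZE (16 KB)
 * and MAXPHYS (64 KB, per the vm_pager.c comment) are assumed values, and
 * nbuf/nswbuf are hypothetical tuning numbers chosen only for the example.
 */
#include <stdio.h>

#define BKVASIZE	(16 * 1024)		/* assumed KVA per buffer-cache buffer */
#define MAXPHYS		(64 * 1024)		/* per the vm_pager.c comment below */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)	/* constant deleted by this commit */

int
main(void)
{
	size_t nbuf = 4096;	/* hypothetical buffer count */
	size_t nswbuf = 256;	/* hypothetical pbuf count */

	/* pager_map is carved out of clean_map, which is carved out of kernel_map. */
	size_t pager_before = nswbuf * MAXPHYS + PAGER_MAP_SIZE;
	size_t pager_after  = nswbuf * MAXPHYS;
	size_t clean_before = nbuf * BKVASIZE + pager_before;
	size_t clean_after  = nbuf * BKVASIZE + pager_after;

	printf("pager_map: %zu MB -> %zu MB\n", pager_before >> 20, pager_after >> 20);
	printf("clean_map: %zu MB -> %zu MB\n", clean_before >> 20, clean_after >> 20);
	printf("KVA returned to kernel_map: %zu MB\n", (clean_before - clean_after) >> 20);
	return (0);
}

With those assumed values the sketch prints an 8 MB reduction, which is exactly PAGER_MAP_SIZE and matches the figure in the log message above.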

sys/vm/vm_init.c

@@ -185,12 +185,12 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 		panic("startup: table size inconsistency");
 	clean_map = kmem_suballoc(kernel_map, &kmi->clean_sva, &kmi->clean_eva,
-		(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+		(nbuf*BKVASIZE) + (nswbuf*MAXPHYS));
 	buffer_map = kmem_suballoc(clean_map, &kmi->buffer_sva,
 		&kmi->buffer_eva, (nbuf*BKVASIZE));
 	buffer_map->system_map = 1;
 	pager_map = kmem_suballoc(clean_map, &kmi->pager_sva, &kmi->pager_eva,
-		(nswbuf*MAXPHYS) + pager_map_size);
+		(nswbuf*MAXPHYS));
 	pager_map->system_map = 1;
 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 		(16*(ARG_MAX+(PAGE_SIZE*3))));

sys/vm/vm_pager.c

@@ -176,9 +176,6 @@ int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
  * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
  * (MAXPHYS == 64k) if you want to get the most efficiency.
  */
-#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
-int pager_map_size = PAGER_MAP_SIZE;
 vm_map_t pager_map;
 static int bswneeded;
 static vm_offset_t swapbkva;		/* swap buffers kva */
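As a side note on the comment in the hunk above: a rough, standalone sanity check (again not FreeBSD code) that the remaining nswbuf * MAXPHYS sizing still covers NPENDINGIO pending async cleaning requests of MAXPHYS bytes each.  NPENDINGIO and MAXPHYS follow that comment; nswbuf == 256 is only an assumed typical value, not taken from this commit.

/* Standalone sketch; NPENDINGIO and MAXPHYS follow the comment above,
 * nswbuf is an assumed value, not taken from this commit. */
#include <assert.h>
#include <stdio.h>

#define NPENDINGIO	64
#define MAXPHYS		(64 * 1024)

int
main(void)
{
	size_t nswbuf = 256;				/* assumed pbuf count */
	size_t needed = (size_t)NPENDINGIO * MAXPHYS;	/* 64 * 64 KB = 4 MB */
	size_t pager_map_kva = nswbuf * MAXPHYS;	/* 16 MB with the assumed nswbuf */

	assert(pager_map_kva >= needed);
	printf("needed %zu MB, pager_map %zu MB\n", needed >> 20, pager_map_kva >> 20);
	return (0);
}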

sys/vm/vm_pager.h

@@ -91,7 +91,6 @@ MALLOC_DECLARE(M_VMPGDATA);
 #endif
 extern vm_map_t pager_map;
-extern int pager_map_size;
 extern struct pagerops *pagertab[];
 extern struct mtx pbuf_mtx;