Do not reserve KVA for paging bufs in vm_ksubmap_init(), since now they
allocate it in pbuf_init(). This should have been done together with
r343030.
Gleb Smirnoff 2019-01-16 20:14:16 +00:00
parent 1cf7d13fab
commit 46b0292a82
3 changed files with 2 additions and 22 deletions
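
For context: r343030 moved pager bufs to a UMA zone, and, as the message above says, each pbuf now obtains its KVA in pbuf_init() rather than out of the region vm_ksubmap_init() used to reserve. The sketch below only illustrates that per-buffer allocation pattern; the example_* hook names are hypothetical and not the actual vfs_bio.c code, while kva_alloc(9)/kva_free(9), MAXPHYS and the struct buf fields are real kernel interfaces.

/*
 * Hypothetical UMA item init/fini hooks: each pager buf owns a private
 * MAXPHYS-sized KVA window for as long as the zone item exists, so
 * nothing has to be reserved up front in the clean submap.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>		/* ENOMEM */
#include <sys/bio.h>
#include <sys/buf.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>	/* kva_alloc(), kva_free() */

static int
example_pbuf_init(void *mem, int size __unused, int flags __unused)
{
	struct buf *bp = mem;

	/* Back this pbuf with its own KVA instead of a clean-map slice. */
	bp->b_kvabase = (caddr_t)kva_alloc(MAXPHYS);
	if (bp->b_kvabase == NULL)
		return (ENOMEM);
	bp->b_kvasize = MAXPHYS;
	return (0);
}

static void
example_pbuf_fini(void *mem, int size __unused)
{
	struct buf *bp = mem;

	/* Return the KVA to the kernel arena when the item is destroyed. */
	kva_free((vm_offset_t)bp->b_kvabase, bp->b_kvasize);
}

Hooks of this shape would be passed as the uminit/fini arguments of uma_zcreate(9), so the mapping lives exactly as long as the cached zone item.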


@@ -221,11 +221,9 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 		panic("startup: table size inconsistency");
 
 	/*
-	 * Allocate the clean map to hold all of the paging and I/O virtual
-	 * memory.
+	 * Allocate the clean map to hold all of I/O virtual memory.
 	 */
-	size = (long)nbuf * BKVASIZE + (long)nswbuf * MAXPHYS +
-	    (long)bio_transient_maxcnt * MAXPHYS;
+	size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * MAXPHYS;
 	kmi->clean_sva = firstaddr = kva_alloc(size);
 	kmi->clean_eva = firstaddr + size;
 
@@ -242,13 +240,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 	    PAGE_SIZE, (mp_ncpus > 4) ? BKVASIZE * 8 : 0, 0);
 	firstaddr += size;
 
 	/*
-	 * Now swap kva.
-	 */
-	swapbkva = firstaddr;
-	size = (long)nswbuf * MAXPHYS;
-	firstaddr += size;
-
-	/*
 	 * And optionally transient bio space.
 	 */


@@ -74,7 +74,6 @@ extern struct vmem *kmem_arena;
 extern struct vmem *buffer_arena;
 extern struct vmem *transient_arena;
 extern struct vmem *memguard_arena;
-extern vm_offset_t swapbkva;
 extern u_long vm_kmem_size;
 extern u_int exec_map_entries;
 extern u_int exec_map_entry_size;


@@ -163,16 +163,6 @@ struct pagerops *pagertab[] = {
 	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
 };
 
-/*
- * Kernel address space for mapping pages.
- * Used by pagers where KVAs are needed for IO.
- *
- * XXX needs to be large enough to support the number of pending async
- * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
- * (MAXPHYS == 64k) if you want to get the most efficiency.
- */
-vm_offset_t swapbkva;		/* swap buffers kva */
-
 void
 vm_pager_init(void)
 {