New routine "pmap_kenter", designed to take advantage of the special case of the kernel pmap.
David Greenman 1994-03-30 02:17:47 +00:00
parent 827f6dbf32
commit 6b4ac811ca
4 changed files with 206 additions and 28 deletions
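
For orientation, every call site touched below follows the same pattern: a per-page pmap_enter(kernel_pmap, va, pa, VM_PROT_DEFAULT, TRUE) becomes pmap_kenter(va, pa), and a single pmap_update() is added after the loop, presumably to flush stale TLB entries once for the whole range. A minimal sketch of the new pattern, in the kernel C style of these files (map_wired_range and npages are illustrative stand-ins, not names from this commit):

/*
 * Sketch only: map "npages" wired, read/write pages starting at physical
 * address "pa" into kernel virtual address "kva".  Assumes the usual
 * <sys/param.h> / <vm/vm.h> kernel environment of the files below.
 */
void
map_wired_range(kva, pa, npages)
	vm_offset_t kva, pa;
	int npages;
{
	int i;

	for (i = 0; i < npages; i++)
		/* was: pmap_enter(kernel_pmap, ..., VM_PROT_DEFAULT, TRUE) */
		pmap_kenter(kva + i * NBPG, pa + i * NBPG);

	pmap_update();		/* one TLB flush for the whole range */
}

As the body added to pmap.c shows, pmap_kenter() always installs the PTE as PG_RW | PG_V | PG_W and only has to handle the kernel pmap, so it can skip the generality of pmap_enter().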


@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.20 1994/03/07 11:38:34 davidg Exp $
* $Id: pmap.c,v 1.21 1994/03/14 21:54:01 davidg Exp $
*/
/*
@@ -551,7 +551,7 @@ pmap_pinit(pmap)
/* install self-referential address mapping entry */
*(int *)(pmap->pm_pdir+PTDPTDI) =
((int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir)) | PG_V | PG_KW;
((int)pmap_kextract((vm_offset_t)pmap->pm_pdir)) | PG_V | PG_KW;
pmap->pm_count = 1;
simple_lock_init(&pmap->pm_lock);
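
pmap_kextract() itself is not added by this diff; it is presumably a kernel-only counterpart of pmap_extract() that reads the kernel page table directly, which is cheap because kernel page-table pages are always resident. A sketch of what such a helper typically looks like on the i386 of this era, using the vtopte(), PG_FRAME and NBPG definitions from the existing pmap code; treat the body as an assumption, not this commit's implementation:

/*
 * Sketch only: kernel-virtual-to-physical translation without the
 * generality of pmap_extract().  Not taken from this commit.
 */
vm_offset_t
pmap_kextract(va)
	vm_offset_t va;
{
	vm_offset_t pa;

	/* kernel PTEs are always present, so read the PTE directly */
	pa = ((vm_offset_t) *(int *) vtopte(va)) & PG_FRAME;
	return (pa | (va & (NBPG - 1)));
}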
@@ -1294,6 +1294,84 @@ pmap_enter(pmap, va, pa, prot, wired)
}
}
/*
* add a wired page to the kva
*/
void
pmap_kenter(va, pa)
vm_offset_t va;
register vm_offset_t pa;
{
register pt_entry_t *pte;
register pv_entry_t pv, npv;
vm_offset_t opa;
int s;
/*
* Enter on the PV list if part of our managed memory
* Note that we raise IPL while manipulating pv_table
* since pmap_enter can be called at interrupt time.
*/
pte = vtopte(va);
opa = pmap_pte_pa(pte);
/*
* Mapping has not changed, must be protection or wiring change.
*/
if (opa == pa) {
/*
* Wiring change, just update stats.
* We don't worry about wiring PT pages as they remain
* resident as long as there are valid mappings in them.
* Hence, if a user page is wired, the PT page will be also.
*/
if (!pmap_pte_w(pte)) {
kernel_pmap->pm_stats.wired_count++;
}
goto validate;
}
if (opa) {
pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}
pv = pa_to_pvh(pa);
s = splimp();
/*
* No entries yet, use header as the first entry
*/
if (pv->pv_pmap == NULL) {
pv->pv_va = va;
pv->pv_pmap = kernel_pmap;
pv->pv_next = NULL;
}
/*
* There is at least one other VA mapping this page.
* Place this entry after the header.
*/
else {
npv = get_pv_entry();
npv->pv_va = va;
npv->pv_pmap = kernel_pmap;
npv->pv_next = pv->pv_next;
pv->pv_next = npv;
}
splx(s);
/*
* Increment counters
*/
kernel_pmap->pm_stats.resident_count++;
validate:
/*
* Now validate mapping with desired protection/wiring.
*/
*pte = (pt_entry_t) ( (int) (pa | PG_RW | PG_V | PG_W));
}
/*
* this code makes some *MAJOR* assumptions:
* 1. Current pmap & pmap exists.


@@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.14 1994/03/23 09:15:06 davidg Exp $
* $Id: vm_machdep.c,v 1.15 1994/03/24 23:12:35 davidg Exp $
*/
#include "npx.h"
@@ -150,18 +150,31 @@ vm_bounce_kva(count)
int tofree;
int i;
int startfree;
vm_offset_t kva;
vm_offset_t kva = 0;
int s = splbio();
int size = count*NBPG;
startfree = 0;
more:
if (!bmfreeing && (tofree = kvasfreecnt)) {
bmfreeing = 1;
more1:
for (i = startfree; i < kvasfreecnt; i++) {
/*
* if we have a kva of the right size, no sense
* in freeing/reallocating...
* might affect fragmentation short term, but
* as long as the amount of bounce_map is
* significantly more than the maximum transfer
* size, I don't think that it is a problem.
*/
pmap_remove(kernel_pmap,
kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
kmem_free_wakeup(bounce_map, kvaf[i].addr,
kvaf[i].size);
if( !kva && kvaf[i].size == size) {
kva = kvaf[i].addr;
} else {
kmem_free_wakeup(bounce_map, kvaf[i].addr,
kvaf[i].size);
}
}
if (kvasfreecnt != tofree) {
startfree = i;
@@ -172,12 +185,11 @@ vm_bounce_kva(count)
bmfreeing = 0;
}
if (!(kva = kmem_alloc_pageable(bounce_map, count * NBPG))) {
if (!kva && !(kva = kmem_alloc_pageable(bounce_map, size))) {
bmwait = 1;
tsleep((caddr_t) bounce_map, PRIBIO, "bmwait", 0);
goto more;
}
splx(s);
return kva;
@@ -266,8 +278,7 @@ vm_bounce_alloc(bp)
* allocate a replacement page
*/
vm_offset_t bpa = vm_bounce_page_find(1);
pmap_enter(kernel_pmap, kva + (NBPG * i), bpa, VM_PROT_DEFAULT,
TRUE);
pmap_kenter(kva + (NBPG * i), bpa);
/*
* if we are writing, then copy the data into the page
*/
@@ -277,11 +288,11 @@ vm_bounce_alloc(bp)
/*
* use original page
*/
pmap_enter(kernel_pmap, kva + (NBPG * i), pa, VM_PROT_DEFAULT,
TRUE);
pmap_kenter(kva + (NBPG * i), pa);
}
va += NBPG;
}
pmap_update();
/*
* flag the buffer as being bounced
@@ -607,11 +618,11 @@ vmapbuf(bp)
pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
if (pa == 0)
panic("vmapbuf: null page frame");
pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
VM_PROT_READ|VM_PROT_WRITE, TRUE);
pmap_kenter(kva, trunc_page(pa));
addr += PAGE_SIZE;
kva += PAGE_SIZE;
}
pmap_update();
}
/*

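The vm_bounce_kva() change above also adds a small allocator optimization: while draining the pending-free list, an entry whose size exactly matches the current request is claimed directly instead of being handed back to bounce_map and immediately reallocated. Stripped of the restart and wakeup bookkeeping that surrounds it in the real function, the idea reduces to the sketch below (kvaf[], kvasfreecnt, bounce_map and the pmap/kmem calls are the ones used above; this is a simplified restatement, not additional code from the commit):

/*
 * Sketch only: claim the first exact-size entry on the pending-free
 * list, release the others.  Returns 0 if nothing matched, in which
 * case the caller falls back to kmem_alloc_pageable() as above.
 */
vm_offset_t
bounce_kva_reuse(size)
	int size;
{
	vm_offset_t kva = 0;
	int i;

	for (i = 0; i < kvasfreecnt; i++) {
		/* the mapping is torn down whether or not the KVA is reused */
		pmap_remove(kernel_pmap,
			kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
		if (!kva && kvaf[i].size == size)
			kva = kvaf[i].addr;	/* exact fit: keep the KVA */
		else
			kmem_free_wakeup(bounce_map, kvaf[i].addr,
				kvaf[i].size);
	}
	kvasfreecnt = 0;
	return (kva);
}

As the new comment in the diff notes, keeping an exact-size block can fragment bounce_map slightly in the short term, but as long as the map is much larger than the largest transfer this is cheaper than a free/reallocate round trip.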