Improvements mostly from John Dyson, with a little bit from me.

* Removed pmap_is_wired
* added extra cli/sti protection in idle (swtch.s)
* slight code improvement in trap.c
* added lots of comments
* improved paging and other algorithms in VM system
dg 1994-01-17 09:32:32 +00:00
parent eab8c19a7a
commit c5c5295745
8 changed files with 109 additions and 99 deletions

View File

@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.1 1993/11/13 02:25:06 davidg Exp $
+ * $Id: swtch.s,v 1.2 1994/01/14 16:23:40 davidg Exp $
  */
 #include "npx.h"	/* for NNPX */
@@ -146,8 +146,10 @@ Idle:
 	ALIGN_TEXT
 idle_loop:
 	call	_spl0
+	cli
 	cmpl	$0,_whichqs
 	jne	sw1
+	sti
 	hlt			/* wait for interrupt */
 	jmp	idle_loop
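The cli/sti pair above closes a lost-wakeup race: with interrupts enabled, a device interrupt could mark a process runnable after the `cmpl $0,_whichqs` check but before the `hlt`, and the CPU would halt with work queued. Checking the run queues with interrupts disabled, then issuing `sti` immediately before `hlt` (the i386 delays interrupt recognition by one instruction after `sti`), makes the check-and-halt effectively atomic. A minimal C sketch of the same idea — the stubs (cli, sti, hlt, sw1, whichqs) are hypothetical stand-ins, not the kernel's symbols:

	#include <stdatomic.h>

	static atomic_int whichqs;	/* run-queue bitmask, like _whichqs */

	static void cli(void) { /* disable interrupts */ }
	static void sti(void) { /* enable interrupts; on the i386 this takes
				 * effect only after the next instruction */ }
	static void hlt(void) { /* sleep until the next interrupt */ }
	static void sw1(void) { /* pick a runnable process and switch to it */ }

	static void
	idle_loop(void)
	{
		for (;;) {
			cli();			/* block wakeups while we look */
			if (atomic_load(&whichqs) != 0) {
				sti();
				sw1();		/* found work: go run it */
				continue;
			}
			/*
			 * sti;hlt back to back: the one-instruction interrupt
			 * shadow after sti guarantees no wakeup can arrive
			 * between the check above and the halt.
			 */
			sti();
			hlt();
		}
	}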

View File

@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
- * $Id$
+ * $Id: pmap.c,v 1.12 1994/01/14 16:23:37 davidg Exp $
  */
 /*
@@ -310,6 +310,7 @@ pmap_use_pt(pmap, va, use)
 {
 	vm_offset_t pt, pa;
 	pv_entry_t pv;
+	vm_page_t m;
 
 	if (va >= VM_MAX_ADDRESS)
 		return;
@@ -323,32 +324,14 @@ pmap_use_pt(pmap, va, use)
 		return;
 
 	pv = pa_to_pvh(pa);
+	m = PHYS_TO_VM_PAGE(pa);
 	if (use) {
-		pv->pv_wire++;
-	} else if (pv->pv_wire > 0) {
-		pv->pv_wire--;
+		vm_page_wire(m);
 	} else {
-		printf("attempt to decrement wire count below 0: %d\n", pv->pv_wire);
+		vm_page_unwire(m);
 	}
 }
 
-/*
- * see if a page is pmap_wired
- */
-inline int
-pmap_pt_is_used(pa)
-	vm_offset_t pa;
-{
-	pv_entry_t pv;
-	int s;
-
-	if (!pmap_is_managed(pa))
-		return 0;
-	pv = pa_to_pvh(pa);
-	return pv->pv_wire;
-}
-
 /* [ macro again?, should I force kstack into user map here? -wfj ] */
 void
 pmap_activate(pmap, pcbp)
@@ -834,13 +817,10 @@ pmap_remove_entry(pmap, pv, va)
 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
 		npv = pv->pv_next;
 		if (npv) {
-			wired = pv->pv_wire;
 			*pv = *npv;
-			pv->pv_wire = wired;
 			free_pv_entry(npv);
 		} else {
 			pv->pv_pmap = NULL;
-			pv->pv_wire = 0;
 		}
 	} else {
 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
@@ -1030,9 +1010,6 @@ pmap_remove_all(pa)
 		splx(s);
 	}
 
-	if (pv->pv_wire != 0)
-		panic("pmap_remove_all, wire count != 0\n");
-
 	tlbflush();
 }
@@ -1084,6 +1061,9 @@ pmap_protect(pmap, sva, eva, prot)
 		pte = ptp + i386_btop(va);
 
+		/*
+		 * scan for a non-empty pte
+		 */
 		{
 			int found=0;
 			int svap = pte - ptp;
@@ -1218,7 +1198,6 @@ pmap_enter(pmap, va, pa, prot, wired)
 		pv->pv_va = va;
 		pv->pv_pmap = pmap;
 		pv->pv_next = NULL;
-		pv->pv_wire = 0;
 	}
 	/*
 	 * There is at least one other VA mapping this page.
@@ -1256,8 +1235,23 @@ pmap_enter(pmap, va, pa, prot, wired)
 	 * Now validate mapping with desired protection/wiring.
 	 */
 	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
+	/*
+	 * When forking (copy-on-write, etc.):
+	 * A process will turn off write permissions for any of its writable
+	 * pages.  If the data (object) is only referred to by one process, the
+	 * process's map is modified directly, as opposed to using the
+	 * object manipulation routine.  When using pmap_protect, the
+	 * modified bits are not kept in the vm_page_t data structure.
+	 * Therefore, when using pmap_enter in vm_fault to bring back
+	 * writability of a page, there has been no memory of the
+	 * modified or referenced bits except at the pte level.
+	 * This clause supports the carryover of the modified and
+	 * used (referenced) bits.
+	 */
+	if (pa == opa)
+		npte |= *(int *)pte & (PG_M|PG_U);
 	if (wired)
 		npte |= PG_W;
 	if (va < UPT_MIN_ADDRESS)
@@ -1636,19 +1630,6 @@ pmap_is_referenced(pa)
 	return(pmap_testbit(pa, PG_U));
 }
 
-/*
- * pmap_is_pageable:
- *
- *	Return whether or not the pmap system needs a page wired for its purposes
- */
-boolean_t
-pmap_is_wired(pa)
-	vm_offset_t pa;
-{
-	return pmap_pt_is_used(pa)?1:0;
-}
-
 /*
  * pmap_is_modified:
  *
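The PG_M/PG_U carryover added to pmap_enter is the subtle part of this change: pmap_protect rewrites ptes in place without saving the hardware-maintained modified/referenced bits anywhere, so when vm_fault re-enters the same physical page to restore write access, the old pte is the only record of that state. A self-contained sketch of the rule, using hypothetical constants that mirror the i386 pte layout rather than the kernel's actual headers:

	#include <stdint.h>

	/* Illustrative i386-style pte bits (hypothetical values). */
	#define PG_FRAME	0xfffff000u	/* physical frame number */
	#define PG_V		0x001u		/* valid */
	#define PG_U		0x020u		/* used/referenced, set by hardware */
	#define PG_M		0x040u		/* modified/dirty, set by hardware */
	#define PG_W		0x200u		/* wired, software-defined */

	/*
	 * Build a new pte.  If we are re-mapping the same physical page
	 * (pa == opa), e.g. restoring writability after a copy-on-write
	 * fault, carry the old pte's PG_M and PG_U forward instead of
	 * silently clearing the page's modified/referenced history.
	 */
	static uint32_t
	make_pte(uint32_t old_pte, uint32_t pa, uint32_t opa,
	    uint32_t prot, int wired)
	{
		uint32_t npte = (pa & PG_FRAME) | prot | PG_V;

		if (pa == opa)
			npte |= old_pte & (PG_M | PG_U);
		if (wired)
			npte |= PG_W;
		return (npte);
	}

Losing PG_M here would be a correctness bug, not just a statistics glitch: a dirty page that the VM system believes is clean can be reclaimed without ever being written back.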

View File

@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.1 1993/11/13 02:25:06 davidg Exp $
+ * $Id: swtch.s,v 1.2 1994/01/14 16:23:40 davidg Exp $
  */
 #include "npx.h"	/* for NNPX */
@@ -146,8 +146,10 @@ Idle:
 	ALIGN_TEXT
 idle_loop:
 	call	_spl0
+	cli
 	cmpl	$0,_whichqs
 	jne	sw1
+	sti
 	hlt			/* wait for interrupt */
 	jmp	idle_loop

View File

@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)trap.c	7.4 (Berkeley)	5/13/91
- * $Id: trap.c,v 1.13 1994/01/03 07:55:24 davidg Exp $
+ * $Id: trap.c,v 1.14 1994/01/14 16:23:41 davidg Exp $
  */
 /*
@@ -390,10 +390,10 @@ trap(frame)
 		/* check if page table is mapped, if not, fault it first */
 #define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
 		{
-			vm_offset_t v = trunc_page(vtopte(va));
 			if (map != kernel_map) {
 				vm_offset_t pa;
+				vm_offset_t v = (vm_offset_t) vtopte(va);
 
 				/* Fault the pte only if needed: */
 				*(volatile char *)v += 0;
@@ -401,16 +401,21 @@ trap(frame)
 				/* Get the physical address: */
 				pa = pmap_extract(vm_map_pmap(map), v);
 
-				/* And wire the page at system vm level: */
+				/* And wire the pte page at system vm level: */
 				vm_page_wire(PHYS_TO_VM_PAGE(pa));
 
 				/* Fault in the user page: */
 				rv = vm_fault(map, va, ftype, FALSE);
 
-				/* Unwire the pte page */
+				/* Unwire the pte page: */
 				vm_page_unwire(PHYS_TO_VM_PAGE(pa));
+			} else {
+				/*
+				 * Since we know that kernel virtual addresses
+				 * always have pte pages mapped, we just have
+				 * to fault the page.
+				 */
+				rv = vm_fault(map, va, ftype, FALSE);
 			}
@@ -606,10 +611,19 @@ int trapwrite(addr)
 	{
 		vm_offset_t v;
 		v = trunc_page(vtopte(va));
+		/*
+		 * wire the pte page
+		 */
 		if (va < USRSTACK) {
 			vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
 		}
+		/*
+		 * fault the data page
+		 */
 		rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+		/*
+		 * unwire the pte page
+		 */
 		if (va < USRSTACK) {
 			vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
 		}
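Both trap() and trapwrite() follow the same discipline: pin the page-table page backing the faulting address before calling vm_fault, and release it afterward. vm_fault can sleep, and without the wire the pageout daemon could reclaim the pte page out from under the fault handler. A rough, compilable C sketch of the shape of that path — pte_address, extract_phys, page_wire, page_unwire, and fault are hypothetical stubs standing in for vtopte, pmap_extract, vm_page_wire, vm_page_unwire, and vm_fault:

	typedef unsigned long vm_offset_t;

	static char fake_pte_page[4096];	/* stands in for a real pte page */

	/* Hypothetical stubs for the kernel primitives named above. */
	static vm_offset_t pte_address(vm_offset_t va)		/* ~ vtopte(va) */
	{ return (vm_offset_t)&fake_pte_page[va % 4096]; }
	static vm_offset_t extract_phys(vm_offset_t v)		/* ~ pmap_extract() */
	{ return v; }
	static void page_wire(vm_offset_t pa) { (void)pa; }	/* ~ vm_page_wire() */
	static void page_unwire(vm_offset_t pa) { (void)pa; }	/* ~ vm_page_unwire() */
	static int fault(vm_offset_t va) { (void)va; return 0; } /* ~ vm_fault() */

	static int
	user_fault(vm_offset_t va)
	{
		vm_offset_t v = pte_address(va);
		vm_offset_t pa;
		int rv;

		*(volatile char *)v += 0;	/* touch the pte: faults the pte
						 * page in only if it is absent */
		pa = extract_phys(v);		/* physical address of pte page */
		page_wire(pa);			/* pin it across the sleepable call */
		rv = fault(va);			/* fault in the user page itself */
		page_unwire(pa);		/* release the pin */
		return (rv);
	}

trapwrite() gets the same effect with vm_map_pageable() over the pte's address range, skipping addresses at or above USRSTACK, presumably because their pte pages are never paged out.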

View File

@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
- * $Id$
+ * $Id: pmap.c,v 1.12 1994/01/14 16:23:37 davidg Exp $
  */
 /*
@@ -310,6 +310,7 @@ pmap_use_pt(pmap, va, use)
 {
 	vm_offset_t pt, pa;
 	pv_entry_t pv;
+	vm_page_t m;
 
 	if (va >= VM_MAX_ADDRESS)
 		return;
@@ -323,32 +324,14 @@ pmap_use_pt(pmap, va, use)
 		return;
 
 	pv = pa_to_pvh(pa);
+	m = PHYS_TO_VM_PAGE(pa);
 	if (use) {
-		pv->pv_wire++;
-	} else if (pv->pv_wire > 0) {
-		pv->pv_wire--;
+		vm_page_wire(m);
 	} else {
-		printf("attempt to decrement wire count below 0: %d\n", pv->pv_wire);
+		vm_page_unwire(m);
 	}
 }
 
-/*
- * see if a page is pmap_wired
- */
-inline int
-pmap_pt_is_used(pa)
-	vm_offset_t pa;
-{
-	pv_entry_t pv;
-	int s;
-
-	if (!pmap_is_managed(pa))
-		return 0;
-	pv = pa_to_pvh(pa);
-	return pv->pv_wire;
-}
-
 /* [ macro again?, should I force kstack into user map here? -wfj ] */
 void
 pmap_activate(pmap, pcbp)
@@ -834,13 +817,10 @@ pmap_remove_entry(pmap, pv, va)
 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
 		npv = pv->pv_next;
 		if (npv) {
-			wired = pv->pv_wire;
 			*pv = *npv;
-			pv->pv_wire = wired;
 			free_pv_entry(npv);
 		} else {
 			pv->pv_pmap = NULL;
-			pv->pv_wire = 0;
 		}
 	} else {
 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
@@ -1030,9 +1010,6 @@ pmap_remove_all(pa)
 		splx(s);
 	}
 
-	if (pv->pv_wire != 0)
-		panic("pmap_remove_all, wire count != 0\n");
-
 	tlbflush();
 }
@@ -1084,6 +1061,9 @@ pmap_protect(pmap, sva, eva, prot)
 		pte = ptp + i386_btop(va);
 
+		/*
+		 * scan for a non-empty pte
+		 */
 		{
 			int found=0;
 			int svap = pte - ptp;
@@ -1218,7 +1198,6 @@ pmap_enter(pmap, va, pa, prot, wired)
 		pv->pv_va = va;
 		pv->pv_pmap = pmap;
 		pv->pv_next = NULL;
-		pv->pv_wire = 0;
 	}
 	/*
 	 * There is at least one other VA mapping this page.
@@ -1256,8 +1235,23 @@ pmap_enter(pmap, va, pa, prot, wired)
 	 * Now validate mapping with desired protection/wiring.
	 */
 	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
+	/*
+	 * When forking (copy-on-write, etc.):
+	 * A process will turn off write permissions for any of its writable
+	 * pages.  If the data (object) is only referred to by one process, the
+	 * process's map is modified directly, as opposed to using the
+	 * object manipulation routine.  When using pmap_protect, the
+	 * modified bits are not kept in the vm_page_t data structure.
+	 * Therefore, when using pmap_enter in vm_fault to bring back
+	 * writability of a page, there has been no memory of the
+	 * modified or referenced bits except at the pte level.
+	 * This clause supports the carryover of the modified and
+	 * used (referenced) bits.
+	 */
+	if (pa == opa)
+		npte |= *(int *)pte & (PG_M|PG_U);
 	if (wired)
 		npte |= PG_W;
 	if (va < UPT_MIN_ADDRESS)
@@ -1636,19 +1630,6 @@ pmap_is_referenced(pa)
 	return(pmap_testbit(pa, PG_U));
 }
 
-/*
- * pmap_is_pageable:
- *
- *	Return whether or not the pmap system needs a page wired for its purposes
- */
-boolean_t
-pmap_is_wired(pa)
-	vm_offset_t pa;
-{
-	return pmap_pt_is_used(pa)?1:0;
-}
-
 /*
  * pmap_is_modified:
 *

View File

@@ -33,7 +33,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: swtch.s,v 1.1 1993/11/13 02:25:06 davidg Exp $
+ * $Id: swtch.s,v 1.2 1994/01/14 16:23:40 davidg Exp $
  */
 #include "npx.h"	/* for NNPX */
@@ -146,8 +146,10 @@ Idle:
 	ALIGN_TEXT
 idle_loop:
 	call	_spl0
+	cli
 	cmpl	$0,_whichqs
 	jne	sw1
+	sti
 	hlt			/* wait for interrupt */
 	jmp	idle_loop

View File

@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)trap.c	7.4 (Berkeley)	5/13/91
- * $Id: trap.c,v 1.13 1994/01/03 07:55:24 davidg Exp $
+ * $Id: trap.c,v 1.14 1994/01/14 16:23:41 davidg Exp $
  */
 /*
@@ -390,10 +390,10 @@ trap(frame)
 		/* check if page table is mapped, if not, fault it first */
 #define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
 		{
-			vm_offset_t v = trunc_page(vtopte(va));
 			if (map != kernel_map) {
 				vm_offset_t pa;
+				vm_offset_t v = (vm_offset_t) vtopte(va);
 
 				/* Fault the pte only if needed: */
 				*(volatile char *)v += 0;
@@ -401,16 +401,21 @@ trap(frame)
 				/* Get the physical address: */
 				pa = pmap_extract(vm_map_pmap(map), v);
 
-				/* And wire the page at system vm level: */
+				/* And wire the pte page at system vm level: */
 				vm_page_wire(PHYS_TO_VM_PAGE(pa));
 
 				/* Fault in the user page: */
 				rv = vm_fault(map, va, ftype, FALSE);
 
-				/* Unwire the pte page */
+				/* Unwire the pte page: */
 				vm_page_unwire(PHYS_TO_VM_PAGE(pa));
+			} else {
+				/*
+				 * Since we know that kernel virtual addresses
+				 * always have pte pages mapped, we just have
+				 * to fault the page.
+				 */
+				rv = vm_fault(map, va, ftype, FALSE);
 			}
@@ -606,10 +611,19 @@ int trapwrite(addr)
 	{
 		vm_offset_t v;
 		v = trunc_page(vtopte(va));
+		/*
+		 * wire the pte page
+		 */
 		if (va < USRSTACK) {
 			vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
 		}
+		/*
+		 * fault the data page
+		 */
 		rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+		/*
+		 * unwire the pte page
+		 */
 		if (va < USRSTACK) {
 			vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
 		}

View File

@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)trap.c	7.4 (Berkeley)	5/13/91
- * $Id: trap.c,v 1.13 1994/01/03 07:55:24 davidg Exp $
+ * $Id: trap.c,v 1.14 1994/01/14 16:23:41 davidg Exp $
  */
 /*
@@ -390,10 +390,10 @@ trap(frame)
 		/* check if page table is mapped, if not, fault it first */
 #define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
 		{
-			vm_offset_t v = trunc_page(vtopte(va));
 			if (map != kernel_map) {
 				vm_offset_t pa;
+				vm_offset_t v = (vm_offset_t) vtopte(va);
 
 				/* Fault the pte only if needed: */
 				*(volatile char *)v += 0;
@@ -401,16 +401,21 @@ trap(frame)
 				/* Get the physical address: */
 				pa = pmap_extract(vm_map_pmap(map), v);
 
-				/* And wire the page at system vm level: */
+				/* And wire the pte page at system vm level: */
 				vm_page_wire(PHYS_TO_VM_PAGE(pa));
 
 				/* Fault in the user page: */
 				rv = vm_fault(map, va, ftype, FALSE);
 
-				/* Unwire the pte page */
+				/* Unwire the pte page: */
 				vm_page_unwire(PHYS_TO_VM_PAGE(pa));
+			} else {
+				/*
+				 * Since we know that kernel virtual addresses
+				 * always have pte pages mapped, we just have
+				 * to fault the page.
+				 */
+				rv = vm_fault(map, va, ftype, FALSE);
 			}
@@ -606,10 +611,19 @@ int trapwrite(addr)
 	{
 		vm_offset_t v;
 		v = trunc_page(vtopte(va));
+		/*
+		 * wire the pte page
+		 */
 		if (va < USRSTACK) {
 			vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
 		}
+		/*
+		 * fault the data page
+		 */
 		rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+		/*
+		 * unwire the pte page
+		 */
 		if (va < USRSTACK) {
 			vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
 		}