Move much of the machine dependent code from vm_glue.c into
pmap.c.  Along with the improved organization, small proc fork
performance is now about 5%-10% faster.
John Dyson 1996-10-15 03:16:45 +00:00
parent 0a5e7ccdf9
commit 675878e732
7 changed files with 403 additions and 125 deletions
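
For orientation, the four entry points this commit adds, with prototypes exactly as declared in the vm/pmap.h hunk below. The trailing comments are editorial, summarizing the call sites visible in the vm_glue.c and vm_machdep.c hunks:

    void pmap_new_proc __P((struct proc *p));      /* vm_fork(): allocate, wire, and map the UPAGES */
    void pmap_dispose_proc __P((struct proc *p));  /* cpu_wait(): unmap and free the UPAGES at exit */
    void pmap_swapout_proc __P((struct proc *p));  /* swapout(): let the UPAGES be paged out */
    void pmap_swapin_proc __P((struct proc *p));   /* faultin(): bring the UPAGES back in */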

View File: pmap.c

@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.125 1996/10/12 21:35:03 dyson Exp $
* $Id: pmap.c,v 1.126 1996/10/13 01:38:37 dyson Exp $
*/
/*
@@ -314,8 +314,8 @@ pmap_init(phys_start, phys_end)
vm_offset_t phys_start, phys_end;
{
vm_offset_t addr;
vm_size_t npg, s;
int i;
vm_size_t s;
int i, npg;
/*
* calculate the number of pv_entries needed
@@ -333,7 +333,7 @@ pmap_init(phys_start, phys_end)
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
for(i=0;i<npg;i++) {
for(i = 0; i < npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
@@ -665,9 +665,198 @@ pmap_page_lookup(object, pindex)
return m;
}
/*
* Create the UPAGES for a new process.
* This routine directly affects the fork perf for a process.
*/
void
pmap_new_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
struct user *up;
unsigned *ptep, *ptek;
pmap = &p->p_vmspace->vm_pmap;
/*
* allocate object for the upages
*/
upobj = vm_object_allocate( OBJT_DEFAULT,
UPAGES);
p->p_vmspace->vm_upages_obj = upobj;
/* get a kernel virtual address for the UPAGES for this proc */
up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
if (up == NULL)
panic("vm_fork: u_map allocation failed");
/*
* Allocate the ptp and incr the hold count appropriately
*/
m = pmap_allocpte(pmap, (vm_offset_t) kstack);
m->hold_count += (UPAGES - 1);
ptep = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
ptek = (unsigned *) vtopte((vm_offset_t) up);
for(i=0;i<UPAGES;i++) {
/*
* Get a kernel stack page
*/
while ((m = vm_page_alloc(upobj,
i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
}
/*
* Wire the page
*/
m->wire_count++;
++cnt.v_wire_count;
/*
* Enter the page into both the kernel and the process
* address space.
*/
*(ptep + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
m->flags &= ~(PG_ZERO|PG_BUSY);
m->flags |= PG_MAPPED|PG_WRITEABLE;
m->valid = VM_PAGE_BITS_ALL;
}
p->p_addr = up;
}
/*
* Dispose the UPAGES for a process that has exited.
* This routine directly impacts the exit perf of a process.
*/
void
pmap_dispose_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
unsigned *ptep, *ptek;
pmap = &p->p_vmspace->vm_pmap;
ptep = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
upobj = p->p_vmspace->vm_upages_obj;
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_dispose_proc: upage already missing???");
*(ptep + i) = 0;
*(ptek + i) = 0;
pmap_unuse_pt(pmap, (vm_offset_t) kstack + i * PAGE_SIZE, NULL);
vm_page_unwire(m);
vm_page_free(m);
}
kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
}
/*
* Allow the UPAGES for a process to be prejudicially paged out.
*/
void
pmap_swapout_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
unsigned *pte;
pmap = &p->p_vmspace->vm_pmap;
pte = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
upobj = p->p_vmspace->vm_upages_obj;
/*
* let the upages be paged
*/
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_pageout_proc: upage already missing???");
m->dirty = VM_PAGE_BITS_ALL;
*(pte + i) = 0;
pmap_unuse_pt(pmap, (vm_offset_t) kstack + i * PAGE_SIZE, NULL);
vm_page_unwire(m);
vm_page_deactivate(m);
pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
}
}
/*
* Bring the UPAGES for a specified process back in.
*/
void
pmap_swapin_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
unsigned *pte;
pmap = &p->p_vmspace->vm_pmap;
/*
* Allocate the ptp and incr the hold count appropriately
*/
m = pmap_allocpte(pmap, (vm_offset_t) kstack);
m->hold_count += (UPAGES - 1);
pte = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
upobj = p->p_vmspace->vm_upages_obj;
for(i=0;i<UPAGES;i++) {
int s;
s = splvm();
retry:
if ((m = vm_page_lookup(upobj, i)) == NULL) {
if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
goto retry;
}
} else {
if ((m->flags & PG_BUSY) || m->busy) {
m->flags |= PG_WANTED;
tsleep(m, PVM, "swinuw",0);
goto retry;
}
m->flags |= PG_BUSY;
}
vm_page_wire(m);
splx(s);
*(pte+i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {
int rv;
rv = vm_pager_get_pages(upobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("faultin: cannot get upages for proc: %d\n", p->p_pid);
m->valid = VM_PAGE_BITS_ALL;
}
PAGE_WAKEUP(m);
m->flags |= PG_MAPPED|PG_WRITEABLE;
}
}
/***************************************************
* Page table page management routines.....

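A note on the mapping trick in pmap_new_proc() above: each upage is made visible at two virtual addresses by writing the same PTE value through two page-table slots, which is what lets the kernel touch the u-area via p->p_addr while the process sees it at kstack. A minimal sketch using the identifiers from the function (code as in the diff; comments editorial):

    /*
     * ptep walks the process pmap's PTEs for kstack (from pmap_pte());
     * ptek walks the kernel PTEs for the u_map address (from vtopte()).
     * Slot i of each receives the same i386 PTE: the physical address
     * of page m plus the writable (PG_RW) and valid (PG_V) bits.
     */
    *(ptep + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
    *(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;

Storing the PTEs directly, instead of calling pmap_enter() and pmap_kenter() once per page as the removed vm_glue.c loop did, is presumably where much of the quoted 5%-10% fork speedup comes from.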
View File: vm_machdep.c

@@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.68 1996/09/08 20:44:12 dyson Exp $
* $Id: vm_machdep.c,v 1.69 1996/09/28 22:37:43 dyson Exp $
*/
#include "npx.h"
@@ -623,8 +623,7 @@ cpu_wait(p)
struct proc *p;
{
/* drop per-process resources */
pmap_qremove((vm_offset_t) p->p_addr, UPAGES);
kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
pmap_dispose_proc(p);
vmspace_free(p->p_vmspace);
}

View File: pmap.c

@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.125 1996/10/12 21:35:03 dyson Exp $
* $Id: pmap.c,v 1.126 1996/10/13 01:38:37 dyson Exp $
*/
/*
@@ -314,8 +314,8 @@ pmap_init(phys_start, phys_end)
vm_offset_t phys_start, phys_end;
{
vm_offset_t addr;
vm_size_t npg, s;
int i;
vm_size_t s;
int i, npg;
/*
* calculate the number of pv_entries needed
@@ -333,7 +333,7 @@ pmap_init(phys_start, phys_end)
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
for(i=0;i<npg;i++) {
for(i = 0; i < npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
@@ -665,9 +665,198 @@ pmap_page_lookup(object, pindex)
return m;
}
/*
* Create the UPAGES for a new process.
* This routine directly affects the fork perf for a process.
*/
void
pmap_new_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
struct user *up;
unsigned *ptep, *ptek;
pmap = &p->p_vmspace->vm_pmap;
/*
* allocate object for the upages
*/
upobj = vm_object_allocate( OBJT_DEFAULT,
UPAGES);
p->p_vmspace->vm_upages_obj = upobj;
/* get a kernel virtual address for the UPAGES for this proc */
up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
if (up == NULL)
panic("vm_fork: u_map allocation failed");
/*
* Allocate the ptp and incr the hold count appropriately
*/
m = pmap_allocpte(pmap, (vm_offset_t) kstack);
m->hold_count += (UPAGES - 1);
ptep = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
ptek = (unsigned *) vtopte((vm_offset_t) up);
for(i=0;i<UPAGES;i++) {
/*
* Get a kernel stack page
*/
while ((m = vm_page_alloc(upobj,
i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
}
/*
* Wire the page
*/
m->wire_count++;
++cnt.v_wire_count;
/*
* Enter the page into both the kernel and the process
* address space.
*/
*(ptep + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
m->flags &= ~(PG_ZERO|PG_BUSY);
m->flags |= PG_MAPPED|PG_WRITEABLE;
m->valid = VM_PAGE_BITS_ALL;
}
p->p_addr = up;
}
/*
* Dispose the UPAGES for a process that has exited.
* This routine directly impacts the exit perf of a process.
*/
void
pmap_dispose_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
unsigned *ptep, *ptek;
pmap = &p->p_vmspace->vm_pmap;
ptep = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
upobj = p->p_vmspace->vm_upages_obj;
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_dispose_proc: upage already missing???");
*(ptep + i) = 0;
*(ptek + i) = 0;
pmap_unuse_pt(pmap, (vm_offset_t) kstack + i * PAGE_SIZE, NULL);
vm_page_unwire(m);
vm_page_free(m);
}
kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
}
/*
* Allow the UPAGES for a process to be prejudicially paged out.
*/
void
pmap_swapout_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
unsigned *pte;
pmap = &p->p_vmspace->vm_pmap;
pte = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
upobj = p->p_vmspace->vm_upages_obj;
/*
* let the upages be paged
*/
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_pageout_proc: upage already missing???");
m->dirty = VM_PAGE_BITS_ALL;
*(pte + i) = 0;
pmap_unuse_pt(pmap, (vm_offset_t) kstack + i * PAGE_SIZE, NULL);
vm_page_unwire(m);
vm_page_deactivate(m);
pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
}
}
/*
* Bring the UPAGES for a specified process back in.
*/
void
pmap_swapin_proc(p)
struct proc *p;
{
int i;
vm_object_t upobj;
pmap_t pmap;
vm_page_t m;
unsigned *pte;
pmap = &p->p_vmspace->vm_pmap;
/*
* Allocate the ptp and incr the hold count appropriately
*/
m = pmap_allocpte(pmap, (vm_offset_t) kstack);
m->hold_count += (UPAGES - 1);
pte = (unsigned *) pmap_pte(pmap, (vm_offset_t) kstack);
upobj = p->p_vmspace->vm_upages_obj;
for(i=0;i<UPAGES;i++) {
int s;
s = splvm();
retry:
if ((m = vm_page_lookup(upobj, i)) == NULL) {
if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
goto retry;
}
} else {
if ((m->flags & PG_BUSY) || m->busy) {
m->flags |= PG_WANTED;
tsleep(m, PVM, "swinuw",0);
goto retry;
}
m->flags |= PG_BUSY;
}
vm_page_wire(m);
splx(s);
*(pte+i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V;
pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {
int rv;
rv = vm_pager_get_pages(upobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("faultin: cannot get upages for proc: %d\n", p->p_pid);
m->valid = VM_PAGE_BITS_ALL;
}
PAGE_WAKEUP(m);
m->flags |= PG_MAPPED|PG_WRITEABLE;
}
}
/***************************************************
* Page table page management routines.....

View File: vm_machdep.c

@@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.68 1996/09/08 20:44:12 dyson Exp $
* $Id: vm_machdep.c,v 1.69 1996/09/28 22:37:43 dyson Exp $
*/
#include "npx.h"
@@ -623,8 +623,7 @@ cpu_wait(p)
struct proc *p;
{
/* drop per-process resources */
pmap_qremove((vm_offset_t) p->p_addr, UPAGES);
kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
pmap_dispose_proc(p);
vmspace_free(p->p_vmspace);
}

View File: pmap.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: pmap.h,v 1.14 1996/07/30 03:08:04 dyson Exp $
* $Id: pmap.h,v 1.15 1996/09/08 20:44:31 dyson Exp $
*/
/*
@@ -126,6 +126,10 @@ void pmap_zero_page __P((vm_offset_t));
void pmap_prefault __P((pmap_t pmap, vm_offset_t addra,
vm_map_entry_t entry, vm_object_t object));
int pmap_mincore __P((pmap_t pmap, vm_offset_t addr));
void pmap_new_proc __P((struct proc *p));
void pmap_dispose_proc __P((struct proc *p));
void pmap_swapout_proc __P((struct proc *p));
void pmap_swapin_proc __P((struct proc *p));
#endif /* KERNEL */
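
Read together with the vm_machdep.c hunk above and the vm_glue.c hunk below, the new prototypes slot into the process lifecycle as follows. This is a condensed sketch of the call sites in this commit, not verbatim kernel code:

    pmap_new_proc(p2);       /* vm_fork(): replaces the inline UPAGES setup     */
    up = p2->p_addr;

    pmap_swapout_proc(p);    /* swapout(): replaces the page-deactivation loop  */

    pmap_swapin_proc(p);     /* faultin(): replaces the lookup/allocate loop    */

    pmap_dispose_proc(p);    /* cpu_wait(): replaces pmap_qremove()+kmem_free() */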

View File: vm_glue.c

@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.52 1996/07/02 02:07:56 dyson Exp $
* $Id: vm_glue.c,v 1.53 1996/09/15 11:24:21 bde Exp $
*/
#include <sys/param.h>
@@ -205,55 +205,9 @@ vm_fork(p1, p2)
if (p1->p_vmspace->vm_shm)
shmfork(p1, p2);
/*
* Allocate a wired-down (for now) pcb and kernel stack for the
* process
*/
pmap_new_proc(p2);
pvp = &p2->p_vmspace->vm_pmap;
/*
* allocate object for the upages
*/
p2->p_vmspace->vm_upages_obj = upobj = vm_object_allocate( OBJT_DEFAULT,
UPAGES);
/* get a kernel virtual address for the UPAGES for this proc */
up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
if (up == NULL)
panic("vm_fork: u_map allocation failed");
for(i=0;i<UPAGES;i++) {
vm_page_t m;
/*
* Get a kernel stack page
*/
while ((m = vm_page_alloc(upobj,
i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
}
/*
* Wire the page
*/
vm_page_wire(m);
PAGE_WAKEUP(m);
/*
* Enter the page into both the kernel and the process
* address space.
*/
pmap_enter( pvp, (vm_offset_t) kstack + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, TRUE);
pmap_kenter(((vm_offset_t) up) + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m));
m->flags &= ~PG_ZERO;
m->flags |= PG_MAPPED|PG_WRITEABLE;
m->valid = VM_PAGE_BITS_ALL;
}
p2->p_addr = up;
up = p2->p_addr;
/*
* p_stats and p_sigacts currently point at fields in the user struct
@@ -318,51 +272,11 @@ faultin(p)
int s;
if ((p->p_flag & P_INMEM) == 0) {
pmap_t pmap = &p->p_vmspace->vm_pmap;
vm_page_t m;
vm_object_t upobj = p->p_vmspace->vm_upages_obj;
++p->p_lock;
#if defined(SWAP_DEBUG)
printf("swapping in %d\n", p->p_pid);
#endif
for(i=0;i<UPAGES;i++) {
int s;
s = splvm();
retry:
if ((m = vm_page_lookup(upobj, i)) == NULL) {
if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
goto retry;
}
} else {
if ((m->flags & PG_BUSY) || m->busy) {
m->flags |= PG_WANTED;
tsleep(m, PVM, "swinuw",0);
goto retry;
}
m->flags |= PG_BUSY;
}
vm_page_wire(m);
splx(s);
pmap_swapin_proc(p);
pmap_enter( pmap, (vm_offset_t) kstack + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, TRUE);
pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {
int rv;
rv = vm_pager_get_pages(upobj,
&m, 1, 0);
if (rv != VM_PAGER_OK)
panic("faultin: cannot get upages for proc: %d\n", p->p_pid);
m->valid = VM_PAGE_BITS_ALL;
}
PAGE_WAKEUP(m);
m->flags |= PG_MAPPED|PG_WRITEABLE;
}
s = splhigh();
if (p->p_stat == SRUN)
@@ -541,20 +455,7 @@ swapout(p)
remrq(p);
(void) spl0();
/*
* let the upages be paged
*/
for(i=0;i<UPAGES;i++) {
vm_page_t m;
if ((m = vm_page_lookup(p->p_vmspace->vm_upages_obj, i)) == NULL)
panic("swapout: upage already missing???");
m->dirty = VM_PAGE_BITS_ALL;
vm_page_unwire(m);
vm_page_deactivate(m);
pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
}
pmap_remove(pmap, (vm_offset_t) kstack,
(vm_offset_t) kstack + PAGE_SIZE * UPAGES);
pmap_swapout_proc(p);
p->p_flag &= ~P_SWAPPING;
p->p_swtime = 0;

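Both the removed faultin() loop above and its replacement, pmap_swapin_proc() in the pmap.c hunk earlier, use the same lookup-or-allocate-and-retry pattern for each upage. Restated with editorial comments (the code mirrors the diff; the flattened else-if is equivalent to the original nesting):

    s = splvm();                    /* keep VM interrupts out while inspecting */
    retry:
    if ((m = vm_page_lookup(upobj, i)) == NULL) {
            /* the upage was freed after pageout: allocate a fresh one,
             * sleeping in VM_WAIT until free pages are available */
            if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
                    VM_WAIT;
                    goto retry;
            }
    } else if ((m->flags & PG_BUSY) || m->busy) {
            /* the page exists but is busy (e.g. pageout I/O still in
             * flight): request a wakeup and recheck from the top */
            m->flags |= PG_WANTED;
            tsleep(m, PVM, "swinuw", 0);
            goto retry;
    } else {
            m->flags |= PG_BUSY;    /* claim the page */
    }
    vm_page_wire(m);                /* the u-area must stay resident */
    splx(s);

(vm_page_alloc() returns the page already busied; PAGE_WAKEUP() later clears PG_BUSY once the contents are valid.)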
View File: vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.67 1996/10/06 18:27:39 dyson Exp $
* $Id: vm_page.c,v 1.68 1996/10/12 20:09:48 bde Exp $
*/
/*
@@ -684,9 +684,6 @@ vm_page_select_free(object, pindex, prefqueue)
#if PQ_L2_SIZE > 1
index = pindex + object->pg_color;
/*
* These are special cased because of clock-arithemetic
*/
for(j = 0; j < PQ_L1_SIZE; j++) {
for(i = (PQ_L2_SIZE/2) - (PQ_L1_SIZE - 1);
(i + j) >= 0;