From 208bfdc98bbc87630ba3438072faf6f082922b84 Mon Sep 17 00:00:00 2001
From: John Dyson
Date: Thu, 28 Mar 1996 04:59:34 +0000
Subject: [PATCH] Significant code cleanup, and some performance improvement.
 Also, mlock will now work properly without killing the system.

---
 sys/amd64/amd64/pmap.c | 216 +++++++++++++++++++++++++----------------
 sys/i386/i386/pmap.c   | 216 +++++++++++++++++++++++++----------------
 2 files changed, 268 insertions(+), 164 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 420b86a40361..f92a1680cc87 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
- *	$Id: pmap.c,v 1.80 1996/03/11 05:55:56 hsu Exp $
+ *	$Id: pmap.c,v 1.81 1996/03/13 00:39:45 dyson Exp $
  */
 
 /*
@@ -51,13 +51,6 @@
  *
  * Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
  */
-/*
- * Major modifications by John S. Dyson primarily to support
- * pageable page tables, eliminating pmap_attributes,
- * discontiguous memory pages, and using more efficient string
- * instructions. Jan 13, 1994.  Further modifications on Mar 2, 1994,
- * general clean-up and efficiency mods.
- */
 
 /*
  * Manages physical address maps.
@@ -131,8 +124,8 @@ static void init_pv_entries __P((int));
 #define pmap_pte_u(pte)	((*(int *)pte & PG_U) != 0)
 #define pmap_pte_v(pte)	((*(int *)pte & PG_V) != 0)
 
-#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
-#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
+#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
+#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
 
 /*
  * Given a map and a machine independent protection code,
@@ -179,6 +172,7 @@ static void pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
 		vm_offset_t pa));
 static int pmap_is_managed __P((vm_offset_t pa));
 static void pmap_remove_all __P((vm_offset_t pa));
+static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
 static __inline void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv,
 		vm_offset_t va));
 static int pmap_remove_pte __P((struct pmap *pmap, pt_entry_t *ptq,
@@ -318,7 +312,7 @@ pmap_is_managed(pa)
 		return 0;
 
 	for (i = 0; phys_avail[i + 1]; i += 2) {
-		if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
+		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
 			return 1;
 	}
 	return 0;
@@ -551,7 +545,9 @@ pmap_getpdir() {
 		--nfreepdir;
 		pdir = pdirlist;
 		pdirlist = (caddr_t *) *pdir;
+#if 0 /* Not needed anymore */
 		bzero( (caddr_t) pdir, PAGE_SIZE);
+#endif
 	} else {
 		pdir = (caddr_t *) kmem_alloc(kernel_map, PAGE_SIZE);
 	}
@@ -564,6 +560,24 @@ pmap_freepdir(void *pdir) {
 	if (nfreepdir > NFREEPDIR) {
 		kmem_free(kernel_map, (vm_offset_t) pdir, PAGE_SIZE);
 	} else {
+		int i;
+		pt_entry_t *s;
+		s = (pt_entry_t *) pdir;
+
+		/*
+		 * remove wired in kernel mappings
+		 */
+		bzero(s + KPTDI, nkpt * PTESIZE);
+		s[APTDPTDI] = 0;
+		s[PTDPTDI] = 0;
+
+#if defined(PMAP_DIAGNOSTIC)
+		for(i=0;i= UPT_MIN_ADDRESS)
-			i386prot |= PG_RW;
-	}
-		pbits = *(int *)pte;
-#if defined(PMAP_DIAGNOSTIC)
-		if (pmap_nw_modified((pt_entry_t) pbits)) {
-			printf("pmap_protect: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, pbits);
-		}
-#endif
-		if (pbits & PG_M) {
-			vm_page_t m;
-			vm_offset_t pa = pbits & PG_FRAME;
-			m = PHYS_TO_VM_PAGE(pa);
-			m->dirty = VM_PAGE_BITS_ALL;
-			*(int *)pte &= ~PG_M;
-			anychanged = 1;
-		}
-		pprot = pbits & PG_PROT;
-		if (pprot != i386prot) {
-			pmap_pte_set_prot(pte, i386prot);
-			anychanged = 1;
+
+		if (pbits & PG_RW) {
+			if (pbits & PG_M) {
+				vm_page_t m;
+				vm_offset_t pa = pbits & PG_FRAME;
+				m = PHYS_TO_VM_PAGE(pa);
+				m->dirty = VM_PAGE_BITS_ALL;
+			}
+			*(int *)pte &= ~(PG_M|PG_RW);
+			anychanged=1;
 		}
 		++sva;
+		if ( sva < pdnxt)
+			goto quickloop;
 	}
 	if (anychanged)
 		pmap_update();
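
Aside: the pmap_protect rewrite above replaces the old recompute-the-protection path with a single cheap transition: if a PTE is writable, harvest its hardware dirty bit into the vm_page and then clear PG_RW and PG_M in one store. Below is a minimal user-space sketch of that pattern; the ptes array and record_dirty() are illustrative stand-ins (record_dirty() plays the role of the PHYS_TO_VM_PAGE()->dirty update in the patch), not code from the commit.

#include <stdio.h>
#include <stddef.h>

#define PG_RW    0x002u         /* i386 PTE: write enable */
#define PG_M     0x040u         /* i386 PTE: modified (dirty) */
#define PG_FRAME 0xfffff000u    /* physical frame mask */

/* Stand-in for the PHYS_TO_VM_PAGE(pa)->dirty update in the patch. */
static void
record_dirty(unsigned pa)
{
	printf("page at 0x%08x was dirty\n", pa);
}

/* Write-protect a run of PTEs, harvesting dirty bits in the same pass. */
static int
write_protect_range(unsigned *pte, size_t n)
{
	int anychanged = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		unsigned pbits = pte[i];

		if (pbits & PG_RW) {
			if (pbits & PG_M)
				record_dirty(pbits & PG_FRAME);
			/* one store clears both write-enable and dirty */
			pte[i] = pbits & ~(PG_M | PG_RW);
			anychanged = 1;
		}
	}
	return (anychanged);	/* caller flushes the TLB only if set */
}

int
main(void)
{
	/* valid, valid+dirty+writable, valid+writable */
	unsigned ptes[3] = { 0x00001001, 0x00002043, 0x00003003 };

	if (write_protect_range(ptes, 3))
		printf("TLB flush would be needed\n");
	return (0);
}

The win is that the loop touches only PTEs that are actually writable, and the TLB flush (pmap_update() in the patch) happens once at the end, and only when something changed.
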
@@ -1341,6 +1363,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 	if (va > VM_MAX_KERNEL_ADDRESS)
 		panic("pmap_enter: toobig");
 
+#ifdef NO_HANDLE_LOCKED_PTES
 	/*
 	 * Page Directory table entry not valid, we need a new PT page
 	 */
@@ -1350,6 +1373,33 @@ pmap_enter(pmap, va, pa, prot, wired)
 			pmap->pm_pdir[PTDPTDI], va);
 		panic("invalid kernel page directory");
 	}
+#else
+	/*
+	 * This is here in the case that a page table page is not
+	 * resident, but we are inserting a page there.
+	 */
+	if ((va < VM_MIN_KERNEL_ADDRESS) &&
+	    (curproc != NULL) &&
+	    (pmap == &curproc->p_vmspace->vm_pmap)) {
+		vm_offset_t v;
+		v = (vm_offset_t) vtopte(va);
+
+		/* Fault the pte only if needed: */
+		if (*((int *)vtopte(v)) == 0)
+			(void) vm_fault(&curproc->p_vmspace->vm_map,
+			    trunc_page(v), VM_PROT_WRITE, FALSE);
+	}
+
+	/*
+	 * Page Directory table entry not valid, we need a new PT page
+	 */
+	pte = pmap_pte(pmap, va);
+	if (pte == NULL) {
+		printf("kernel page directory invalid pdir=%p, va=0x%lx\n",
+		    pmap->pm_pdir[PTDPTDI], va);
+		panic("invalid kernel page directory");
+	}
+#endif
 
 	origpte = *(vm_offset_t *)pte;
 	opa = origpte & PG_FRAME;
@@ -1393,7 +1443,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 	 * handle validating new mapping.
 	 */
 	if (opa) {
-		pmap_remove(pmap, va, va + PAGE_SIZE);
+		pmap_remove_page(pmap, va);
 		opa = 0;
 		origpte = 0;
 	}
@@ -1582,7 +1632,7 @@ pmap_enter_quick(pmap, va, pa)
 	pte = vtopte(va);
 	/* a fault on the page table might occur here */
 	if (*pte) {
-		pmap_remove(pmap, va, va + PAGE_SIZE);
+		pmap_remove_page(pmap, va);
 	}
 
 	pv = pa_to_pvh(pa);
@@ -1623,7 +1673,7 @@ pmap_enter_quick(pmap, va, pa)
 	return;
 }
 
-#define MAX_INIT_PT (512)
+#define MAX_INIT_PT (96)
 /*
  * pmap_object_init_pt preloads the ptes for a given object
  * into the specified pmap.  This eliminates the blast of soft
@@ -1708,6 +1758,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size)
 			}
 		}
 	}
+	return;
 }
 
 /*
@@ -1794,6 +1845,7 @@ pmap_prefault(pmap, addra, entry, object)
 			m->flags |= PG_MAPPED;
 			pmap_enter_quick(pmap, addr, VM_PAGE_TO_PHYS(m));
 			vm_page_unhold(m);
+
 		}
 	}
 }
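
Aside: the new pmap_enter() logic above relies on the i386 recursive page-directory slot. vtopte(va) yields the virtual address of va's PTE inside the page-table window, so applying the same mapping to that address lands on va's page-directory entry. That is what the check *((int *)vtopte(v)) == 0 is doing: asking whether the page table page itself is resident, and calling vm_fault() to pull it in before the PTE is written. This is the heart of the mlock fix, since wiring forces pmap_enter() against page tables that may be paged out. The sketch below shows only the address arithmetic; the PTDPTDI value is illustrative, not the real constant, and dereferencing these addresses only makes sense inside the kernel with the recursive slot installed.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PTDPTDI		959u	/* illustrative recursive PDE slot */
#define PTMAP_BASE	((uint32_t)PTDPTDI << 22)	/* base VA of the PTE window */

/* VA of the PTE that maps va: the arithmetic behind the patch's vtopte(). */
static uint32_t
pte_addr(uint32_t va)
{
	return (PTMAP_BASE + (va >> PAGE_SHIFT) * (uint32_t)sizeof(uint32_t));
}

int
main(void)
{
	uint32_t va = 0x0806f123;	/* arbitrary user address */
	uint32_t v = pte_addr(va);

	/*
	 * One application of the window maps va to its PTE; applying it
	 * to the PTE's own address climbs one level to va's PDE, because
	 * the window is itself mapped through the recursive PDE slot.
	 */
	printf("PTE of 0x%08x lives at 0x%08x; its PDE lives at 0x%08x\n",
	    va, v, pte_addr(v));
	return (0);
}
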
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 420b86a40361..f92a1680cc87 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
- *	$Id: pmap.c,v 1.80 1996/03/11 05:55:56 hsu Exp $
+ *	$Id: pmap.c,v 1.81 1996/03/13 00:39:45 dyson Exp $
  */
 
 /*
@@ -51,13 +51,6 @@
  *
  * Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
  */
-/*
- * Major modifications by John S. Dyson primarily to support
- * pageable page tables, eliminating pmap_attributes,
- * discontiguous memory pages, and using more efficient string
- * instructions. Jan 13, 1994.  Further modifications on Mar 2, 1994,
- * general clean-up and efficiency mods.
- */
 
 /*
  * Manages physical address maps.
@@ -131,8 +124,8 @@ static void init_pv_entries __P((int));
 #define pmap_pte_u(pte)	((*(int *)pte & PG_U) != 0)
 #define pmap_pte_v(pte)	((*(int *)pte & PG_V) != 0)
 
-#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
-#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
+#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
+#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
 
 /*
  * Given a map and a machine independent protection code,
@@ -179,6 +172,7 @@ static void pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
 		vm_offset_t pa));
 static int pmap_is_managed __P((vm_offset_t pa));
 static void pmap_remove_all __P((vm_offset_t pa));
+static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
 static __inline void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv,
 		vm_offset_t va));
 static int pmap_remove_pte __P((struct pmap *pmap, pt_entry_t *ptq,
@@ -318,7 +312,7 @@ pmap_is_managed(pa)
 		return 0;
 
 	for (i = 0; phys_avail[i + 1]; i += 2) {
-		if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
+		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
 			return 1;
 	}
 	return 0;
@@ -551,7 +545,9 @@ pmap_getpdir() {
 		--nfreepdir;
 		pdir = pdirlist;
 		pdirlist = (caddr_t *) *pdir;
+#if 0 /* Not needed anymore */
 		bzero( (caddr_t) pdir, PAGE_SIZE);
+#endif
 	} else {
 		pdir = (caddr_t *) kmem_alloc(kernel_map, PAGE_SIZE);
 	}
@@ -564,6 +560,24 @@ pmap_freepdir(void *pdir) {
 	if (nfreepdir > NFREEPDIR) {
 		kmem_free(kernel_map, (vm_offset_t) pdir, PAGE_SIZE);
 	} else {
+		int i;
+		pt_entry_t *s;
+		s = (pt_entry_t *) pdir;
+
+		/*
+		 * remove wired in kernel mappings
+		 */
+		bzero(s + KPTDI, nkpt * PTESIZE);
+		s[APTDPTDI] = 0;
+		s[PTDPTDI] = 0;
+
+#if defined(PMAP_DIAGNOSTIC)
+		for(i=0;i= UPT_MIN_ADDRESS)
-			i386prot |= PG_RW;
-	}
-		pbits = *(int *)pte;
-#if defined(PMAP_DIAGNOSTIC)
-		if (pmap_nw_modified((pt_entry_t) pbits)) {
-			printf("pmap_protect: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, pbits);
-		}
-#endif
-		if (pbits & PG_M) {
-			vm_page_t m;
-			vm_offset_t pa = pbits & PG_FRAME;
-			m = PHYS_TO_VM_PAGE(pa);
-			m->dirty = VM_PAGE_BITS_ALL;
-			*(int *)pte &= ~PG_M;
-			anychanged = 1;
-		}
-		pprot = pbits & PG_PROT;
-		if (pprot != i386prot) {
-			pmap_pte_set_prot(pte, i386prot);
-			anychanged = 1;
+
+		if (pbits & PG_RW) {
+			if (pbits & PG_M) {
+				vm_page_t m;
+				vm_offset_t pa = pbits & PG_FRAME;
+				m = PHYS_TO_VM_PAGE(pa);
+				m->dirty = VM_PAGE_BITS_ALL;
+			}
+			*(int *)pte &= ~(PG_M|PG_RW);
+			anychanged=1;
 		}
 		++sva;
+		if ( sva < pdnxt)
+			goto quickloop;
 	}
 	if (anychanged)
 		pmap_update();
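
Aside: both copies of the patch replace single-page pmap_remove(pmap, va, va + PAGE_SIZE) calls with pmap_remove_page(pmap, va). The prototype is added near the top of each file, but the body falls in a portion of the diff not shown here. A plausible shape for such a fast path, assuming the pmap_pte() and pmap_remove_pte() helpers this patch declares, is sketched below; treat it as a guess at the intent, not the committed code.

/*
 * Hypothetical shape only; the real body is not visible in this diff.
 * The idea is to skip pmap_remove()'s range-scan setup when exactly
 * one mapping is being torn down.
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va)
{
	pt_entry_t *ptq;

	/* Resolve the single PTE directly (NULL if no page table page). */
	ptq = pmap_pte(pmap, va);
	if (ptq && *ptq) {
		(void) pmap_remove_pte(pmap, ptq, va);
		pmap_update();	/* flush the stale translation */
	}
}
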
@@ -1341,6 +1363,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 	if (va > VM_MAX_KERNEL_ADDRESS)
 		panic("pmap_enter: toobig");
 
+#ifdef NO_HANDLE_LOCKED_PTES
 	/*
 	 * Page Directory table entry not valid, we need a new PT page
 	 */
@@ -1350,6 +1373,33 @@ pmap_enter(pmap, va, pa, prot, wired)
 			pmap->pm_pdir[PTDPTDI], va);
 		panic("invalid kernel page directory");
 	}
+#else
+	/*
+	 * This is here in the case that a page table page is not
+	 * resident, but we are inserting a page there.
+	 */
+	if ((va < VM_MIN_KERNEL_ADDRESS) &&
+	    (curproc != NULL) &&
+	    (pmap == &curproc->p_vmspace->vm_pmap)) {
+		vm_offset_t v;
+		v = (vm_offset_t) vtopte(va);
+
+		/* Fault the pte only if needed: */
+		if (*((int *)vtopte(v)) == 0)
+			(void) vm_fault(&curproc->p_vmspace->vm_map,
+			    trunc_page(v), VM_PROT_WRITE, FALSE);
+	}
+
+	/*
+	 * Page Directory table entry not valid, we need a new PT page
+	 */
+	pte = pmap_pte(pmap, va);
+	if (pte == NULL) {
+		printf("kernel page directory invalid pdir=%p, va=0x%lx\n",
+		    pmap->pm_pdir[PTDPTDI], va);
+		panic("invalid kernel page directory");
+	}
+#endif
 
 	origpte = *(vm_offset_t *)pte;
 	opa = origpte & PG_FRAME;
@@ -1393,7 +1443,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 	 * handle validating new mapping.
 	 */
 	if (opa) {
-		pmap_remove(pmap, va, va + PAGE_SIZE);
+		pmap_remove_page(pmap, va);
 		opa = 0;
 		origpte = 0;
 	}
@@ -1582,7 +1632,7 @@ pmap_enter_quick(pmap, va, pa)
 	pte = vtopte(va);
 	/* a fault on the page table might occur here */
 	if (*pte) {
-		pmap_remove(pmap, va, va + PAGE_SIZE);
+		pmap_remove_page(pmap, va);
 	}
 
 	pv = pa_to_pvh(pa);
@@ -1623,7 +1673,7 @@ pmap_enter_quick(pmap, va, pa)
 	return;
 }
 
-#define MAX_INIT_PT (512)
+#define MAX_INIT_PT (96)
 /*
  * pmap_object_init_pt preloads the ptes for a given object
  * into the specified pmap.  This eliminates the blast of soft
@@ -1708,6 +1758,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size)
 			}
 		}
 	}
+	return;
 }
 
 /*
@@ -1794,6 +1845,7 @@ pmap_prefault(pmap, addra, entry, object)
 			m->flags |= PG_MAPPED;
 			pmap_enter_quick(pmap, addr, VM_PAGE_TO_PHYS(m));
 			vm_page_unhold(m);
+
 		}
 	}
 }
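
Aside: a quick user-space way to exercise the path the commit message refers to is to wire a large fresh mapping, which forces pmap_enter() to run with wired pages against page-table pages that are not yet resident. The test below uses only standard BSD interfaces; mlock() typically requires root privileges or a sufficient memory-lock resource limit.

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	size_t len = 8 * 1024 * 1024;	/* big enough to span several page tables */
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (1);
	}

	/* Wire the region: before this patch, this path could panic. */
	if (mlock(p, len) == -1) {
		perror("mlock");	/* needs root or a raised memorylocked limit */
		return (1);
	}
	p[0] = 1;			/* touch a wired page */

	if (munlock(p, len) == -1)
		perror("munlock");
	munmap(p, len);
	printf("wired and released %lu bytes\n", (unsigned long)len);
	return (0);
}
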