Implement pmap changes suggested by alc@:

1. Move the dirty-bit emulation code that is duplicated for kernel and
   user addresses in trap.c into a new function, pmap_emulate_modified(),
   in pmap.c.
2. While doing dirty-bit emulation, it is not necessary to update the TLB
   entry on all CPUs with smp_rendezvous(); it is enough to update the TLB
   entry on the current CPU and let the other CPUs update theirs lazily
   when they take an exception.

Reviewed by:	alc, neel
commit 619fede20e
parent b47b62ea82
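To make point 2 concrete, here is a minimal sketch, not the committed code, of the two update strategies for the dirty-bit fixup. pte_set(), PTE_D, pmap_update_page() (the smp_rendezvous()-based broadcast on SMP) and pmap_update_page_local() are taken from the hunks below; the two wrapper functions and their names are hypothetical, and the snippet assumes it sits inside pmap.c where those helpers are visible.

/* Before: push the updated, now-dirty PTE into every CPU's TLB. */
static void
dirty_fixup_eager(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
{
        pte_set(pte, PTE_D);
        pmap_update_page(pmap, va, *pte);       /* smp_rendezvous() on SMP */
}

/* After: touch only the faulting CPU's TLB. */
static void
dirty_fixup_lazy(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
{
        pte_set(pte, PTE_D);
        pmap_update_page_local(pmap, va, *pte);
        /*
         * A remote CPU that still caches the stale, clean entry will take
         * its own TLB-modified exception and reload from the dirty PTE.
         */
}

Skipping the rendezvous is safe precisely because the MIPS TLB-modified exception gives each CPU its own chance to resynchronize, which is what the new pmap_emulate_modified() below relies on.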
@@ -157,16 +157,14 @@ void pmap_bootstrap(void);
 void *pmap_mapdev(vm_offset_t, vm_size_t);
 void pmap_unmapdev(vm_offset_t, vm_size_t);
 vm_offset_t pmap_steal_memory(vm_size_t size);
-void pmap_set_modified(vm_offset_t pa);
 int page_is_managed(vm_offset_t pa);
 void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 void pmap_kremove(vm_offset_t va);
 void *pmap_kenter_temporary(vm_paddr_t pa, int i);
 void pmap_kenter_temporary_free(vm_paddr_t pa);
 int pmap_compute_pages_to_dump(void);
-void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
 void pmap_flush_pvcache(vm_page_t m);

+int pmap_emulate_modified(pmap_t pmap, vm_offset_t va);
 #endif /* _KERNEL */

 #endif /* !LOCORE */
@@ -180,6 +180,7 @@ static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
     vm_offset_t va, vm_page_t m);
+static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
 static void pmap_invalidate_all(pmap_t pmap);
 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
@@ -704,7 +705,7 @@ struct pmap_update_page_arg {
         pt_entry_t pte;
 };

-void
+static void
 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
 {
         struct pmap_update_page_arg arg;
@@ -723,7 +724,7 @@ pmap_update_page_action(void *arg)
         pmap_update_page_local(p->pmap, p->va, p->pte);
 }
 #else
-void
+static void
 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
 {

@@ -3265,15 +3266,49 @@ init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
 }

 /*
- * pmap_set_modified:
+ * pmap_emulate_modified : do dirty bit emulation
  *
- * Sets the page modified and reference bits for the specified page.
+ * On SMP, update just the local TLB, other CPUs will update their
+ * TLBs from PTE lazily, if they get the exception.
+ * Returns 0 in case of success, 1 if the page is read only and we
+ * need to fault.
  */
-void
-pmap_set_modified(vm_offset_t pa)
+int
+pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
 {
+        vm_page_t m;
+        pt_entry_t *pte;
+        vm_offset_t pa;

-        PHYS_TO_VM_PAGE(pa)->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
+        PMAP_LOCK(pmap);
+        pte = pmap_pte(pmap, va);
+        if (pte == NULL)
+                panic("pmap_emulate_modified: can't find PTE");
+#ifdef SMP
+        /* It is possible that some other CPU changed m-bit */
+        if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
+                pmap_update_page_local(pmap, va, *pte);
+                PMAP_UNLOCK(pmap);
+                return (0);
+        }
+#else
+        if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
+                panic("pmap_emulate_modified: invalid pte");
+#endif
+        if (pte_test(pte, PTE_RO)) {
+                /* write to read only page in the kernel */
+                PMAP_UNLOCK(pmap);
+                return (1);
+        }
+        pte_set(pte, PTE_D);
+        pmap_update_page_local(pmap, va, *pte);
+        pa = TLBLO_PTE_TO_PA(*pte);
+        if (!page_is_managed(pa))
+                panic("pmap_emulate_modified: unmanaged page");
+        m = PHYS_TO_VM_PAGE(pa);
+        m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
+        PMAP_UNLOCK(pmap);
+        return (0);
 }

 /*
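The return-value contract of pmap_emulate_modified() is what lets trap.c drop both hand-rolled copies, as the next hunks show. A hedged sketch of the caller side, condensed from that diff; the helper name handle_tlb_mod() is hypothetical and the surrounding trap() control flow is abbreviated.

/*
 * Returns 0 if pmap_emulate_modified() handled the TLB-modified exception
 * (the PTE was valid and writable, the local TLB was refreshed, and the
 * faulting store can simply be retried).  Returns 1 if the page is
 * read-only, in which case the caller sets ftype = VM_PROT_WRITE and goes
 * through the normal vm_fault() path.
 */
static int
handle_tlb_mod(pmap_t pmap, vm_offset_t badvaddr)
{

        if (pmap_emulate_modified(pmap, badvaddr) != 0)
                return (1);     /* read-only page: take the real fault */
        return (0);             /* dirty bit set: retry the instruction */
}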
@@ -281,7 +281,6 @@ trap(struct trapframe *trapframe)
         struct thread *td = curthread;
         struct proc *p = curproc;
         vm_prot_t ftype;
-        pt_entry_t *pte;
         pmap_t pmap;
         int access_type;
         ksiginfo_t ksi;
@@ -372,82 +371,24 @@ trap(struct trapframe *trapframe)
         case T_TLB_MOD:
                 /* check for kernel address */
                 if (KERNLAND(trapframe->badvaddr)) {
-                        vm_offset_t pa;
-
-                        PMAP_LOCK(kernel_pmap);
-                        pte = pmap_pte(kernel_pmap, trapframe->badvaddr);
-                        if (pte == NULL)
-                                panic("trap: ktlbmod: can't find PTE");
-#ifdef SMP
-                        /* It is possible that some other CPU changed m-bit */
-                        if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
-                                pmap_update_page(kernel_pmap,
-                                    trapframe->badvaddr, *pte);
-                                PMAP_UNLOCK(kernel_pmap);
-                                return (trapframe->pc);
-                        }
-#else
-                        if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
-                                panic("trap: ktlbmod: invalid pte");
-#endif
-                        if (pte_test(pte, PTE_RO)) {
-                                /* write to read only page in the kernel */
+                        if (pmap_emulate_modified(kernel_pmap,
+                            trapframe->badvaddr) != 0) {
                                 ftype = VM_PROT_WRITE;
-                                PMAP_UNLOCK(kernel_pmap);
                                 goto kernel_fault;
                         }
-                        pte_set(pte, PTE_D);
-                        pmap_update_page(kernel_pmap, trapframe->badvaddr, *pte);
-                        pa = TLBLO_PTE_TO_PA(*pte);
-                        if (!page_is_managed(pa))
-                                panic("trap: ktlbmod: unmanaged page");
-                        pmap_set_modified(pa);
-                        PMAP_UNLOCK(kernel_pmap);
                         return (trapframe->pc);
                 }
                 /* FALLTHROUGH */

         case T_TLB_MOD + T_USER:
-                {
-                        vm_offset_t pa;
-
-                        pmap = &p->p_vmspace->vm_pmap;
-
-                        PMAP_LOCK(pmap);
-                        pte = pmap_pte(pmap, trapframe->badvaddr);
-                        if (pte == NULL)
-                                panic("trap: utlbmod: can't find PTE");
-#ifdef SMP
-                        /* It is possible that some other CPU changed m-bit */
-                        if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
-                                pmap_update_page(pmap, trapframe->badvaddr, *pte);
-                                PMAP_UNLOCK(pmap);
-                                goto out;
-                        }
-#else
-                        if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
-                                panic("trap: utlbmod: invalid pte");
-#endif
-
-                        if (pte_test(pte, PTE_RO)) {
-                                /* write to read only page */
-                                ftype = VM_PROT_WRITE;
-                                PMAP_UNLOCK(pmap);
-                                goto dofault;
-                        }
-                        pte_set(pte, PTE_D);
-                        pmap_update_page(pmap, trapframe->badvaddr, *pte);
-                        pa = TLBLO_PTE_TO_PA(*pte);
-                        if (!page_is_managed(pa))
-                                panic("trap: utlbmod: unmanaged page");
-                        pmap_set_modified(pa);
-
-                        PMAP_UNLOCK(pmap);
-                        if (!usermode) {
-                                return (trapframe->pc);
-                        }
-                        goto out;
+                pmap = &p->p_vmspace->vm_pmap;
+                if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) {
+                        ftype = VM_PROT_WRITE;
+                        goto dofault;
                 }
+                if (!usermode)
+                        return (trapframe->pc);
+                goto out;

         case T_TLB_LD_MISS:
         case T_TLB_ST_MISS: