Implement pmap_unwire(). See r268327 for the motivation behind this change.
This commit is contained in:
parent
c2307b7d21
commit
1c51afe84c
@ -297,6 +297,7 @@ void moea_release(mmu_t, pmap_t);
|
||||
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
|
||||
void moea_remove_all(mmu_t, vm_page_t);
|
||||
void moea_remove_write(mmu_t, vm_page_t);
|
||||
void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
|
||||
void moea_zero_page(mmu_t, vm_page_t);
|
||||
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
|
||||
void moea_zero_page_idle(mmu_t, vm_page_t);
|
||||
@ -345,6 +346,7 @@ static mmu_method_t moea_methods[] = {
|
||||
MMUMETHOD(mmu_remove_all, moea_remove_all),
|
||||
MMUMETHOD(mmu_remove_write, moea_remove_write),
|
||||
MMUMETHOD(mmu_sync_icache, moea_sync_icache),
|
||||
MMUMETHOD(mmu_unwire, moea_unwire),
|
||||
MMUMETHOD(mmu_zero_page, moea_zero_page),
|
||||
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
|
||||
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
|
||||
@ -1035,6 +1037,24 @@ moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
|
||||
PMAP_UNLOCK(pm);
|
||||
}
|
||||
|
||||
/*
 * Clear the wired attribute from the mappings covering [sva, eva) in pmap
 * 'pm'.  Every valid mapping in the range must currently be wired (the
 * caller guarantees this; a violation is a kernel bug, hence the panic).
 * Wiring is purely a software attribute on 32-bit OEA, so no hardware PTE
 * or TLB maintenance is needed here.
 */
void
moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry key, *pvo;

	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	/*
	 * Walk the pmap's PVO tree from the first entry at or above sva;
	 * RB_NFIND skips holes, so only valid mappings are visited.
	 */
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		pm->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pm);
}
|
||||
|
||||
void
|
||||
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
|
||||
{
|
||||
|
@ -312,6 +312,7 @@ void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
|
||||
void moea64_remove_pages(mmu_t, pmap_t);
|
||||
void moea64_remove_all(mmu_t, vm_page_t);
|
||||
void moea64_remove_write(mmu_t, vm_page_t);
|
||||
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
|
||||
void moea64_zero_page(mmu_t, vm_page_t);
|
||||
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
|
||||
void moea64_zero_page_idle(mmu_t, vm_page_t);
|
||||
@ -359,6 +360,7 @@ static mmu_method_t moea64_methods[] = {
|
||||
MMUMETHOD(mmu_remove_all, moea64_remove_all),
|
||||
MMUMETHOD(mmu_remove_write, moea64_remove_write),
|
||||
MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
|
||||
MMUMETHOD(mmu_unwire, moea64_unwire),
|
||||
MMUMETHOD(mmu_zero_page, moea64_zero_page),
|
||||
MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
|
||||
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
|
||||
@ -1076,6 +1078,41 @@ moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
|
||||
PMAP_UNLOCK(pm);
|
||||
}
|
||||
|
||||
/*
 * Clear the wired attribute from the mappings covering [sva, eva) in pmap
 * 'pm'.  Each mapping in the range must be wired both in software
 * (PVO_WIRED) and in its page-table entry (LPTE_WIRED); either flag missing
 * indicates pmap corruption and panics.  Lock order: table read lock is
 * taken before the pmap lock, matching the rest of moea64.
 */
void
moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry key, *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	/* Iterate only the valid mappings at or above sva, up to eva. */
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
			    pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
			panic("moea64_unwire: pte %p is missing LPTE_WIRED",
			    &pvo->pvo_pte.lpte);
		pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		/* Push the cleared flag out to the hardware PTE, if any. */
		if ((pt = MOEA64_PVO_TO_PTE(mmu, pvo)) != -1) {
			/*
			 * The PTE's wired attribute is not a hardware
			 * feature, so there is no need to invalidate any TLB
			 * entries.
			 */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		}
		pm->pm_stats.wired_count--;
	}
	UNLOCK_TABLE_RD();
	PMAP_UNLOCK(pm);
}
|
||||
|
||||
/*
|
||||
* This goes through and sets the physical address of our
|
||||
* special scratch PTE to the PA we want to zero or copy. Because
|
||||
|
@ -306,6 +306,7 @@ static void mmu_booke_release(mmu_t, pmap_t);
|
||||
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
|
||||
static void mmu_booke_remove_all(mmu_t, vm_page_t);
|
||||
static void mmu_booke_remove_write(mmu_t, vm_page_t);
|
||||
static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
|
||||
static void mmu_booke_zero_page(mmu_t, vm_page_t);
|
||||
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
|
||||
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
|
||||
@ -361,6 +362,7 @@ static mmu_method_t mmu_booke_methods[] = {
|
||||
MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
|
||||
MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
|
||||
MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
|
||||
MMUMETHOD(mmu_unwire, mmu_booke_unwire),
|
||||
MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
|
||||
MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
|
||||
MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
|
||||
@ -2434,6 +2436,36 @@ mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
|
||||
PMAP_UNLOCK(pmap);
|
||||
}
|
||||
|
||||
/*
 * Clear the wired attribute from the mappings for the specified range of
 * addresses in the given pmap.  Every valid mapping within that range must
 * have the wired attribute set.  In contrast, invalid mappings cannot have
 * the wired attribute set, so they are ignored.
 *
 * The wired attribute of the page table entry is not a hardware feature, so
 * there is no need to invalidate any TLB entries.
 */
static void
mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va;
	pte_t *pte;

	PMAP_LOCK(pmap);
	/*
	 * Booke has no sorted PVO tree, so step page by page; pte_find()
	 * returns NULL for unmapped pages, which are skipped silently.
	 */
	for (va = sva; va < eva; va += PAGE_SIZE) {
		if ((pte = pte_find(mmu, pmap, va)) != NULL &&
		    PTE_ISVALID(pte)) {
			/* A valid-but-unwired PTE in this range is a bug. */
			if (!PTE_ISWIRED(pte))
				panic("mmu_booke_unwire: pte %p isn't wired",
				    pte);
			pte->flags &= ~PTE_WIRED;
			pmap->pm_stats.wired_count--;
		}
	}
	PMAP_UNLOCK(pmap);

}
|
||||
|
||||
/*
|
||||
* Return true if the pmap's pv is one of the first 16 pvs linked to from this
|
||||
* page. This count may be changed upwards or downwards in the future; it is
|
||||
|
@ -627,6 +627,22 @@ METHOD void remove_pages {
|
||||
} DEFAULT mmu_null_remove_pages;
|
||||
|
||||
|
||||
/**
 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.  Every valid mapping in the range is
 * expected to be wired; implementations may panic otherwise.
 *
 * @param _mmu		mmu instance (supplied by the dispatch layer)
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 */
METHOD void unwire {
	mmu_t _mmu;
	pmap_t _pmap;
	vm_offset_t _start;
	vm_offset_t _end;
};
|
||||
|
||||
|
||||
/**
|
||||
* @brief Zero a physical page. It is not assumed that the page is mapped,
|
||||
* so a temporary (or direct) mapping may need to be used.
|
||||
|
@ -360,6 +360,14 @@ pmap_remove_write(vm_page_t m)
|
||||
MMU_REMOVE_WRITE(mmu_obj, m);
|
||||
}
|
||||
|
||||
/*
 * MI pmap entry point: trace the call, then dispatch to the active MMU
 * implementation's unwire method (moea, moea64, or booke).
 */
void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_UNWIRE(mmu_obj, pmap, start, end);
}
|
||||
|
||||
void
|
||||
pmap_zero_page(vm_page_t m)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user