Merge r216333 and r216555 from the native pmap

When r207410 eliminated the acquisition and release of the page queues
  lock from pmap_extract_and_hold(), it didn't take into account that
  pmap_pte_quick() sometimes requires the page queues lock to be held.
  This change reimplements pmap_extract_and_hold() such that it no
  longer uses pmap_pte_quick(), and thus never requires the page queues
  lock.

Merge r177525 from the native pmap
  Prevent overflow in the calculation of the virtual address of the
  next page directory boundary (pdnxt).  The overflow causes a
  wraparound, with consequent corruption of the mapping of (almost)
  the whole address space.

Strictly speaking, r177525 is not required by the Xen pmap because the
hypervisor steals the uppermost region of the normal kernel address
space.  I am nonetheless merging it in order to reduce the number of
unnecessary differences between the native and Xen pmap implementations.

Tested by:	sbruno
This commit is contained in:
Alan Cox 2011-12-30 18:16:15 +00:00
parent 84143cee4f
commit c65205a6e2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=229007

View File

@ -1122,7 +1122,7 @@ vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
pd_entry_t pde;
pt_entry_t pte;
pt_entry_t pte, *ptep;
vm_page_t m;
vm_paddr_t pa;
@ -1142,21 +1142,17 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
vm_page_hold(m);
}
} else {
sched_pin();
pte = PT_GET(pmap_pte_quick(pmap, va));
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
if ((pte & PG_V) &&
ptep = pmap_pte(pmap, va);
pte = PT_GET(ptep);
pmap_pte_release(ptep);
if (pte != 0 &&
((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
&pa)) {
sched_unpin();
&pa))
goto retry;
}
m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
vm_page_hold(m);
}
sched_unpin();
}
}
PA_UNLOCK_COND(pa);
@ -2316,6 +2312,8 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
* Calculate index for next page table.
*/
pdnxt = (sva + NBPDR) & ~PDRMASK;
if (pdnxt < sva)
pdnxt = eva;
if (pmap->pm_stats.resident_count == 0)
break;
@ -2471,6 +2469,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
u_int pdirindex;
pdnxt = (sva + NBPDR) & ~PDRMASK;
if (pdnxt < sva)
pdnxt = eva;
pdirindex = sva >> PDRSHIFT;
ptpaddr = pmap->pm_pdir[pdirindex];
@ -3172,6 +3172,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
("pmap_copy: invalid to pmap_copy page tables"));
pdnxt = (addr + NBPDR) & ~PDRMASK;
if (pdnxt < addr)
pdnxt = end_addr;
ptepindex = addr >> PDRSHIFT;
srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]);