Disallow preemptive creation of wired superpage mappings.
There are some unusual cases where a process may cause an mlock()ed
range of memory to be unmapped.  If the application subsequently
faults on that region, the handler may attempt to create a superpage
mapping backed by the resident, wired pages.  However, the pmap code
responsible for creating such a mapping (pmap_enter_pde() on i386 and
amd64) does not ensure that a leaf page table page is available if the
superpage is later demoted; the demotion operation must therefore
perform a non-blocking page allocation and must unmap the entire
superpage if the allocation fails.  The pmap layer ensures that this
can never happen for wired mappings, and so the case described above
breaks that invariant.

For now, simply ensure that the MI fault handler never attempts to
create a wired superpage except via promotion.

Reviewed by:	kib
Reported by:	syzbot+292d3b0416c27c131505@syzkaller.appspotmail.com
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D19670
commit 1ab80ddad8
parent 41a70f9371
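The fix itself is small: both machine-independent paths that can
install a superpage mapping ahead of a promotion now also test whether
the mapping is wired.  A condensed, hypothetical rendering of the gate
(the helper name superpage_ok and its parameters are illustrative
only; the committed checks live inline in vm_fault_soft_fast() and
vm_fault_populate() in sys/vm/vm_fault.c):

/*
 * Condensed sketch of the condition this commit tightens.  Not the
 * committed code; the real checks are inline in the fault handlers.
 */
static bool
superpage_ok(int psind, bool aligned, bool ps_enabled, bool wired)
{
	if (psind == 0 || !aligned || !ps_enabled)
		return (false);
	/*
	 * New with this commit: never create a wired superpage
	 * preemptively.  A wired superpage may arise only via
	 * pmap-level promotion, which guarantees that a later
	 * demotion cannot fail.
	 */
	if (wired)
		return (false);
	return (true);
}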
sys/amd64/amd64/pmap.c

@@ -5308,6 +5308,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
 	pt_entry_t PG_G, PG_RW, PG_V;
 	vm_page_t mt, pdpg;
 
+	KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0,
+	    ("pmap_enter_pde: cannot create wired user mapping"));
 	PG_G = pmap_global_bit(pmap);
 	PG_RW = pmap_rw_bit(pmap);
 	KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
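Both pmap changes are assertions rather than runtime error handling:
KASSERT(), from sys/systm.h, is compiled in only for kernels built
with options INVARIANTS, and panics with its printf-style message when
the expression is false.  A generic (hypothetical) example of the
idiom:

/* Panics with the formatted message if the check fails; compiles
 * to nothing in kernels built without INVARIANTS. */
KASSERT(obj->ref_count > 0, ("object %p: ref_count underflow", obj));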
sys/i386/i386/pmap.c

@@ -3882,6 +3882,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
 	    ("pmap_enter_pde: newpde is missing PG_M"));
+	KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0,
+	    ("pmap_enter_pde: cannot create wired user mapping"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	pde = pmap_pde(pmap, va);
 	oldpde = *pde;
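The i386 hunk differs from the amd64 one in using the PG_M and PG_RW
constants directly.  On amd64 the pmap also manages nested (EPT) page
tables for bhyve guests, where these bits occupy different positions,
so the masks are fetched per-pmap through accessors.  A simplified
sketch of that accessor, assuming the amd64 layout (not verbatim; the
real function also handles PT_RVI and A/D-bit emulation):

static pt_entry_t
pmap_modified_bit(pmap_t pmap)
{
	switch (pmap->pm_type) {
	case PT_X86:
		return (X86_PG_M);	/* ordinary x86 page tables */
	case PT_EPT:
		return (EPT_PG_M);	/* Intel EPT format used by bhyve */
	default:
		panic("pmap_modified_bit: invalid pm_type %d",
		    pmap->pm_type);
	}
}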
sys/vm/vm_fault.c

@@ -294,7 +294,7 @@ vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
 	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
 	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
 	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
-	    (pagesizes[m_super->psind] - 1)) &&
+	    (pagesizes[m_super->psind] - 1)) && !wired &&
 	    pmap_ps_enabled(fs->map->pmap)) {
 		flags = PS_ALL_VALID;
 		if ((prot & VM_PROT_WRITE) != 0) {
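The alignment test in this hunk relies on the power-of-two rounding
macros from sys/sys/param.h.  A worked example, assuming a 2MB
superpage (pagesizes[1] == 2*1024*1024 on amd64) and an illustrative
fault address:

/* From sys/sys/param.h; y must be a power of two: */
#define	rounddown2(x, y)	((x) & ~((y) - 1))
#define	roundup2(x, y)		(((x) + ((y) - 1)) & ~((y) - 1))

/*
 * For a fault at vaddr = 0x40306000 inside a 2MB superpage:
 *   rounddown2(0x40306000, 0x200000) == 0x40200000  (must be >= entry start)
 *   roundup2(0x40306001, 0x200000)   == 0x40400000  (must be <= entry end)
 *
 * The final test, (vaddr & (pagesizes[psind] - 1)) ==
 * (VM_PAGE_TO_PHYS(m) & (pagesizes[psind] - 1)), checks that the
 * virtual and physical addresses are congruent modulo the superpage
 * size, i.e. that a naturally aligned superpage mapping is possible.
 */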
sys/vm/vm_fault.c

@@ -469,7 +469,7 @@ vm_fault_populate(struct faultstate *fs, vm_prot_t prot, int fault_type,
 	psind = m->psind;
 	if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
 	    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
-	    !pmap_ps_enabled(fs->map->pmap)))
+	    !pmap_ps_enabled(fs->map->pmap) || wired))
 		psind = 0;
 #else
 	psind = 0;
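Note that the change only forbids preemptive wired superpages;
promotion can still produce one safely, because pmap_promote_pde()
saves the old leaf page table page for reuse.  The demotion-side logic
that motivates the invariant looks roughly like this (simplified from
the amd64 pmap_demote_pde path of this era; not verbatim):

/* If no leaf page table page was saved at promotion time, demotion
 * must allocate one without sleeping; on failure the only recovery is
 * to destroy the whole 2MB mapping, which is never acceptable for
 * wired memory, hence the assertion. */
if ((mpte = pmap_remove_pt_page(pmap, va)) == NULL) {
	KASSERT((oldpde & PG_W) == 0,
	    ("pmap_demote_pde: page table page for a wired mapping"
	    " is missing"));
	mpte = vm_page_alloc(NULL, pmap_pde_pindex(va),
	    VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
	if (mpte == NULL) {
		/* Non-blocking allocation failed: unmap the superpage. */
		pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free, lockp);
		return (FALSE);
	}
}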