Clean up a couple of MD warts in vm_fault_populate():

--Eliminate a big #ifdef that encompassed all currently-supported
architectures except mips and powerpc32. It applied to the case in
which we've allocated a superpage but the pager-populated range is
insufficient for a superpage mapping. For platforms that don't support
superpages the check should be inexpensive, as we shouldn't get a
superpage in the first place. Make the normal-page fallback logic
identical for all platforms and provide a simple implementation of
pmap_ps_enabled() for MIPS and Book-E/AIM32 powerpc.

--Apply the logic for handling pmap_enter() failure when a superpage
mapping can't be created because of an additional protection policy.
Use KERN_PROTECTION_FAILURE instead of KERN_FAILURE for this case, and
note Intel PKU on amd64 as the first example of such a policy.

Reviewed by:	kib, markj, bdragon
Differential Revision:	https://reviews.freebsd.org/D29439
commit 8dc8feb53d
parent 74f6cb0f31
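
Before the per-file hunks, here is a minimal sketch of the
machine-independent flow these two changes leave in vm_fault_populate():
the unconditional psind check backed by pmap_ps_enabled(), and the
fallback to base pages when pmap_enter() returns KERN_PROTECTION_FAILURE.
This is a simplified userspace model, not the kernel code; the stub
pmap_enter(), the constants, and main() are assumptions for illustration
only (the real change is in the vm_fault_populate() hunks below).

/*
 * Userspace model of the MI superpage logic in vm_fault_populate().
 * Names mirror the diff; the stub pmap_enter() and the constants are
 * illustrative assumptions, not kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define	KERN_SUCCESS		0
#define	KERN_PROTECTION_FAILURE	2	/* illustrative; see the kernel headers */
#define	PAGE_SIZE		4096UL
#define	SUPERPAGE		(2UL * 1024 * 1024)	/* e.g. amd64 2M */

/* Platforms without superpage support now just provide a stub like this. */
static bool
pmap_ps_enabled(void)
{
	return (true);	/* flip to false to model MIPS/Book-E/AIM32 */
}

/* Fake pmap_enter(): a superpage is refused when PKU keys differ. */
static int
pmap_enter(unsigned long va, unsigned long size, bool pkru_same)
{
	(void)va;
	return (size > PAGE_SIZE && !pkru_same ?
	    KERN_PROTECTION_FAILURE : KERN_SUCCESS);
}

int
main(void)
{
	unsigned long vaddr = 0x200000, off;	/* superpage aligned */
	bool wired = false, pkru_same = false;
	int psind = 1, rv;			/* pager gave us a superpage */

	/* The same check now runs on every platform, with no #ifdef. */
	if (psind > 0 && ((vaddr & (SUPERPAGE - 1)) != 0 ||
	    !pmap_ps_enabled() || wired))
		psind = 0;

	rv = pmap_enter(vaddr, psind > 0 ? SUPERPAGE : PAGE_SIZE, pkru_same);
	if (psind > 0 && rv == KERN_PROTECTION_FAILURE) {
		/*
		 * Protection policy (e.g. Intel PKU on amd64) blocked the
		 * superpage; revert to base-page mappings.
		 */
		for (off = 0; off < SUPERPAGE; off += PAGE_SIZE)
			rv = pmap_enter(vaddr + off, PAGE_SIZE, pkru_same);
	}
	printf("psind=%d rv=%d\n", psind, rv);
	return (rv == KERN_SUCCESS ? 0 : 1);
}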
@@ -7147,7 +7147,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
 	 */
 	if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
 		pmap_abort_ptp(pmap, va, pdpg);
-		return (KERN_FAILURE);
+		return (KERN_PROTECTION_FAILURE);
 	}
 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
 		newpde &= ~X86_PG_PKU_MASK;
@@ -190,6 +190,12 @@ pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
 	return (0);
 }
 
+static inline bool
+pmap_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 #endif /* _KERNEL */
 
 #endif /* !LOCORE */
@@ -327,6 +327,7 @@ void moea_scan_init(void);
 vm_offset_t moea_quick_enter_page(vm_page_t m);
 void moea_quick_remove_page(vm_offset_t addr);
 boolean_t moea_page_is_mapped(vm_page_t m);
+bool moea_ps_enabled(pmap_t pmap);
 static int moea_map_user_ptr(pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
 static int moea_decode_kernel_ptr(vm_offset_t addr,
@@ -370,6 +371,7 @@ static struct pmap_funcs moea_methods = {
 	.quick_enter_page = moea_quick_enter_page,
 	.quick_remove_page = moea_quick_remove_page,
 	.page_is_mapped = moea_page_is_mapped,
+	.ps_enabled = moea_ps_enabled,
 
 	/* Internal interfaces */
 	.bootstrap = moea_bootstrap,
@@ -1122,6 +1124,12 @@ moea_page_is_mapped(vm_page_t m)
 	return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
 }
 
+bool
+moea_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested.  If specified the page
@@ -354,6 +354,7 @@ static int mmu_booke_decode_kernel_ptr(vm_offset_t addr,
     int *is_user, vm_offset_t *decoded_addr);
 static void mmu_booke_page_array_startup(long);
 static boolean_t mmu_booke_page_is_mapped(vm_page_t m);
+static bool mmu_booke_ps_enabled(pmap_t pmap);
 
 static struct pmap_funcs mmu_booke_methods = {
 	/* pmap dispatcher interface */
@@ -396,6 +397,7 @@ static struct pmap_funcs mmu_booke_methods = {
 	.quick_remove_page = mmu_booke_quick_remove_page,
 	.page_array_startup = mmu_booke_page_array_startup,
 	.page_is_mapped = mmu_booke_page_is_mapped,
+	.ps_enabled = mmu_booke_ps_enabled,
 
 	/* Internal interfaces */
 	.bootstrap = mmu_booke_bootstrap,
@@ -1226,6 +1228,12 @@ mmu_booke_page_is_mapped(vm_page_t m)
 	return (!TAILQ_EMPTY(&(m)->md.pv_list));
 }
 
+static bool
+mmu_booke_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 /*
  * Initialize pmap associated with process 0.
  */
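
The powerpc hunks above touch each file in three places because the new
method has to be declared, implemented, and hooked into the pmap_funcs
dispatch table. A rough, runnable userspace model of that pattern
follows; the trimmed struct pmap_funcs, the MI-side wrapper, and main()
are assumptions for illustration, not the kernel's actual definitions.

/*
 * Userspace model of the method-table dispatch used by the powerpc pmap.
 * The trimmed struct and the wrapper are illustrative assumptions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pmap;				/* opaque in this model */

struct pmap_funcs {
	/* The real table carries many more methods; one is enough here. */
	bool	(*ps_enabled)(struct pmap *);
};

/* Book-E-style implementation: this platform never uses superpages. */
static bool
mmu_booke_ps_enabled(struct pmap *pmap)
{
	(void)pmap;
	return (false);
}

static const struct pmap_funcs mmu_booke_methods = {
	.ps_enabled = mmu_booke_ps_enabled,
};

/* MI-side view: what a pmap_ps_enabled() call resolves to. */
static bool
pmap_ps_enabled(const struct pmap_funcs *mmu, struct pmap *pmap)
{
	return (mmu->ps_enabled != NULL && mmu->ps_enabled(pmap));
}

int
main(void)
{
	printf("ps_enabled: %d\n",
	    (int)pmap_ps_enabled(&mmu_booke_methods, NULL));
	return (0);
}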
@@ -542,17 +542,13 @@ vm_fault_populate(struct faultstate *fs)
 	    pidx <= pager_last;
 	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
 		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
-#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
-    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv) || \
-    defined(__powerpc64__)
+
 		psind = m->psind;
 		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
 		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
 		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
 			psind = 0;
-#else
-		psind = 0;
-#endif
+
 		npages = atop(pagesizes[psind]);
 		for (i = 0; i < npages; i++) {
 			vm_fault_populate_check_page(&m[i]);
@@ -561,8 +557,18 @@ vm_fault_populate(struct faultstate *fs)
 		VM_OBJECT_WUNLOCK(fs->first_object);
 		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
 		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
-#if defined(__amd64__)
-		if (psind > 0 && rv == KERN_FAILURE) {
+
+		/*
+		 * pmap_enter() may fail for a superpage mapping if additional
+		 * protection policies prevent the full mapping.
+		 * For example, this will happen on amd64 if the entire
+		 * address range does not share the same userspace protection
+		 * key.  Revert to single-page mappings if this happens.
+		 */
+		MPASS(rv == KERN_SUCCESS ||
+		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
+		if (__predict_false(psind > 0 &&
+		    rv == KERN_PROTECTION_FAILURE)) {
 			for (i = 0; i < npages; i++) {
 				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
 				    &m[i], fs->prot, fs->fault_type |
@@ -570,9 +576,7 @@ vm_fault_populate(struct faultstate *fs)
 				MPASS(rv == KERN_SUCCESS);
 			}
 		}
-#else
-		MPASS(rv == KERN_SUCCESS);
-#endif
+
 		VM_OBJECT_WLOCK(fs->first_object);
 		for (i = 0; i < npages; i++) {
 			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)