Fix L2 PTE access permissions management.

Keep following access permissions:

APX     AP     Kernel     User
 1      01       R         N
 1      10       R         R
 0      01      R/W        N
 0      11      R/W       R/W

Avoid using reserved APX|AP settings on ARMv6:
- In the case of unprivileged (user) access without write permission,
  the access permission bits were being set to APX|AP = 111, a value
  that is reserved on ARMv6 (though valid on ARMv7).

Fix up faulting userland accesses properly:
- A wrong condition in pmap_fault_fixup() caused every genuine,
  unprivileged access to be fixed up, instead of skipping the fix-up
  and returning. Starting from now we ensure the proper reaction to
  illicit user accesses.

L2_S_PROT_R and L2_S_PROT_U names might be misleading as they do not
reflect real permission levels. It will be clarified in following
patches (switch to AP[2:1] permissions model).

Obtained from: Semihalf
This commit is contained in:
Grzegorz Bernacki 2013-05-06 15:30:34 +00:00
parent b4b27eaac6
commit 4c8add8a96
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=250297
2 changed files with 10 additions and 7 deletions

View File

@ -983,6 +983,7 @@ pmap_set_prot(pt_entry_t *ptep, vm_prot_t prot, uint8_t user)
if (!(prot & VM_PROT_EXECUTE))
*ptep |= L2_XN;
*ptep |= L2_APX;
*ptep |= L2_S_PROT_R;
if (user)
@ -990,6 +991,8 @@ pmap_set_prot(pt_entry_t *ptep, vm_prot_t prot, uint8_t user)
if (prot & VM_PROT_WRITE)
*ptep &= ~(L2_APX);
else if (user)
*ptep &= ~(L2_S_PROT_R);
}
/*
@ -1216,7 +1219,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
/*
* Catch a userland access to the vector page mapped at 0x0
*/
if (user && ((pte & L2_S_PROT_MASK) == L2_S_PROT_U))
if (user && !(pte & L2_S_PROT_U))
goto out;
if (va == vector_page)
goto out;
@ -2649,7 +2652,10 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
npte |= L2_TYPE_INV;
}
npte |= L2_APX;
npte |= L2_S_PROT_R;
if (user)
npte |= L2_S_PROT_U;
if (prot & VM_PROT_WRITE) {
npte &= ~(L2_APX);
@ -2657,11 +2663,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (m != NULL &&
(m->oflags & VPO_UNMANAGED) == 0)
vm_page_aflag_set(m, PGA_WRITEABLE);
}
if (user)
npte |= L2_S_PROT_U;
} else if (user)
npte &= ~(L2_S_PROT_R);
if (!(prot & VM_PROT_EXECUTE) && m)
npte |= L2_XN;

View File

@ -352,7 +352,7 @@ extern int pmap_needs_pte_sync;
#elif (ARM_MMU_V6 + ARM_MMU_V7) != 0
#define L2_S_PROT_U (L2_AP0(2)) /* user access */
#define L2_S_PROT_R (L2_APX|L2_AP0(1)) /* read access */
#define L2_S_PROT_R (L2_AP0(1)) /* read access */
#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_R)
#define L2_S_WRITABLE(pte) (!(pte & L2_APX))