amd64: Define KVA regions for KMSAN shadow maps

KMSAN requires two shadow maps, each one-to-one with the kernel map.
Allocate regions of the kernel's PML4 page for them.  Add functions to
create mappings in the shadow map regions; these will be used by the
KMSAN runtime.

Reviewed by:	alc, kib
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D31295
This commit is contained in:
Mark Johnston 2021-08-10 16:25:39 -04:00
parent 30d00832d7
commit f95f780ea4
3 changed files with 102 additions and 2 deletions

View File

@ -11339,6 +11339,76 @@ pmap_kasan_enter(vm_offset_t va)
}
#endif
#ifdef KMSAN
/*
 * Allocate a single wired 4KB page for use in a KMSAN shadow or origin map.
 * Panics on failure; the returned page is guaranteed to be zeroed.
 */
static vm_page_t
pmap_kmsan_enter_alloc_4k(void)
{
	vm_page_t page;

	page = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (page == NULL)
		panic("%s: no memory to grow shadow map", __func__);

	/*
	 * VM_ALLOC_ZERO is only a preference; zero the page ourselves if the
	 * allocator handed back one without PG_ZERO set.
	 */
	if ((page->flags & PG_ZERO) == 0)
		pmap_zero_page(page);
	return (page);
}
/*
 * Try to allocate a wired, zeroed, 2MB-aligned contiguous run of pages to
 * back a 2MB shadow map mapping.  Unlike the 4KB allocator, failure here is
 * tolerated: return NULL and let the caller fall back to 4KB pages.
 */
static vm_page_t
pmap_kmsan_enter_alloc_2m(void)
{
	vm_page_t page;

	page = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED, NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT);
	if (page == NULL)
		return (NULL);

	/* Zero the run through the direct map before it is mapped. */
	memset((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page)), 0, NBPDR);
	return (page);
}
/*
 * Grow the shadow or origin maps by at least one 4KB page at the specified
 * address.  Use 2MB pages when possible.
 *
 * Called by the KMSAN runtime; the caller must hold the kernel map lock
 * (asserted below), which serializes concurrent growth of the shadow maps.
 */
void
pmap_kmsan_enter(vm_offset_t va)
{
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	pt_entry_t *pte;
	vm_page_t m;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);

	/* Install a page directory page if the PDPE slot is empty. */
	pdpe = pmap_pdpe(kernel_pmap, va);
	if ((*pdpe & X86_PG_V) == 0) {
		m = pmap_kmsan_enter_alloc_4k();
		*pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
		    X86_PG_V | pg_nx);
	}

	/*
	 * Fill the PDE: prefer a 2MB large-page mapping (PG_PS, with A/M
	 * preset since the shadow memory is accessed immediately); fall back
	 * to a 4KB page table page when contiguous memory is unavailable.
	 */
	pde = pmap_pdpe_to_pde(pdpe, va);
	if ((*pde & X86_PG_V) == 0) {
		m = pmap_kmsan_enter_alloc_2m();
		if (m != NULL) {
			*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
			    X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
		} else {
			m = pmap_kmsan_enter_alloc_4k();
			*pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
			    X86_PG_V | pg_nx);
		}
	}

	/* A 2MB mapping already covers va; no PTE is required. */
	if ((*pde & X86_PG_PS) != 0)
		return;
	pte = pmap_pde_to_pte(pde, va);
	if ((*pte & X86_PG_V) != 0)
		return;

	/* Map the final 4KB shadow page, again with A/M preset. */
	m = pmap_kmsan_enter_alloc_4k();
	*pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
	    X86_PG_M | X86_PG_A | pg_nx);
}
#endif
/*
* Track a range of the kernel's virtual address space that is contiguous
* in various mapping attributes.
@ -11520,6 +11590,14 @@ sysctl_kmaps(SYSCTL_HANDLER_ARGS)
case KASANPML4I:
sbuf_printf(sb, "\nKASAN shadow map:\n");
break;
#endif
#ifdef KMSAN
case KMSANSHADPML4I:
sbuf_printf(sb, "\nKMSAN shadow map:\n");
break;
case KMSANORIGPML4I:
sbuf_printf(sb, "\nKMSAN origin map:\n");
break;
#endif
case KPML4BASE:
sbuf_printf(sb, "\nKernel map:\n");

View File

@ -201,6 +201,13 @@
*/
#define NKASANPML4E ((NKPML4E + 7) / 8)
/*
* Number of PML4 slots for the KMSAN shadow and origin maps. These are
* one-to-one with the kernel map.
*/
#define NKMSANSHADPML4E NKPML4E
#define NKMSANORIGPML4E NKPML4E
/*
* We use the same numbering of the page table pages for 5-level and
* 4-level paging structures.
@ -251,6 +258,9 @@
#define KASANPML4I (DMPML4I - NKASANPML4E) /* Below the direct map */
#define KMSANSHADPML4I (KPML4BASE - NKMSANSHADPML4E)
#define KMSANORIGPML4I (DMPML4I - NKMSANORIGPML4E)
/* Large map: index of the first and max last pml4 entry */
#define LMSPML4I (PML4PML4I + 1)
#define LMEPML4I (KASANPML4I - 1)
@ -520,6 +530,9 @@ vm_page_t pmap_page_alloc_below_4g(bool zeroed);
#ifdef KASAN
void pmap_kasan_enter(vm_offset_t);
#endif
#ifdef KMSAN
void pmap_kmsan_enter(vm_offset_t);
#endif
#endif /* _KERNEL */

View File

@ -170,9 +170,10 @@
* 0xffff804020100fff - 0xffff807fffffffff unused
* 0xffff808000000000 - 0xffff847fffffffff large map (can be tuned up)
* 0xffff848000000000 - 0xfffff77fffffffff unused (large map extends there)
* 0xfffff78000000000 - 0xfffff7ffffffffff 512GB KASAN shadow map
* 0xfffff60000000000 - 0xfffff7ffffffffff 2TB KMSAN origin map, optional
* 0xfffff78000000000 - 0xfffff7bfffffffff 512GB KASAN shadow map, optional
* 0xfffff80000000000 - 0xfffffbffffffffff 4TB direct map
* 0xfffffc0000000000 - 0xfffffdffffffffff unused
* 0xfffffc0000000000 - 0xfffffdffffffffff 2TB KMSAN shadow map, optional
* 0xfffffe0000000000 - 0xffffffffffffffff 2TB kernel map
*
* Within the kernel map:
@ -191,6 +192,14 @@
#define KASAN_MIN_ADDRESS KV4ADDR(KASANPML4I, 0, 0, 0)
#define KASAN_MAX_ADDRESS KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0)
#define KMSAN_SHAD_MIN_ADDRESS KV4ADDR(KMSANSHADPML4I, 0, 0, 0)
#define KMSAN_SHAD_MAX_ADDRESS KV4ADDR(KMSANSHADPML4I + NKMSANSHADPML4E, \
0, 0, 0)
#define KMSAN_ORIG_MIN_ADDRESS KV4ADDR(KMSANORIGPML4I, 0, 0, 0)
#define KMSAN_ORIG_MAX_ADDRESS KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E, \
0, 0, 0)
#define LARGEMAP_MIN_ADDRESS KV4ADDR(LMSPML4I, 0, 0, 0)
#define LARGEMAP_MAX_ADDRESS KV4ADDR(LMEPML4I + 1, 0, 0, 0)