Implement transparent 2MB superpage promotion for RISC-V.

This includes support for pmap_enter(..., psind=1) as described in the
commit log message for r321378.
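For illustration, psind 1 selects the second entry of pagesizes[], i.e. a 2MB L2 mapping on RISC-V. A minimal caller-side sketch follows, assuming a fully populated, 2MB-aligned superpage headed by m_super; the helper name and the bare PMAP_ENTER_NOSLEEP flag are illustrative and not taken from the patch:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <machine/pte.h>

/*
 * Illustrative only: ask pmap_enter() to install a single 2MB L2 leaf PTE
 * by passing psind == 1 for a 2MB-aligned, fully populated superpage.
 */
static int
enter_superpage(pmap_t pmap, vm_offset_t va, vm_page_t m_super, vm_prot_t prot)
{
	return (pmap_enter(pmap, rounddown2(va, L2_SIZE), m_super, prot,
	    PMAP_ENTER_NOSLEEP, 1));
}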

The changes are largely modelled after amd64.  arm64 has more stringent
requirements around superpage creation to avoid the possibility of TLB
conflict aborts, and these requirements do not apply to RISC-V, which
like amd64 permits simultaneous caching of 4KB and 2MB translations for
a given page.  RISC-V's PTE format includes only two software bits, and
as these are already consumed we do not have an analogue for amd64's
PG_PROMOTED.  Instead, pmap_remove_l2() always invalidates the entire
2MB address range.
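In other words, because nothing records whether a 2MB mapping was created by promotion, removal must assume that stale 4KB translations may still be cached alongside the 2MB entry. A sketch of the conservative invalidation this implies is below; the helper name is illustrative, and the real pmap_remove_l2() folds this into its removal path rather than using a separate routine.

/*
 * Illustrative only: with no PG_PROMOTED-style hint, flush every 4KB
 * translation that could be cached under the 2MB mapping instead of just
 * the single 2MB entry.
 */
static void
invalidate_l2_region(pmap_t pmap, vm_offset_t sva)
{
	vm_offset_t va;

	for (va = sva; va < sva + L2_SIZE; va += PAGE_SIZE)
		pmap_invalidate_page(pmap, va);
}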

pmap_ts_referenced() is modified to clear PTE_A, now that we support
both hardware- and software-managed reference and dirty bits.  Also
fix pmap_fault_fixup() so that it does not set PTE_A or PTE_D on kernel
mappings.
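The kernel-mapping fix amounts to confining the software accessed/dirty fixup to user mappings; kernel mappings are created with PTE_KERN, which already carries PTE_A and PTE_D. A sketch of the shape of that guard (the helper and the exact predicate are assumptions, not the committed test):

/*
 * Illustrative only: refuse to emulate accessed/dirty updates for kernel
 * mappings, which are established with PTE_A | PTE_D set up front.
 */
static bool
fixup_allowed(pmap_t pmap, pt_entry_t pte)
{
	return (pmap != kernel_pmap && (pte & PTE_U) != 0);
}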

Reviewed by:	kib (earlier version)
Discussed with:	jhb
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D18863
Differential Revision:	https://reviews.freebsd.org/D18864
Differential Revision:	https://reviews.freebsd.org/D18865
Differential Revision:	https://reviews.freebsd.org/D18866
Differential Revision:	https://reviews.freebsd.org/D18867
Differential Revision:	https://reviews.freebsd.org/D18868
Author:	markj
Date:	2019-02-13 17:19:37 +00:00
Parent:	50a8601b2e
Commit:	9d5cba36c5
6 changed files with 1247 additions and 245 deletions

@@ -82,7 +82,7 @@
 #define PAGE_SIZE (1 << PAGE_SHIFT) /* Page size */
 #define PAGE_MASK (PAGE_SIZE - 1)
-#define MAXPAGESIZES 1 /* maximum number of supported page sizes */
+#define MAXPAGESIZES 3 /* maximum number of supported page sizes */
 #ifndef KSTACK_PAGES
 #define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */

@@ -44,6 +44,8 @@
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <vm/_vm_radix.h>
 #ifdef _KERNEL
 #define vtophys(va) pmap_kextract((vm_offset_t)(va))
@@ -80,6 +82,7 @@ struct pmap {
 	pd_entry_t *pm_l1;
 	TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
 	LIST_ENTRY(pmap) pm_list; /* List of all pmaps */
+	struct vm_radix pm_root;
 };
 typedef struct pv_entry {
@@ -139,6 +142,7 @@ void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void pmap_kremove(vm_offset_t);
 void pmap_kremove_device(vm_offset_t, vm_size_t);
+bool pmap_ps_enabled(pmap_t);
 void *pmap_mapdev(vm_offset_t, vm_size_t);
 void *pmap_mapbios(vm_paddr_t, vm_size_t);
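pmap_ps_enabled() gives machine-independent code a per-pmap switch for superpage use. A small illustrative gate that a caller might apply before attempting a psind > 0 mapping (the helper name is hypothetical):

/* Illustrative only: fall back to 4KB mappings when superpages are off. */
static int
choose_psind(pmap_t pmap, vm_page_t m)
{
	return (pmap_ps_enabled(pmap) ? m->psind : 0);
}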

@@ -62,7 +62,8 @@ typedef uint64_t pn_t; /* page number */
 #define L3_SIZE (1 << L3_SHIFT)
 #define L3_OFFSET (L3_SIZE - 1)
-#define Ln_ENTRIES (1 << 9)
+#define Ln_ENTRIES_SHIFT 9
+#define Ln_ENTRIES (1 << Ln_ENTRIES_SHIFT)
 #define Ln_ADDR_MASK (Ln_ENTRIES - 1)
 /* Bits 9:8 are reserved for software */
@@ -79,6 +80,8 @@ typedef uint64_t pn_t; /* page number */
 #define PTE_RWX (PTE_R | PTE_W | PTE_X)
 #define PTE_RX (PTE_R | PTE_X)
 #define PTE_KERN (PTE_V | PTE_R | PTE_W | PTE_A | PTE_D)
+#define PTE_PROMOTE (PTE_V | PTE_RWX | PTE_D | PTE_A | PTE_G | PTE_U | \
+    PTE_SW_MANAGED | PTE_SW_WIRED)
 #define PTE_PPN0_S 10
 #define PTE_PPN1_S 19
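PTE_PROMOTE collects the attribute bits that must be uniform across a run of 512 4KB PTEs before they can be replaced by one 2MB PTE. A sketch of such an eligibility check, loosely modelled on amd64's pmap_promote_pde(); the helper name and the simplified handling of wired and dirty mismatches are assumptions, not the committed logic:

/*
 * Illustrative only: the 512 L3 PTEs must share the attributes covered by
 * PTE_PROMOTE and map 512 physically contiguous, 2MB-aligned 4KB frames.
 * (The real code also reconciles PTE_A/PTE_D and wired mismatches.)
 */
static bool
l3_range_promotable(pt_entry_t *firstl3)
{
	pt_entry_t fl3e, l3e;
	vm_paddr_t pa;
	int i;

	fl3e = pmap_load(firstl3);
	pa = (fl3e >> PTE_PPN0_S) << PAGE_SHIFT;
	if ((pa & L2_OFFSET) != 0)		/* first frame must be 2MB-aligned */
		return (false);
	for (i = 1; i < Ln_ENTRIES; i++) {
		l3e = pmap_load(&firstl3[i]);
		if ((l3e & PTE_PROMOTE) != (fl3e & PTE_PROMOTE))
			return (false);
		if (((l3e >> PTE_PPN0_S) << PAGE_SHIFT) !=
		    pa + (vm_paddr_t)i * PAGE_SIZE)
			return (false);
	}
	return (true);
}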

@@ -99,10 +99,10 @@
 #define VM_NFREEORDER 12
 /*
- * Disable superpage reservations.
+ * Enable superpage reservations: 1 level.
  */
 #ifndef VM_NRESERVLEVEL
-#define VM_NRESERVLEVEL 0
+#define VM_NRESERVLEVEL 1
 #endif
 /*
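The single reservation level corresponds to the L2 superpage size, which follows directly from the page-table geometry defined above; a small sanity check of the arithmetic, for illustration only:

/* 4KB base pages and 512 PTEs per table: 1 << (12 + 9) bytes == 2MB. */
_Static_assert((1UL << (PAGE_SHIFT + Ln_ENTRIES_SHIFT)) == 2 * 1024 * 1024,
    "one reservation level covers a 2MB L2 superpage");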

File diff suppressed because it is too large.
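The suppressed diff is the bulk of the change, presumably the pmap implementation itself, where promotion and its inverse, demotion, live. As a rough illustration of the demotion direction only (a real routine must also handle PV-entry bookkeeping, allocation failure, locking, and TLB invalidation), a 2MB leaf is rewritten as a pointer to an L3 table of 512 4KB leaves with identical attributes; all names below are illustrative:

/*
 * Illustrative only: fill a fresh L3 table so it reproduces the 2MB mapping
 * as 512 4KB mappings, then turn the L2 leaf into a non-leaf entry
 * (PTE_V set, R/W/X clear) that points at the new table.
 */
static void
demote_l2_sketch(pd_entry_t *l2, pt_entry_t *l3_table, vm_paddr_t l3_table_pa)
{
	pt_entry_t newl3;
	int i;

	newl3 = pmap_load((pt_entry_t *)l2);	/* inherit attributes and PPN */
	for (i = 0; i < Ln_ENTRIES; i++) {
		l3_table[i] = newl3;
		newl3 += 1UL << PTE_PPN0_S;	/* advance to the next 4KB frame */
	}
	*l2 = ((l3_table_pa >> PAGE_SHIFT) << PTE_PPN0_S) | PTE_V;
	/* The caller must still invalidate the 2MB range, as noted above. */
}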

@@ -271,7 +271,8 @@ vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
 {
 	vm_page_t m, m_map;
 #if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
-    __ARM_ARCH >= 6) || defined(__i386__)) && VM_NRESERVLEVEL > 0
+    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
+    VM_NRESERVLEVEL > 0
 	vm_page_t m_super;
 	int flags;
 #endif
@@ -286,7 +287,8 @@ vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
 	m_map = m;
 	psind = 0;
 #if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
-    __ARM_ARCH >= 6) || defined(__i386__)) && VM_NRESERVLEVEL > 0
+    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
+    VM_NRESERVLEVEL > 0
 	if ((m->flags & PG_FICTITIOUS) == 0 &&
 	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
 	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
@@ -463,7 +465,7 @@ vm_fault_populate(struct faultstate *fs, vm_prot_t prot, int fault_type,
 	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
 		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
 #if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
-    __ARM_ARCH >= 6) || defined(__i386__)
+    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
 		psind = m->psind;
 		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
 		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||