Implement per-CPU pmap activation tracking for RISC-V.

This reduces the overhead of TLB invalidations by ensuring that we
only interrupt CPUs which are using the given pmap.  Tracking is
performed in pmap_activate(), which is called during context switches:
from cpu_throw() when a thread is exiting or an AP is starting, and
from cpu_switch() for a regular context switch.

For now, pmap_sync_icache() still must interrupt all CPUs.

Reviewed by:	kib (earlier version), jhb
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D18874
commit ad0bb33a89 (parent fea91ac613)
Author: markj
Date:   2019-02-13 17:50:01 +00:00

9 changed files with 104 additions and 77 deletions
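The heart of the change is the pattern repeated in the pmap_invalidate_*()
hunks below: the shootdown target set shrinks from all_cpus to the pmap's
pm_active set, and the SBI call is skipped when no other CPU has the pmap
active (or before the APs are started). An annotated sketch of that pattern,
condensed from the pmap.c hunks that follow (comments added here):

	static void
	pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
	{
		cpuset_t mask;

		sched_pin();				/* Stay on this CPU. */
		mask = pmap->pm_active;			/* CPUs using this pmap... */
		CPU_CLR(PCPU_GET(cpuid), &mask);	/* ...excluding ourselves. */
		fence();				/* Order PTE writes before the
							   remote sfence_vma() request. */
		if (!CPU_EMPTY(&mask) && smp_started)
			sbi_remote_sfence_vma(mask.__bits, va, 1);
		sfence_vma_page(va);			/* Flush the local TLB entry. */
		sched_unpin();
	}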

sys/riscv/include/pcb.h

@@ -55,7 +55,6 @@ struct pcb {
 #define	PCB_FP_STARTED	0x1
 #define	PCB_FP_USERMASK	0x1
 	uint64_t	pcb_sepc;	/* Supervisor exception pc */
-	vm_offset_t	pcb_l1addr;	/* L1 page tables base address */
 	vm_offset_t	pcb_onfault;	/* Copyinout fault handler */
 };

sys/riscv/include/pcpu.h

@@ -45,6 +45,7 @@
 #define	ALT_STACK_SIZE	128

 #define	PCPU_MD_FIELDS						\
+	struct pmap	*pc_curpmap;	/* Currently active pmap */	\
 	uint32_t	pc_pending_ipis; /* IPIs pending to this CPU */	\
 	char		__pad[61]

sys/riscv/include/pmap.h

@@ -41,6 +41,7 @@
 #ifndef LOCORE
 #include <sys/queue.h>
+#include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
@@ -80,6 +81,8 @@ struct pmap {
 	struct mtx		pm_mtx;
 	struct pmap_statistics	pm_stats;	/* pmap statictics */
 	pd_entry_t		*pm_l1;
+	u_long			pm_satp;	/* value for SATP register */
+	cpuset_t		pm_active;	/* active on cpus */
 	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
 	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
 	struct vm_radix		pm_root;
@@ -137,6 +140,10 @@ extern vm_offset_t virtual_end;
 #define	L1_MAPPABLE_P(va, pa, size)					\
 	((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)

+struct thread;
+
+void	pmap_activate_boot(pmap_t);
+void	pmap_activate_sw(struct thread *);
 void	pmap_bootstrap(vm_offset_t, vm_paddr_t, vm_size_t);
 void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
 vm_paddr_t pmap_kextract(vm_offset_t va);
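The new pm_satp field caches the complete SATP value for the address space,
so activation can load the register directly instead of rebuilding it from
pcb_l1addr on every switch. For Sv39 the encoding is just the mode bits
OR'ed with the physical page number of the root (L1) page table, as in the
pmap_pinit() hunk further down:

	/* Sketch, taken from pmap_pinit() below: SATP = mode | root-table PPN. */
	pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);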

sys/riscv/riscv/genassym.c

@@ -63,7 +63,6 @@ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
 ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);

 ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
-ASSYM(PCB_L1ADDR, offsetof(struct pcb, pcb_l1addr));
 ASSYM(PCB_SIZE, sizeof(struct pcb));
 ASSYM(PCB_RA, offsetof(struct pcb, pcb_ra));
 ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));

sys/riscv/riscv/machdep.c

@@ -871,10 +871,6 @@ initriscv(struct riscv_bootparams *rvbp)
 	init_proc0(rvbp->kern_stack);

-	/* set page table base register for thread0 */
-	thread0.td_pcb->pcb_l1addr = \
-	    (rvbp->kern_l1pt - KERNBASE + rvbp->kern_phys);
-
 	msgbufinit(msgbufp, msgbufsize);
 	mutex_init();
 	init_param2(physmem);

sys/riscv/riscv/mp_machdep.c

@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/pmap.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_kern.h>
+#include <vm/vm_map.h>

 #include <machine/intr.h>
 #include <machine/smp.h>
@@ -255,6 +256,9 @@ init_secondary(uint64_t cpu)
 	/* Enable external (PLIC) interrupts */
 	csr_set(sie, SIE_SEIE);

+	/* Activate process 0's pmap. */
+	pmap_activate_boot(vmspace_pmap(proc0.p_vmspace));
+
 	mtx_lock_spin(&ap_boot_mtx);
 	atomic_add_rel_32(&smp_cpus, 1);

sys/riscv/riscv/pmap.c

@@ -118,9 +118,10 @@ __FBSDID("$FreeBSD$");
  */

 #include <sys/param.h>
+#include <sys/systm.h>
 #include <sys/bitstring.h>
 #include <sys/bus.h>
-#include <sys/systm.h>
+#include <sys/cpuset.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
@@ -566,6 +567,8 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
 	rw_init(&pvh_global_lock, "pmap pv global");

+	CPU_FILL(&kernel_pmap->pm_active);
+
 	/* Assume the address we were loaded to is a valid physical address. */
 	min_pa = max_pa = kernstart;
@@ -723,9 +726,6 @@ pmap_init(void)
  * In general, the calling thread uses a plain fence to order the
  * writes to the page tables before invoking an SBI callback to invoke
  * sfence_vma() on remote CPUs.
- *
- * Since the riscv pmap does not yet have a pm_active field, IPIs are
- * sent to all CPUs in the system.
  */
 static void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
@@ -733,10 +733,11 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 	cpuset_t mask;

 	sched_pin();
-	mask = all_cpus;
+	mask = pmap->pm_active;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
 	fence();
-	sbi_remote_sfence_vma(mask.__bits, va, 1);
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_sfence_vma(mask.__bits, va, 1);
 	sfence_vma_page(va);
 	sched_unpin();
 }
@@ -747,10 +748,11 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 	cpuset_t mask;

 	sched_pin();
-	mask = all_cpus;
+	mask = pmap->pm_active;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
 	fence();
-	sbi_remote_sfence_vma(mask.__bits, sva, eva - sva + 1);
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_sfence_vma(mask.__bits, sva, eva - sva + 1);

 	/*
 	 * Might consider a loop of sfence_vma_page() for a small
@@ -766,16 +768,17 @@ pmap_invalidate_all(pmap_t pmap)
 	cpuset_t mask;

 	sched_pin();
-	mask = all_cpus;
+	mask = pmap->pm_active;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
-	fence();

 	/*
 	 * XXX: The SBI doc doesn't detail how to specify x0 as the
 	 * address to perform a global fence.  BBL currently treats
 	 * all sfence_vma requests as global however.
 	 */
-	sbi_remote_sfence_vma(mask.__bits, 0, 0);
+	fence();
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_sfence_vma(mask.__bits, 0, 0);
 	sfence_vma();
 	sched_unpin();
 }
@@ -1199,6 +1202,9 @@ pmap_pinit0(pmap_t pmap)
 	PMAP_LOCK_INIT(pmap);
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 	pmap->pm_l1 = kernel_pmap->pm_l1;
+	pmap->pm_satp = SATP_MODE_SV39 | (vtophys(pmap->pm_l1) >> PAGE_SHIFT);
+	CPU_ZERO(&pmap->pm_active);
+	pmap_activate_boot(pmap);
 }

 int
@@ -1216,12 +1222,15 @@ pmap_pinit(pmap_t pmap)
 	l1phys = VM_PAGE_TO_PHYS(l1pt);
 	pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
+	pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);

 	if ((l1pt->flags & PG_ZERO) == 0)
 		pagezero(pmap->pm_l1);

 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+	CPU_ZERO(&pmap->pm_active);

 	/* Install kernel pagetables */
 	memcpy(pmap->pm_l1, kernel_pmap->pm_l1, PAGE_SIZE);
@@ -1411,6 +1420,8 @@ pmap_release(pmap_t pmap)
 	KASSERT(pmap->pm_stats.resident_count == 0,
 	    ("pmap_release: pmap resident count %ld != 0",
 	    pmap->pm_stats.resident_count));
+	KASSERT(CPU_EMPTY(&pmap->pm_active),
+	    ("releasing active pmap %p", pmap));

 	mtx_lock(&allpmaps_lock);
 	LIST_REMOVE(pmap, pm_list);
@@ -4251,26 +4262,56 @@ done:
 	return (val);
 }

+void
+pmap_activate_sw(struct thread *td)
+{
+	pmap_t oldpmap, pmap;
+	u_int cpu;
+
+	oldpmap = PCPU_GET(curpmap);
+	pmap = vmspace_pmap(td->td_proc->p_vmspace);
+	if (pmap == oldpmap)
+		return;
+	load_satp(pmap->pm_satp);
+
+	cpu = PCPU_GET(cpuid);
+#ifdef SMP
+	CPU_SET_ATOMIC(cpu, &pmap->pm_active);
+	CPU_CLR_ATOMIC(cpu, &oldpmap->pm_active);
+#else
+	CPU_SET(cpu, &pmap->pm_active);
+	CPU_CLR(cpu, &oldpmap->pm_active);
+#endif
+	PCPU_SET(curpmap, pmap);
+
+	sfence_vma();
+}
+
 void
 pmap_activate(struct thread *td)
 {
-	pmap_t pmap;
-	uint64_t reg;

 	critical_enter();
-	pmap = vmspace_pmap(td->td_proc->p_vmspace);
-	td->td_pcb->pcb_l1addr = vtophys(pmap->pm_l1);
-
-	reg = SATP_MODE_SV39;
-	reg |= (td->td_pcb->pcb_l1addr >> PAGE_SHIFT);
-	load_satp(reg);
-
-	pmap_invalidate_all(pmap);
+	pmap_activate_sw(td);
 	critical_exit();
 }

 void
-pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+pmap_activate_boot(pmap_t pmap)
 {
+	u_int cpu;
+
+	cpu = PCPU_GET(cpuid);
+#ifdef SMP
+	CPU_SET_ATOMIC(cpu, &pmap->pm_active);
+#else
+	CPU_SET(cpu, &pmap->pm_active);
+#endif
+	PCPU_SET(curpmap, pmap);
+}
+
+void
+pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 {
 	cpuset_t mask;
@@ -4286,7 +4327,8 @@ pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 	mask = all_cpus;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
 	fence();
-	sbi_remote_fence_i(mask.__bits);
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_fence_i(mask.__bits);
 	sched_unpin();
 }
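Two details of the activation code above are worth noting: pmap_activate_sw()
returns early when the new thread's pmap is already loaded, keeping switches
between threads that share an address space cheap, and it ends with
sfence_vma() because writing satp does not by itself invalidate translations
that may already be cached for the new address space. A hypothetical
caller-side sketch of the shootdown flow that now benefits from pm_active
(example_unmap() and pmap_clear() are illustrative names, not part of this
commit):

	static void
	example_unmap(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
	{
		PMAP_LOCK(pmap);
		pmap_clear(pte);		/* Clear the PTE first... */
		pmap_invalidate_page(pmap, va);	/* ...then shoot down stale TLB
						   entries, now only on the CPUs
						   in pmap->pm_active. */
		PMAP_UNLOCK(pmap);
	}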

sys/riscv/riscv/swtch.S

@@ -207,28 +207,21 @@ ENTRY(fpe_state_clear)
 END(fpe_state_clear)

 /*
- * void cpu_throw(struct thread *old, struct thread *new)
+ * void cpu_throw(struct thread *old __unused, struct thread *new)
  */
 ENTRY(cpu_throw)
+	/* Activate the new thread's pmap. */
+	mv	s0, a1
+	mv	a0, a1
+	call	_C_LABEL(pmap_activate_sw)
+	mv	a0, s0
+
 	/* Store the new curthread */
-	sd	a1, PC_CURTHREAD(gp)
+	sd	a0, PC_CURTHREAD(gp)
 	/* And the new pcb */
-	ld	x13, TD_PCB(a1)
+	ld	x13, TD_PCB(a0)
 	sd	x13, PC_CURPCB(gp)

-	sfence.vma
-
-	/* Switch to the new pmap */
-	ld	t0, PCB_L1ADDR(x13)
-	srli	t0, t0, PAGE_SHIFT
-	li	t1, SATP_MODE_SV39
-	or	t0, t0, t1
-	csrw	satp, t0
-
-	/* TODO: Invalidate the TLB */
-	sfence.vma
-
 	/* Load registers */
 	ld	ra, (PCB_RA)(x13)
 	ld	sp, (PCB_SP)(x13)
@@ -250,7 +243,7 @@ ENTRY(cpu_throw)
 #ifdef FPE
 	/* Is FPE enabled for new thread? */
-	ld	t0, TD_FRAME(a1)
+	ld	t0, TD_FRAME(a0)
 	ld	t1, (TF_SSTATUS)(t0)
 	li	t2, SSTATUS_FS_MASK
 	and	t3, t1, t2
@@ -324,39 +317,28 @@ ENTRY(cpu_switch)
 1:
 #endif

+	/* Activate the new thread's pmap */
+	mv	s0, a0
+	mv	s1, a1
+	mv	s2, a2
+	mv	a0, a1
+	call	_C_LABEL(pmap_activate_sw)
+	mv	a1, s1
+
+	/* Release the old thread */
+	sd	s2, TD_LOCK(s0)
+#if defined(SCHED_ULE) && defined(SMP)
+	/* Spin if TD_LOCK points to a blocked_lock */
+	la	s2, _C_LABEL(blocked_lock)
+1:
+	ld	t0, TD_LOCK(a1)
+	beq	t0, s2, 1b
+#endif
+
 	/*
 	 * Restore the saved context.
 	 */
 	ld	x13, TD_PCB(a1)

-	/*
-	 * TODO: We may need to flush the cache here if switching
-	 * to a user process.
-	 */
-	sfence.vma
-
-	/* Switch to the new pmap */
-	ld	t0, PCB_L1ADDR(x13)
-	srli	t0, t0, PAGE_SHIFT
-	li	t1, SATP_MODE_SV39
-	or	t0, t0, t1
-	csrw	satp, t0
-
-	/* TODO: Invalidate the TLB */
-	sfence.vma
-
-	/* Release the old thread */
-	sd	a2, TD_LOCK(a0)
-#if defined(SCHED_ULE) && defined(SMP)
-	/* Spin if TD_LOCK points to a blocked_lock */
-	la	a2, _C_LABEL(blocked_lock)
-1:
-	ld	t0, TD_LOCK(a1)
-	beq	t0, a2, 1b
-#endif
-
 	/* Restore the registers */
 	ld	tp, (PCB_TP)(x13)
 	ld	ra, (PCB_RA)(x13)
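The register shuffling around the pmap_activate_sw() calls above follows the
RISC-V calling convention: a0-a2 are caller-saved argument registers that the
call may clobber, while s0-s2 are callee-saved and survive it. An annotated
copy of the cpu_switch() sequence (comments added here; register roles follow
from cpu_switch(old, new, mtx)):

	mv	s0, a0		/* s0 = old thread; survives the call */
	mv	s1, a1		/* s1 = new thread */
	mv	s2, a2		/* s2 = mutex to assign to the old thread */
	mv	a0, a1		/* argument: pmap_activate_sw(new) */
	call	_C_LABEL(pmap_activate_sw)
	mv	a1, s1		/* reload the new thread pointer */

	/* Release the old thread */
	sd	s2, TD_LOCK(s0)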

sys/riscv/riscv/vm_machdep.c

@@ -92,9 +92,6 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
 	td2->td_pcb = pcb2;
 	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

-	td2->td_pcb->pcb_l1addr =
-	    vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l1);
-
 	tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
 	bcopy(td1->td_frame, tf, sizeof(*tf));