Add x86 msr tweak KPI.

Use the KPI to tweak MSRs in mitigation code.

Reviewed by:	markj, scottl
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D22431
Author:	kib
Date:	2019-11-18 20:53:57 +00:00
Commit:	4387bf78c9
Parent:	81f62ee15e

3 changed files with 96 additions and 88 deletions
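
For a sense of how the new KPI reads at a call site, here is a minimal caller-side sketch. It is not part of the diff; the prototype and the MSR_OP_* flags are taken from the hunks below, while the include paths and the wrapper function are illustrative assumptions.

/*
 * Sketch only: write a value to MSR_TSX_FORCE_ABORT on every CPU, with the
 * write executed from an smp_rendezvous() handler on each of them.  This
 * mirrors the hwpmc conversion in the first hunk below.
 */
#include <sys/param.h>
#include <machine/md_var.h>	/* assumed: pulls in the x86_msr_op() prototype */
#include <machine/specialreg.h>	/* MSR_TSX_FORCE_ABORT */

static void
example_set_tsx_force_abort(uint64_t val)
{
	x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS | MSR_OP_WRITE, val);
}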

sys/dev/hwpmc/hwpmc_core.c

@@ -220,15 +220,6 @@ iaf_reload_count_to_perfctr_value(pmc_value_t rlc)
return (1ULL << core_iaf_width) - rlc;
}
static void
tweak_tsx_force_abort(void *arg)
{
u_int val;
val = (uintptr_t)arg;
wrmsr(MSR_TSX_FORCE_ABORT, val);
}
static int
iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
@@ -270,7 +261,8 @@ iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
if ((cpu_stdext_feature3 & CPUID_STDEXT3_TSXFA) != 0 &&
!pmc_tsx_force_abort_set) {
pmc_tsx_force_abort_set = true;
smp_rendezvous(NULL, tweak_tsx_force_abort, NULL, (void *)1);
x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS |
MSR_OP_WRITE, 1);
}
flags = 0;
@@ -411,7 +403,8 @@ iaf_release_pmc(int cpu, int ri, struct pmc *pmc)
MPASS(pmc_alloc_refs > 0);
if (pmc_alloc_refs-- == 1 && pmc_tsx_force_abort_set) {
pmc_tsx_force_abort_set = false;
smp_rendezvous(NULL, tweak_tsx_force_abort, NULL, (void *)0);
x86_msr_op(MSR_TSX_FORCE_ABORT, MSR_OP_RENDEZVOUS |
MSR_OP_WRITE, 0);
}
return (0);

sys/x86/include/x86_var.h

@@ -148,4 +148,12 @@ int user_dbreg_trap(register_t dr6);
int minidumpsys(struct dumperinfo *);
struct pcb *get_pcb_td(struct thread *td);
#define MSR_OP_ANDNOT 0x00000001
#define MSR_OP_OR 0x00000002
#define MSR_OP_WRITE 0x00000003
#define MSR_OP_LOCAL 0x10000000
#define MSR_OP_SCHED 0x20000000
#define MSR_OP_RENDEZVOUS 0x30000000
void x86_msr_op(u_int msr, u_int op, uint64_t arg1);
#endif
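
The single op word packs the operation selector in its low byte and the execution mode in its high nibble; the implementation below splits it apart with MSR_OP_OP_MASK and MSR_OP_EXMODE_MASK. A small stand-alone program (not from the commit; plain userland C, for illustration only) makes the encoding concrete:

#include <assert.h>
#include <stdio.h>

/* Constants copied from the header hunk above. */
#define	MSR_OP_ANDNOT		0x00000001
#define	MSR_OP_OR		0x00000002
#define	MSR_OP_WRITE		0x00000003
#define	MSR_OP_LOCAL		0x10000000
#define	MSR_OP_SCHED		0x20000000
#define	MSR_OP_RENDEZVOUS	0x30000000

/* Masks used by x86_msr_op() in the cpu_machdep.c hunk below. */
#define	MSR_OP_EXMODE_MASK	0xf0000000
#define	MSR_OP_OP_MASK		0x000000ff

int
main(void)
{
	unsigned int op = MSR_OP_OR | MSR_OP_SCHED;

	assert((op & MSR_OP_OP_MASK) == MSR_OP_OR);
	assert((op & MSR_OP_EXMODE_MASK) == MSR_OP_SCHED);
	printf("op=%#x: operation %#x, execution mode %#x\n",
	    op, op & MSR_OP_OP_MASK, op & MSR_OP_EXMODE_MASK);
	return (0);
}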

sys/x86/x86/cpu_machdep.c

@@ -111,6 +111,80 @@ static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
#endif
struct msr_op_arg {
u_int msr;
int op;
uint64_t arg1;
};
static void
x86_msr_op_one(void *argp)
{
struct msr_op_arg *a;
uint64_t v;
a = argp;
switch (a->op) {
case MSR_OP_ANDNOT:
v = rdmsr(a->msr);
v &= ~a->arg1;
wrmsr(a->msr, v);
break;
case MSR_OP_OR:
v = rdmsr(a->msr);
v |= a->arg1;
wrmsr(a->msr, v);
break;
case MSR_OP_WRITE:
wrmsr(a->msr, a->arg1);
break;
}
}
#define MSR_OP_EXMODE_MASK 0xf0000000
#define MSR_OP_OP_MASK 0x000000ff
void
x86_msr_op(u_int msr, u_int op, uint64_t arg1)
{
struct thread *td;
struct msr_op_arg a;
u_int exmode;
int bound_cpu, i, is_bound;
a.op = op & MSR_OP_OP_MASK;
MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
a.op == MSR_OP_WRITE);
exmode = op & MSR_OP_EXMODE_MASK;
MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED ||
exmode == MSR_OP_RENDEZVOUS);
a.msr = msr;
a.arg1 = arg1;
switch (exmode) {
case MSR_OP_LOCAL:
x86_msr_op_one(&a);
break;
case MSR_OP_SCHED:
td = curthread;
thread_lock(td);
is_bound = sched_is_bound(td);
bound_cpu = td->td_oncpu;
CPU_FOREACH(i) {
sched_bind(td, i);
x86_msr_op_one(&a);
}
if (is_bound)
sched_bind(td, bound_cpu);
else
sched_unbind(td);
thread_unlock(td);
break;
case MSR_OP_RENDEZVOUS:
smp_rendezvous(NULL, x86_msr_op_one, NULL, &a);
break;
}
}
/*
* Automatically initialized per CPU errata in cpu_idle_tun below.
*/
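
The three execution modes differ only in where x86_msr_op_one() runs: MSR_OP_LOCAL touches the calling CPU alone, MSR_OP_SCHED walks every CPU by binding the calling thread to each in turn with sched_bind(), and MSR_OP_RENDEZVOUS performs the update on all CPUs from an smp_rendezvous() handler. In the conversions below, hw_ssb_set() keeps its sched_bind()-style behaviour via MSR_OP_SCHED, while taa_set() moves from a bind-to-each-CPU loop to MSR_OP_RENDEZVOUS. A short sketch of picking a mode (placeholder msr and bit arguments, includes as in the earlier sketch):

static void
example_modes(u_int msr, uint64_t bit)
{
	/* Read-modify-write OR on the current CPU only. */
	x86_msr_op(msr, MSR_OP_OR | MSR_OP_LOCAL, bit);

	/* Same update on every CPU, visiting them from thread context. */
	x86_msr_op(msr, MSR_OP_OR | MSR_OP_SCHED, bit);

	/* Clear the bit on every CPU from IPI (rendezvous) context. */
	x86_msr_op(msr, MSR_OP_ANDNOT | MSR_OP_RENDEZVOUS, bit);
}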
@@ -806,18 +880,10 @@ SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
void
hw_ibrs_recalculate(void)
{
uint64_t v;
if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
if (hw_ibrs_disable) {
v = rdmsr(MSR_IA32_SPEC_CTRL);
v &= ~(uint64_t)IA32_SPEC_CTRL_IBRS;
wrmsr(MSR_IA32_SPEC_CTRL, v);
} else {
v = rdmsr(MSR_IA32_SPEC_CTRL);
v |= IA32_SPEC_CTRL_IBRS;
wrmsr(MSR_IA32_SPEC_CTRL, v);
}
x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_LOCAL |
(hw_ibrs_disable ? MSR_OP_ANDNOT : MSR_OP_OR),
IA32_SPEC_CTRL_IBRS);
return;
}
hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 &&
@@ -848,47 +914,18 @@ SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
&hw_ssb_active, 0,
"Speculative Store Bypass Disable active");
static void
hw_ssb_set_one(bool enable)
{
uint64_t v;
v = rdmsr(MSR_IA32_SPEC_CTRL);
if (enable)
v |= (uint64_t)IA32_SPEC_CTRL_SSBD;
else
v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD;
wrmsr(MSR_IA32_SPEC_CTRL, v);
}
static void
hw_ssb_set(bool enable, bool for_all_cpus)
{
struct thread *td;
int bound_cpu, i, is_bound;
if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
hw_ssb_active = 0;
return;
}
hw_ssb_active = enable;
if (for_all_cpus) {
td = curthread;
thread_lock(td);
is_bound = sched_is_bound(td);
bound_cpu = td->td_oncpu;
CPU_FOREACH(i) {
sched_bind(td, i);
hw_ssb_set_one(enable);
}
if (is_bound)
sched_bind(td, bound_cpu);
else
sched_unbind(td);
thread_unlock(td);
} else {
hw_ssb_set_one(enable);
}
x86_msr_op(MSR_IA32_SPEC_CTRL,
(enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
(for_all_cpus ? MSR_OP_SCHED : MSR_OP_LOCAL), IA32_SPEC_CTRL_SSBD);
}
void
@@ -1150,44 +1187,14 @@ enum {
TAA_TAA_NO = 4
};
static void
taa_set_one(bool enable)
{
uint64_t v;
v = rdmsr(MSR_IA32_TSX_CTRL);
if (enable)
v |= (uint64_t)(IA32_TSX_CTRL_RTM_DISABLE |
IA32_TSX_CTRL_TSX_CPUID_CLEAR);
else
v &= ~(uint64_t)(IA32_TSX_CTRL_RTM_DISABLE |
IA32_TSX_CTRL_TSX_CPUID_CLEAR);
wrmsr(MSR_IA32_TSX_CTRL, v);
}
static void
taa_set(bool enable, bool all)
{
struct thread *td;
int bound_cpu, i, is_bound;
if (all) {
td = curthread;
thread_lock(td);
is_bound = sched_is_bound(td);
bound_cpu = td->td_oncpu;
CPU_FOREACH(i) {
sched_bind(td, i);
taa_set_one(enable);
}
if (is_bound)
sched_bind(td, bound_cpu);
else
sched_unbind(td);
thread_unlock(td);
} else
taa_set_one(enable);
x86_msr_op(MSR_IA32_TSX_CTRL,
(enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
(all ? MSR_OP_RENDEZVOUS : MSR_OP_LOCAL),
IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR);
}
void