Add Intel Spec Store Bypass Disable control.

Speculative Store Bypass (SSB) is a speculative execution side channel
vulnerability identified by Jann Horn of Google Project Zero (GPZ) and
Ken Johnson of the Microsoft Security Response Center (MSRC); see
https://bugs.chromium.org/p/project-zero/issues/detail?id=1528.
Updated Intel microcode introduces an MSR bit to disable SSB as a
mitigation for the vulnerability.
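
As a hedged illustration of the detection side (not code from this
change): Intel documents the new capability in CPUID.(EAX=7,ECX=0):EDX
bit 31, so userland can probe for it before relying on the sysctl
below.  A minimal sketch, assuming a GCC or Clang toolchain for
<cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID.(EAX=7,ECX=0):EDX bit 31 advertises IA32_SPEC_CTRL.SSBD. */
        if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) == 0) {
                printf("CPUID leaf 7 not available\n");
                return (1);
        }
        printf("SSBD %ssupported by this CPU/microcode\n",
            (edx & (1u << 31)) != 0 ? "" : "not ");
        return (0);
}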

Introduce a sysctl hw.spec_store_bypass_disable to provide global
control over the SSBD bit, akin to the existing sysctl that controls
IBRS.  The sysctl can be set to one of three values (a usage sketch
follows the list):
0: off
1: on
2: auto
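
For illustration (also not part of the change): the knob can be driven
from userland with sysctlbyname(3), or with 'sysctl
hw.spec_store_bypass_disable=2' in a root shell.  A minimal sketch,
assuming a FreeBSD userland and enough privilege for the write:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int cur, req = 2;               /* 2 selects "auto" */
        size_t len = sizeof(cur);

        /* Read the old mode and request the new one in a single call. */
        if (sysctlbyname("hw.spec_store_bypass_disable", &cur, &len,
            &req, sizeof(req)) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("previous mode %d, requested mode %d\n", cur, req);
        return (0);
}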

Future work will enable applications to control SSBD on a per-process
basis (when it is not enabled globally).

SSBD bit detection and control were verified with prerelease microcode.

Security:	CVE-2018-3639
Tested by:	emaste (previous version, without updated microcode)
Sponsored by:	The FreeBSD Foundation
MFC after:	3 days
Konstantin Belousov 2018-05-21 21:08:19 +00:00
parent 9be4bbbb21
commit 3621ba1ede
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=334005
7 changed files with 112 additions and 2 deletions

View File

@@ -224,6 +224,7 @@ initializecpu(void)
                pg_nx = PG_NX;
        }
        hw_ibrs_recalculate();
        hw_ssb_recalculate(false);
        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
                init_amd();

View File

@@ -1843,6 +1843,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
        thread0.td_critnest = 0;

        TUNABLE_INT_FETCH("hw.ibrs_disable", &hw_ibrs_disable);
        TUNABLE_INT_FETCH("hw.spec_store_bypass_disable", &hw_ssb_disable);

        TSEXIT();

View File

@@ -39,6 +39,7 @@
extern uint64_t *vm_page_dump;
extern int hw_lower_amd64_sharedpage;
extern int hw_ibrs_disable;
extern int hw_ssb_disable;

/*
 * The file "conf/ldscript.amd64" defines the symbol "kernphys".  Its

View File

@@ -529,6 +529,7 @@ cpuctl_do_eval_cpu_features(int cpu, struct thread *td)
        identify_cpu2();
        hw_ibrs_recalculate();
        restore_cpu(oldcpu, is_bound, td);
        hw_ssb_recalculate(true);
        printcpuinfo();
        return (0);
}

View File

@@ -244,6 +244,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
#endif
#ifdef __amd64__
        hw_ibrs_active = 0;
        hw_ssb_active = 0;
        cpu_stdext_feature3 = 0;
        CPU_FOREACH(i) {
                pc = pcpu_find(i);

View File

@@ -85,6 +85,7 @@ extern uint64_t xsave_mask;
extern u_int max_apic_id;
extern int pti;
extern int hw_ibrs_active;
extern int hw_ssb_active;

struct pcb;
struct thread;
@@ -137,6 +138,7 @@ int isa_nmi(int cd);
void handle_ibrs_entry(void);
void handle_ibrs_exit(void);
void hw_ibrs_recalculate(void);
void hw_ssb_recalculate(bool all_cpus);
void nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame);
void nmi_call_kdb_smp(u_int type, struct trapframe *frame);
void nmi_handle_intr(u_int type, struct trapframe *frame);

View File

@@ -150,6 +150,7 @@ void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
        int *state;
        uint64_t v;

        /*
         * A comment in Linux patch claims that 'CPUs run faster with
@@ -166,11 +167,24 @@ acpi_cpu_idle_mwait(uint32_t mwait_hint)
         KASSERT(atomic_load_int(state) == STATE_SLEEPING,
             ("cpu_mwait_cx: wrong monitorbuf state"));
         atomic_store_int(state, STATE_MWAIT);
-        handle_ibrs_exit();
+        if (PCPU_GET(ibpb_set) || hw_ssb_active) {
+                v = rdmsr(MSR_IA32_SPEC_CTRL);
+                wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
+                    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
+        } else {
+                v = 0;
+        }
         cpu_monitor(state, 0, 0);
         if (atomic_load_int(state) == STATE_MWAIT)
                 cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
-        handle_ibrs_entry();
+
+        /*
+         * SSB cannot be disabled while we sleep, or rather, if it was
+         * disabled, the sysctl thread will bind to our cpu to tweak
+         * MSR.
+         */
+        if (v != 0)
+                wrmsr(MSR_IA32_SPEC_CTRL, v);

         /*
          * We should exit on any event that interrupts mwait, because
@@ -804,6 +818,95 @@ SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

int hw_ssb_active;
int hw_ssb_disable;

SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
    &hw_ssb_active, 0,
    "Speculative Store Bypass Disable active");

/* Toggle the SSBD bit in IA32_SPEC_CTRL on the current CPU. */
static void
hw_ssb_set_one(bool enable)
{
        uint64_t v;

        v = rdmsr(MSR_IA32_SPEC_CTRL);
        if (enable)
                v |= (uint64_t)IA32_SPEC_CTRL_SSBD;
        else
                v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD;
        wrmsr(MSR_IA32_SPEC_CTRL, v);
}

static void
hw_ssb_set(bool enable, bool for_all_cpus)
{
        struct thread *td;
        int bound_cpu, i, is_bound;

        if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
                hw_ssb_active = 0;
                return;
        }
        hw_ssb_active = enable;
        if (for_all_cpus) {
                /*
                 * The MSR is per-CPU; bind to each CPU in turn so the
                 * write lands everywhere, then restore the original
                 * binding.
                 */
                td = curthread;
                thread_lock(td);
                is_bound = sched_is_bound(td);
                bound_cpu = td->td_oncpu;
                CPU_FOREACH(i) {
                        sched_bind(td, i);
                        hw_ssb_set_one(enable);
                }
                if (is_bound)
                        sched_bind(td, bound_cpu);
                else
                        sched_unbind(td);
                thread_unlock(td);
        } else {
                hw_ssb_set_one(enable);
        }
}

void
hw_ssb_recalculate(bool all_cpus)
{

        switch (hw_ssb_disable) {
        default:
                hw_ssb_disable = 0;
                /* FALLTHROUGH */
        case 0: /* off */
                hw_ssb_set(false, all_cpus);
                break;
        case 1: /* on */
                hw_ssb_set(true, all_cpus);
                break;
        case 2: /* auto */
                /*
                 * Enable SSBD unless the CPU reports that it is not
                 * affected (IA32_ARCH_CAPABILITIES SSB_NO).
                 */
                hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSBD_NO) != 0 ?
                    false : true, all_cpus);
                break;
        }
}

static int
hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
{
        int error, val;

        val = hw_ssb_disable;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        hw_ssb_disable = val;
        hw_ssb_recalculate(true);
        return (0);
}

SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

/*
 * Enable and restore kernel text write permissions.
 * Callers must ensure that disable_wp()/restore_wp() are executed