Mitigations for Microarchitectural Data Sampling.

Microarchitectural buffers on some Intel processors that use speculative
execution may allow a local process to disclose memory contents.  An
attacker may be able to read secret data from the kernel or from another
process when untrusted code is executed (for example, in a web browser).

Reference: https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00233.html
Security:	CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091
Security:	FreeBSD-SA-19:07.mds
Reviewed by:	jhb
Tested by:	emaste, lwhsu
Approved by:	so (gtetlow)
Author: Konstantin Belousov
Date:   2019-05-14 17:02:20 +00:00
Parent: 3a89c98bec
Commit: 7355a02bdd
15 changed files with 652 additions and 3 deletions
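
The mitigation is controlled by the new hw.mds_disable knob (a loader tunable and read-write sysctl taking 0 - off, 1 - VERW, 2 - software sequence, 3 - automatic selection) and reported by the read-only hw.mds_disable_state string, both defined in the sysctl hunks further down. A minimal userland sketch (not part of this commit) that queries and sets them via sysctlbyname(3) might look like the following; note that the state handler exports the string without a terminating NUL, so the returned length must be honored:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char state[64];
	size_t len;
	int val;

	/* Read the currently selected MDS mitigation method. */
	len = sizeof(state);
	if (sysctlbyname("hw.mds_disable_state", state, &len, NULL, 0) == 0)
		printf("MDS mitigation state: %.*s\n", (int)len, state);

	/* Request automatic selection (3); needs root privileges. */
	val = 3;
	if (sysctlbyname("hw.mds_disable", NULL, NULL, &val, sizeof(val)) != 0)
		perror("sysctlbyname(hw.mds_disable)");
	return (0);
}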

View File

@ -512,6 +512,7 @@ fast_syscall_common:
testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
jne 3f
call handle_ibrs_exit
callq *mds_handler
/* Restore preserved registers. */
MEXITCOUNT
movq TF_RDI(%rsp),%rdi /* bonus; preserve arg 1 */
@ -1157,6 +1158,7 @@ ld_regs:
jz 2f /* keep running with kernel GS.base */
cli
call handle_ibrs_exit_rs
callq *mds_handler
cmpq $~0,PCPU(UCR3)
je 1f
pushq %rdx
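
These hunks hook the mitigation into the amd64 kernel exit paths: immediately after handle_ibrs_exit, the syscall and interrupt return code calls through the mds_handler function pointer, which hw_mds_recalculate() (later in this commit) points at the cheapest sequence the CPU needs, or at mds_handler_void when no clearing is required. A loose C sketch of that dispatch pattern (illustrative only; the real handlers are assembly and must not change architectural state):

#include <stdio.h>

/* No mitigation required (e.g. MDS_NO parts, or hw.mds_disable=0). */
static void
mds_handler_void(void)
{
}

/* Stands in for the VERW or software clearing sequences. */
static void
mds_handler_verw(void)
{
	printf("would run the VERW buffer-clearing sequence here\n");
}

/* Selected once by hw_mds_recalculate(), called on every return to user mode. */
static void (*mds_handler)(void) = mds_handler_void;

static void
kernel_exit_path(void)
{
	/* ... handle_ibrs_exit() ... */
	mds_handler();		/* corresponds to "callq *mds_handler" above */
	/* ... restore registers, sysretq/iretq ... */
}

int
main(void)
{
	kernel_exit_path();		/* no-op handler */
	mds_handler = mds_handler_verw;	/* as hw_mds_recalculate() would do */
	kernel_exit_path();
	return (0);
}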

View File

@ -233,6 +233,9 @@ ASSYM(PC_PTI_STACK, offsetof(struct pcpu, pc_pti_stack));
ASSYM(PC_PTI_STACK_SZ, PC_PTI_STACK_SZ);
ASSYM(PC_PTI_RSP0, offsetof(struct pcpu, pc_pti_rsp0));
ASSYM(PC_IBPB_SET, offsetof(struct pcpu, pc_ibpb_set));
ASSYM(PC_MDS_TMP, offsetof(struct pcpu, pc_mds_tmp));
ASSYM(PC_MDS_BUF, offsetof(struct pcpu, pc_mds_buf));
ASSYM(PC_MDS_BUF64, offsetof(struct pcpu, pc_mds_buf64));
ASSYM(LA_EOI, LAPIC_EOI * LAPIC_MEM_MUL);
ASSYM(LA_ISR, LAPIC_ISR0 * LAPIC_MEM_MUL);

View File

@ -257,6 +257,7 @@ initializecpu(void)
hw_ibrs_recalculate();
hw_ssb_recalculate(false);
amd64_syscall_ret_flush_l1d_recalc();
hw_mds_recalculate();
switch (cpu_vendor_id) {
case CPU_VENDOR_AMD:
init_amd();

View File

@ -1732,6 +1732,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
TUNABLE_INT_FETCH("hw.spec_store_bypass_disable", &hw_ssb_disable);
TUNABLE_INT_FETCH("machdep.syscall_ret_l1d_flush",
&syscall_ret_l1d_flush_mode);
TUNABLE_INT_FETCH("hw.mds_disable", &hw_mds_disable);
finishidentcpu(); /* Final stage of CPU initialization */
initializecpu(); /* Initialize CPU registers */

View File

@ -1,8 +1,13 @@
/*-
* Copyright (c) 2018-2019 The FreeBSD Foundation
* Copyright (c) 2003 Peter Wemm.
* Copyright (c) 1993 The Regents of the University of California.
* All rights reserved.
*
* Portions of this software were developed by
* Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
* the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -1622,3 +1627,239 @@ ENTRY(flush_l1d_sw_abi)
popq %rbx
ret
END(flush_l1d_sw_abi)
ENTRY(mds_handler_void)
retq
END(mds_handler_void)
ENTRY(mds_handler_verw)
subq $8, %rsp
movw %ds, (%rsp)
verw (%rsp)
addq $8, %rsp
retq
END(mds_handler_verw)
ENTRY(mds_handler_ivb)
pushq %rax
pushq %rdx
pushq %rcx
movq %cr0, %rax
testb $CR0_TS, %al
je 1f
clts
1: movq PCPU(MDS_BUF), %rdx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
lfence
orpd (%rdx), %xmm0
orpd (%rdx), %xmm0
mfence
movl $40, %ecx
addq $16, %rdx
2: movntdq %xmm0, (%rdx)
addq $16, %rdx
decl %ecx
jnz 2b
mfence
movdqa PCPU(MDS_TMP),%xmm0
testb $CR0_TS, %al
je 3f
movq %rax, %cr0
3: popq %rcx
popq %rdx
popq %rax
retq
END(mds_handler_ivb)
ENTRY(mds_handler_bdw)
pushq %rax
pushq %rbx
pushq %rcx
pushq %rdi
pushq %rsi
movq %cr0, %rax
testb $CR0_TS, %al
je 1f
clts
1: movq PCPU(MDS_BUF), %rbx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
movq %rbx, %rdi
movq %rbx, %rsi
movl $40, %ecx
2: movntdq %xmm0, (%rbx)
addq $16, %rbx
decl %ecx
jnz 2b
mfence
movl $1536, %ecx
rep; movsb
lfence
movdqa PCPU(MDS_TMP),%xmm0
testb $CR0_TS, %al
je 3f
movq %rax, %cr0
3: popq %rsi
popq %rdi
popq %rcx
popq %rbx
popq %rax
retq
END(mds_handler_bdw)
ENTRY(mds_handler_skl_sse)
pushq %rax
pushq %rdx
pushq %rcx
pushq %rdi
movq %cr0, %rax
testb $CR0_TS, %al
je 1f
clts
1: movq PCPU(MDS_BUF), %rdi
movq PCPU(MDS_BUF64), %rdx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
lfence
orpd (%rdx), %xmm0
orpd (%rdx), %xmm0
xorl %eax, %eax
2: clflushopt 5376(%rdi, %rax, 8)
addl $8, %eax
cmpl $8 * 12, %eax
jb 2b
sfence
movl $6144, %ecx
xorl %eax, %eax
rep; stosb
mfence
movdqa PCPU(MDS_TMP), %xmm0
testb $CR0_TS, %al
je 3f
movq %rax, %cr0
3: popq %rdi
popq %rcx
popq %rdx
popq %rax
retq
END(mds_handler_skl_sse)
ENTRY(mds_handler_skl_avx)
pushq %rax
pushq %rdx
pushq %rcx
pushq %rdi
movq %cr0, %rax
testb $CR0_TS, %al
je 1f
clts
1: movq PCPU(MDS_BUF), %rdi
movq PCPU(MDS_BUF64), %rdx
vmovdqa %ymm0, PCPU(MDS_TMP)
vpxor %ymm0, %ymm0, %ymm0
lfence
vorpd (%rdx), %ymm0, %ymm0
vorpd (%rdx), %ymm0, %ymm0
xorl %eax, %eax
2: clflushopt 5376(%rdi, %rax, 8)
addl $8, %eax
cmpl $8 * 12, %eax
jb 2b
sfence
movl $6144, %ecx
xorl %eax, %eax
rep; stosb
mfence
vmovdqa PCPU(MDS_TMP), %ymm0
testb $CR0_TS, %al
je 3f
movq %rax, %cr0
3: popq %rdi
popq %rcx
popq %rdx
popq %rax
retq
END(mds_handler_skl_avx)
ENTRY(mds_handler_skl_avx512)
pushq %rax
pushq %rdx
pushq %rcx
pushq %rdi
movq %cr0, %rax
testb $CR0_TS, %al
je 1f
clts
1: movq PCPU(MDS_BUF), %rdi
movq PCPU(MDS_BUF64), %rdx
vmovdqa64 %zmm0, PCPU(MDS_TMP)
vpxor %zmm0, %zmm0, %zmm0
lfence
vorpd (%rdx), %zmm0, %zmm0
vorpd (%rdx), %zmm0, %zmm0
xorl %eax, %eax
2: clflushopt 5376(%rdi, %rax, 8)
addl $8, %eax
cmpl $8 * 12, %eax
jb 2b
sfence
movl $6144, %ecx
xorl %eax, %eax
rep; stosb
mfence
vmovdqa64 PCPU(MDS_TMP), %zmm0
testb $CR0_TS, %al
je 3f
movq %rax, %cr0
3: popq %rdi
popq %rcx
popq %rdx
popq %rax
retq
END(mds_handler_skl_avx512)
ENTRY(mds_handler_silvermont)
pushq %rax
pushq %rdx
pushq %rcx
movq %cr0, %rax
testb $CR0_TS, %al
je 1f
clts
1: movq PCPU(MDS_BUF), %rdx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
movl $16, %ecx
2: movntdq %xmm0, (%rdx)
addq $16, %rdx
decl %ecx
jnz 2b
mfence
movdqa PCPU(MDS_TMP),%xmm0
testb $CR0_TS, %al
je 3f
movq %rax, %cr0
3: popq %rcx
popq %rdx
popq %rax
retq
END(mds_handler_silvermont)
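
Of the handlers above, mds_handler_verw is the microcode-assisted variant: on CPUs that advertise MD_CLEAR, VERW with a memory operand also overwrites the affected buffers as a side effect; the other handlers emulate that with carefully sized sequences of loads, non-temporal stores, string moves and cache flushes tuned per microarchitecture. A minimal user-space sketch of the two-instruction VERW sequence (assuming GCC/Clang inline assembly; the kernel handler above uses a stack slot instead of a local variable):

#include <stdint.h>

/*
 * Sketch of the flush done by mds_handler_verw: store a valid selector
 * (%ds) to memory and VERW it.  On MD_CLEAR-capable CPUs the instruction
 * additionally clears the sampled microarchitectural buffers.
 */
static inline void
mds_flush_verw(void)
{
	uint16_t sel;

	__asm__ __volatile__(
	    "movw %%ds, %0\n\t"
	    "verw %0"
	    : "+m" (sel) : : "cc", "memory");
}

int
main(void)
{
	mds_flush_verw();
	return (0);
}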

View File

@ -84,8 +84,12 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");
uint32_t pc_pcid_gen; \
uint32_t pc_smp_tlb_done; /* TLB op acknowledgement */ \
uint32_t pc_ibpb_set; \
void *pc_mds_buf; \
void *pc_mds_buf64; \
uint32_t pc_pad[2]; \
uint8_t pc_mds_tmp[64]; \
u_int pc_ipi_bitmap; \
char __pad[3284] /* pad to UMA_PCPU_ALLOC_SIZE */
char __pad[3172] /* pad to UMA_PCPU_ALLOC_SIZE */
#define PC_DBREG_CMD_NONE 0
#define PC_DBREG_CMD_LOAD 1

View File

@ -540,6 +540,7 @@ cpuctl_do_eval_cpu_features(int cpu, struct thread *td)
#ifdef __amd64__
amd64_syscall_ret_flush_l1d_recalc();
#endif
hw_mds_recalculate();
printcpuinfo();
return (0);
}

View File

@ -522,6 +522,8 @@ doreti_exit:
2: movl $handle_ibrs_exit,%eax
pushl %ecx /* preserve enough call-used regs */
call *%eax
movl mds_handler,%eax
call *%eax
popl %ecx
movl %esp, %esi
movl PCPU(TRAMPSTK), %edx

View File

@ -209,6 +209,9 @@ ASSYM(PC_KESP0, offsetof(struct pcpu, pc_kesp0));
ASSYM(PC_TRAMPSTK, offsetof(struct pcpu, pc_trampstk));
ASSYM(PC_COPYOUT_BUF, offsetof(struct pcpu, pc_copyout_buf));
ASSYM(PC_IBPB_SET, offsetof(struct pcpu, pc_ibpb_set));
ASSYM(PC_MDS_TMP, offsetof(struct pcpu, pc_mds_tmp));
ASSYM(PC_MDS_BUF, offsetof(struct pcpu, pc_mds_buf));
ASSYM(PC_MDS_BUF64, offsetof(struct pcpu, pc_mds_buf64));
ASSYM(PMAP_TRM_MIN_ADDRESS, PMAP_TRM_MIN_ADDRESS);
ASSYM(KERNLOAD, KERNLOAD);
ASSYM(KERNBASE, KERNBASE);

View File

@ -749,6 +749,7 @@ initializecpu(void)
msr = rdmsr(MSR_EFER) | EFER_NXE;
wrmsr(MSR_EFER, msr);
}
hw_mds_recalculate();
if ((amd_feature & AMDID_RDTSCP) != 0 ||
(cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
wrmsr(MSR_TSC_AUX, PCPU_GET(cpuid));

View File

@ -472,3 +472,187 @@ ENTRY(handle_ibrs_exit)
movb $0,PCPU(IBPB_SET)
1: ret
END(handle_ibrs_exit)
ENTRY(mds_handler_void)
ret
END(mds_handler_void)
ENTRY(mds_handler_verw)
subl $4, %esp
movw %ds, (%esp)
verw (%esp)
addl $4, %esp
ret
END(mds_handler_verw)
ENTRY(mds_handler_ivb)
movl %cr0, %eax
testb $CR0_TS, %al
je 1f
clts
1: movl PCPU(MDS_BUF), %edx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
lfence
orpd (%edx), %xmm0
orpd (%edx), %xmm0
mfence
movl $40, %ecx
addl $16, %edx
2: movntdq %xmm0, (%edx)
addl $16, %edx
decl %ecx
jnz 2b
mfence
movdqa PCPU(MDS_TMP),%xmm0
testb $CR0_TS, %al
je 3f
movl %eax, %cr0
3: ret
END(mds_handler_ivb)
ENTRY(mds_handler_bdw)
movl %cr0, %eax
testb $CR0_TS, %al
je 1f
clts
1: movl PCPU(MDS_BUF), %ebx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
movl %ebx, %edi
movl %ebx, %esi
movl $40, %ecx
2: movntdq %xmm0, (%ebx)
addl $16, %ebx
decl %ecx
jnz 2b
mfence
movl $1536, %ecx
rep; movsb
lfence
movdqa PCPU(MDS_TMP),%xmm0
testb $CR0_TS, %al
je 3f
movl %eax, %cr0
3: ret
END(mds_handler_bdw)
ENTRY(mds_handler_skl_sse)
movl %cr0, %eax
testb $CR0_TS, %al
je 1f
clts
1: movl PCPU(MDS_BUF), %edi
movl PCPU(MDS_BUF64), %edx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
lfence
orpd (%edx), %xmm0
orpd (%edx), %xmm0
xorl %eax, %eax
2: clflushopt 5376(%edi, %eax, 8)
addl $8, %eax
cmpl $8 * 12, %eax
jb 2b
sfence
movl $6144, %ecx
xorl %eax, %eax
rep; stosb
mfence
movdqa PCPU(MDS_TMP), %xmm0
testb $CR0_TS, %al
je 3f
movl %eax, %cr0
3: ret
END(mds_handler_skl_sse)
ENTRY(mds_handler_skl_avx)
movl %cr0, %eax
testb $CR0_TS, %al
je 1f
clts
1: movl PCPU(MDS_BUF), %edi
movl PCPU(MDS_BUF64), %edx
vmovdqa %ymm0, PCPU(MDS_TMP)
vpxor %ymm0, %ymm0, %ymm0
lfence
vorpd (%edx), %ymm0, %ymm0
vorpd (%edx), %ymm0, %ymm0
xorl %eax, %eax
2: clflushopt 5376(%edi, %eax, 8)
addl $8, %eax
cmpl $8 * 12, %eax
jb 2b
sfence
movl $6144, %ecx
xorl %eax, %eax
rep; stosb
mfence
vmovdqa PCPU(MDS_TMP), %ymm0
testb $CR0_TS, %al
je 3f
movl %eax, %cr0
3: ret
END(mds_handler_skl_avx)
ENTRY(mds_handler_skl_avx512)
movl %cr0, %eax
testb $CR0_TS, %al
je 1f
clts
1: movl PCPU(MDS_BUF), %edi
movl PCPU(MDS_BUF64), %edx
vmovdqa64 %zmm0, PCPU(MDS_TMP)
vpxor %zmm0, %zmm0, %zmm0
lfence
vorpd (%edx), %zmm0, %zmm0
vorpd (%edx), %zmm0, %zmm0
xorl %eax, %eax
2: clflushopt 5376(%edi, %eax, 8)
addl $8, %eax
cmpl $8 * 12, %eax
jb 2b
sfence
movl $6144, %ecx
xorl %eax, %eax
rep; stosb
mfence
vmovdqa64 PCPU(MDS_TMP), %zmm0
testb $CR0_TS, %al
je 3f
movl %eax, %cr0
3: ret
END(mds_handler_skl_avx512)
ENTRY(mds_handler_silvermont)
movl %cr0, %eax
testb $CR0_TS, %al
je 1f
clts
1: movl PCPU(MDS_BUF), %edx
movdqa %xmm0, PCPU(MDS_TMP)
pxor %xmm0, %xmm0
movl $16, %ecx
2: movntdq %xmm0, (%edx)
addl $16, %edx
decl %ecx
jnz 2b
mfence
movdqa PCPU(MDS_TMP),%xmm0
testb $CR0_TS, %al
je 3f
movl %eax, %cr0
3: ret
END(mds_handler_silvermont)

View File

@ -84,11 +84,15 @@ _Static_assert(sizeof(struct monitorbuf) == 128, "2x cache line");
struct sx pc_copyout_slock; \
char *pc_copyout_buf; \
vm_offset_t pc_pmap_eh_va; \
caddr_t pc_pmap_eh_ptep; \
caddr_t pc_pmap_eh_ptep; \
uint32_t pc_smp_tlb_done; /* TLB op acknowledgement */ \
uint32_t pc_ibpb_set; \
void *pc_mds_buf; \
void *pc_mds_buf64; \
uint32_t pc_pad[4]; \
uint8_t pc_mds_tmp[64]; \
u_int pc_ipi_bitmap; \
char __pad[3606]
char __pad[3518]
#ifdef _KERNEL

View File

@ -448,6 +448,7 @@
/*
* CPUID instruction 7 Structured Extended Features, leaf 0 edx info
*/
#define CPUID_STDEXT3_MD_CLEAR 0x00000400
#define CPUID_STDEXT3_TSXFA 0x00002000
#define CPUID_STDEXT3_IBPB 0x04000000
#define CPUID_STDEXT3_STIBP 0x08000000
@ -462,6 +463,7 @@
#define IA32_ARCH_CAP_RSBA 0x00000004
#define IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY 0x00000008
#define IA32_ARCH_CAP_SSB_NO 0x00000010
#define IA32_ARCH_CAP_MDS_NO 0x00000020
/*
* CPUID manufacturers identifiers
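
CPUID_STDEXT3_MD_CLEAR (CPUID leaf 7, EDX bit 10) advertises the VERW-based buffer clearing, while IA32_ARCH_CAP_MDS_NO (bit 5 of the IA32_ARCH_CAPABILITIES MSR) marks parts that are not vulnerable; hw_mds_recalculate() below keys its handler selection off both. As a rough illustration, the CPUID half of that detection can be reproduced from userland with the compiler-provided cpuid.h helpers (the MSR half needs ring 0):

#include <cpuid.h>
#include <stdio.h>

/* CPUID leaf 7 subleaf 0, EDX bit 10 == CPUID_STDEXT3_MD_CLEAR above. */
#define	MD_CLEAR_BIT	0x00000400u

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) == 0) {
		printf("CPUID leaf 7 not supported\n");
		return (1);
	}
	printf("MD_CLEAR (VERW buffer clearing): %s\n",
	    (edx & MD_CLEAR_BIT) != 0 ? "yes" : "no");
	return (0);
}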

View File

@ -86,6 +86,7 @@ extern u_int max_apic_id;
extern int i386_read_exec;
extern int pti;
extern int hw_ibrs_active;
extern int hw_mds_disable;
extern int hw_ssb_active;
struct pcb;
@ -128,6 +129,7 @@ int isa_nmi(int cd);
void handle_ibrs_entry(void);
void handle_ibrs_exit(void);
void hw_ibrs_recalculate(void);
void hw_mds_recalculate(void);
void hw_ssb_recalculate(bool all_cpus);
void nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame);
void nmi_call_kdb_smp(u_int type, struct trapframe *frame);

View File

@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
@ -930,6 +931,203 @@ SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
hw_ssb_disable_handler, "I",
"Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
int hw_mds_disable;
/*
* Handler for Microarchitectural Data Sampling issues. Really not a
* pointer to C function: on amd64 the code must not change any CPU
* architectural state except possibly %rflags. Also, it is always
* called with interrupts disabled.
*/
void (*mds_handler)(void);
void mds_handler_void(void);
void mds_handler_verw(void);
void mds_handler_ivb(void);
void mds_handler_bdw(void);
void mds_handler_skl_sse(void);
void mds_handler_skl_avx(void);
void mds_handler_skl_avx512(void);
void mds_handler_silvermont(void);
static int
sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
{
const char *state;
if (mds_handler == mds_handler_void)
state = "inactive";
else if (mds_handler == mds_handler_verw)
state = "VERW";
else if (mds_handler == mds_handler_ivb)
state = "software IvyBridge";
else if (mds_handler == mds_handler_bdw)
state = "software Broadwell";
else if (mds_handler == mds_handler_skl_sse)
state = "software Skylake SSE";
else if (mds_handler == mds_handler_skl_avx)
state = "software Skylake AVX";
else if (mds_handler == mds_handler_skl_avx512)
state = "software Skylake AVX512";
else if (mds_handler == mds_handler_silvermont)
state = "software Silvermont";
else
state = "unknown";
return (SYSCTL_OUT(req, state, strlen(state)));
}
SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
sysctl_hw_mds_disable_state_handler, "A",
"Microarchitectural Data Sampling Mitigation state");
_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
void
hw_mds_recalculate(void)
{
struct pcpu *pc;
vm_offset_t b64;
u_long xcr0;
int i;
/*
* Allow the user to force the VERW variant even if MD_CLEAR is not
* reported. For instance, a hypervisor might unknowingly
* filter the capability out.
* For similar reasons, and for testing, allow enabling the
* mitigation even when the RDCL_NO or MDS_NO caps are reported.
*/
if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
((cpu_ia32_arch_caps & (IA32_ARCH_CAP_RDCL_NO |
IA32_ARCH_CAP_MDS_NO)) != 0 && hw_mds_disable == 3)) {
mds_handler = mds_handler_void;
} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
hw_mds_disable == 3) || hw_mds_disable == 1) {
mds_handler = mds_handler_verw;
} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
(CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
CPUID_TO_MODEL(cpu_id) == 0x3a) &&
(hw_mds_disable == 2 || hw_mds_disable == 3)) {
/*
* Nehalem, SandyBridge, IvyBridge
*/
CPU_FOREACH(i) {
pc = pcpu_find(i);
if (pc->pc_mds_buf == NULL) {
pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
bzero(pc->pc_mds_buf, 16);
}
}
mds_handler = mds_handler_ivb;
} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
(CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
(hw_mds_disable == 2 || hw_mds_disable == 3)) {
/*
* Haswell, Broadwell
*/
CPU_FOREACH(i) {
pc = pcpu_find(i);
if (pc->pc_mds_buf == NULL) {
pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
bzero(pc->pc_mds_buf, 16);
}
}
mds_handler = mds_handler_bdw;
} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
CPUID_STEPPING) <= 5) ||
CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
(CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
CPUID_STEPPING) <= 0xb) ||
(CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
CPUID_STEPPING) <= 0xc)) &&
(hw_mds_disable == 2 || hw_mds_disable == 3)) {
/*
* Skylake, KabyLake, CoffeeLake, WhiskeyLake,
* CascadeLake
*/
CPU_FOREACH(i) {
pc = pcpu_find(i);
if (pc->pc_mds_buf == NULL) {
pc->pc_mds_buf = malloc_domainset(6 * 1024,
M_TEMP, DOMAINSET_PREF(pc->pc_domain),
M_WAITOK);
b64 = (vm_offset_t)malloc_domainset(64 + 63,
M_TEMP, DOMAINSET_PREF(pc->pc_domain),
M_WAITOK);
pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
bzero(pc->pc_mds_buf64, 64);
}
}
xcr0 = rxcr(0);
if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
(cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
mds_handler = mds_handler_skl_avx512;
else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
(cpu_feature2 & CPUID2_AVX) != 0)
mds_handler = mds_handler_skl_avx;
else
mds_handler = mds_handler_skl_sse;
} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
((CPUID_TO_MODEL(cpu_id) == 0x37 ||
CPUID_TO_MODEL(cpu_id) == 0x4a ||
CPUID_TO_MODEL(cpu_id) == 0x4c ||
CPUID_TO_MODEL(cpu_id) == 0x4d ||
CPUID_TO_MODEL(cpu_id) == 0x5a ||
CPUID_TO_MODEL(cpu_id) == 0x5d ||
CPUID_TO_MODEL(cpu_id) == 0x6e ||
CPUID_TO_MODEL(cpu_id) == 0x65 ||
CPUID_TO_MODEL(cpu_id) == 0x75 ||
CPUID_TO_MODEL(cpu_id) == 0x1c ||
CPUID_TO_MODEL(cpu_id) == 0x26 ||
CPUID_TO_MODEL(cpu_id) == 0x27 ||
CPUID_TO_MODEL(cpu_id) == 0x35 ||
CPUID_TO_MODEL(cpu_id) == 0x36 ||
CPUID_TO_MODEL(cpu_id) == 0x7a))) {
/* Silvermont, Airmont */
CPU_FOREACH(i) {
pc = pcpu_find(i);
if (pc->pc_mds_buf == NULL)
pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
}
mds_handler = mds_handler_silvermont;
} else {
hw_mds_disable = 0;
mds_handler = mds_handler_void;
}
}
static int
sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
{
int error, val;
val = hw_mds_disable;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
if (val < 0 || val > 3)
return (EINVAL);
hw_mds_disable = val;
hw_mds_recalculate();
return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
sysctl_mds_disable_handler, "I",
"Microarchitectural Data Sampling Mitigation "
"(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
/*
* Enable and restore kernel text write permissions.
* Callers must ensure that disable_wp()/restore_wp() are executed