Konstantin Belousov 32a1e9e4a5 Update print_INTEL_TLB() with the tag values from the Intel SDM
rev. 55.  Without the update, the cache and TLB descriptions for modern
CPUs looked quite questionable, e.g. a Haswell i7 4770S reported:
	Data TLB: 4 KB pages, 4-way set associative, 64 entries
	L2 cache: 256 kbytes, 8-way associative, 64 bytes/line
After the update, the report is:
	Data TLB: 1 GByte pages, 4-way set associative, 4 entries
	Data TLB: 4 KB pages, 4-way set associative, 64 entries
	Instruction TLB: 2M/4M pages, fully associative, 8 entries
	Instruction TLB: 4KByte pages, 8-way set associative, 64 entries
	64-Byte prefetching
	Shared 2nd-Level TLB: 4 KByte/2MByte pages, 8-way associative, 1024 entries
	L2 cache: 256 kbytes, 8-way associative, 64 bytes/line
Some tags were apparently removed from Table 3-21, Vol. 2A.  Keep
them around, but add a comment noting the removal.
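
For context, the descriptor bytes that print_INTEL_TLB() decodes come from
CPUID leaf 2: each of %eax-%edx packs four one-byte tags, bit 31 marks a
register as holding no valid tags, and the low byte of %eax is the leaf's
repeat count rather than a tag.  The user-space sketch below is not the
kernel's identcpu.c code; the tag strings are only a small, hand-picked
subset of the SDM table, and the __cpuid() macro from GCC/Clang <cpuid.h>
is assumed.

	#include <cpuid.h>
	#include <stdio.h>

	/* Decode a few CPUID leaf 2 descriptor tags (subset of the SDM table). */
	static void
	print_tag(unsigned char tag)
	{
		switch (tag) {
		case 0x76:
			printf("Instruction TLB: 2M/4M pages, fully associative, 8 entries\n");
			break;
		case 0xb5:
			printf("Instruction TLB: 4KByte pages, 8-way set associative, 64 entries\n");
			break;
		case 0xc1:
			printf("Shared 2nd-Level TLB: 4 KByte/2MByte pages, 8-way associative, 1024 entries\n");
			break;
		case 0xf0:
			printf("64-Byte prefetching\n");
			break;
		default:
			break;		/* unknown (or removed) tags are skipped */
		}
	}

	int
	main(void)
	{
		unsigned int regs[4];
		int i, j;

		__cpuid(2, regs[0], regs[1], regs[2], regs[3]);
		for (i = 0; i < 4; i++) {
			if (regs[i] & 0x80000000u)	/* bit 31: register holds no tags */
				continue;
			for (j = 0; j < 4; j++) {
				if (i == 0 && j == 0)
					continue;	/* low byte of %eax is the repeat count */
				print_tag((regs[i] >> (j * 8)) & 0xff);
			}
		}
		return (0);
	}

The kernel's print_INTEL_TLB() is essentially a much larger switch of this
shape, which is why a new SDM revision means adding (or annotating removed)
tags.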

Update the format line for cpu_stdext_feature according to the bits
from the SDM rev. 55.  It appears that Haswells do not store %cs and
%ds values in the FPU save area.
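
As a reminder of what such a "format line" is: the kernel printf(9) "%b"
conversion takes the value plus a string whose first byte is the output
base and whose following groups are a 1-based bit number followed by the
bit's name.  The small user-space re-implementation below only illustrates
that encoding, with an abbreviated, hand-picked set of CPUID.(EAX=7,ECX=0)
%ebx bit names; the real identcpu.c line names many more bits.

	#include <stdio.h>

	/* Minimal stand-in for the printf(9) "%b" decoding. */
	static void
	print_bits(unsigned int val, const char *fmt)
	{
		int any = 0, bit, base = *fmt++;

		printf(base == 16 ? "0x%x" : "%u", val);
		while ((bit = (unsigned char)*fmt++) != '\0') {
			if (val & (1u << (bit - 1))) {
				printf("%c", any ? ',' : '<');
				any = 1;
				for (; (unsigned char)*fmt > ' '; fmt++)
					putchar(*fmt);
			} else
				for (; (unsigned char)*fmt > ' '; fmt++)
					;
		}
		if (any)
			putchar('>');
		putchar('\n');
	}

	int
	main(void)
	{
		/* "\020" = print in hex; "\001" = bit 1 (FSGSBASE), "\011" = bit 9 (BMI2), ... */
		print_bits(0x121,	/* FSGSBASE | AVX2 | BMI2, an arbitrary example value */
		    "\020\001FSGSBASE\004BMI1\006AVX2\010SMEP\011BMI2\012ERMS\025SMAP");
		return (0);
	}

Updating the format line therefore amounts to editing that bit-name string
to match the SDM's current bit assignments.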

Store the content of the %ecx register from CPUID leaf 0x7, subleaf 0,
as cpu_stdext_feature2 and print the defined bits from it, again
according to SDM rev. 55.
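
A minimal user-space sketch of that leaf 0x7, subleaf 0 read; the kernel's
identcpu.c uses the cpuid_count() helper and the cpu_high limit rather than
the GCC/Clang <cpuid.h> helpers assumed here:

	#include <cpuid.h>
	#include <stdio.h>

	int
	main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Leaf 7 is valid only if the maximum basic leaf allows it. */
		if (__get_cpuid_max(0, NULL) >= 7) {
			__cpuid_count(7, 0, eax, ebx, ecx, edx);
			printf("stdext_feature  (%%ebx) = 0x%08x\n", ebx);
			printf("stdext_feature2 (%%ecx) = 0x%08x\n", ecx);
		}
		return (0);
	}

In the kernel, the two words end up in cpu_stdext_feature and
cpu_stdext_feature2, declared in the file below.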

Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
2015-06-06 22:03:24 +00:00

/*-
* Copyright (c) KATO Takenori, 1997, 1998.
*
* All rights reserved. Unpublished rights reserved under the copyright
* laws of Japan.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer as
* the first lines of this file unmodified.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int hw_clflush_disable = -1;

int cpu; /* Are we 386, 386sx, 486, etc? */
u_int cpu_feature; /* Feature flags */
u_int cpu_feature2; /* Feature flags */
u_int amd_feature; /* AMD feature flags */
u_int amd_feature2; /* AMD feature flags */
u_int amd_pminfo; /* AMD advanced power management info */
u_int via_feature_rng; /* VIA RNG features */
u_int via_feature_xcrypt; /* VIA ACE features */
u_int cpu_high; /* Highest arg to CPUID */
u_int cpu_exthigh; /* Highest arg to extended CPUID */
u_int cpu_id; /* Stepping ID */
u_int cpu_procinfo; /* HyperThreading Info / Brand Index / CLFLUSH */
u_int cpu_procinfo2; /* Multicore info */
char cpu_vendor[20]; /* CPU Origin code */
u_int cpu_vendor_id; /* CPU vendor ID */
u_int cpu_fxsr; /* SSE enabled */
u_int cpu_mxcsr_mask; /* Valid bits in mxcsr */
u_int cpu_clflush_line_size = 32; /* CLFLUSH line size, bytes */
u_int cpu_stdext_feature; /* CPUID.(EAX=7,ECX=0):%ebx */
u_int cpu_stdext_feature2; /* CPUID.(EAX=7,ECX=0):%ecx */
u_int cpu_max_ext_state_size; /* Max size of the XSAVE state area */
u_int cpu_mon_mwait_flags; /* MONITOR/MWAIT flags (CPUID.05H.ECX) */
u_int cpu_mon_min_size; /* MONITOR minimum range size, bytes */
u_int cpu_mon_max_size; /* MONITOR maximum range size, bytes */
u_int cpu_maxphyaddr; /* Max phys addr width in bits */

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA xcrypt feature available in CPU");

static void
init_amd(void)
{

	/*
	 * Work around Erratum 721 for Family 10h and 12h processors.
	 * These processors may incorrectly update the stack pointer
	 * after a long series of push and/or near-call instructions,
	 * or a long series of pop and/or near-return instructions.
	 *
	 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
	 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
	 *
	 * Hypervisors do not provide access to the errata MSR,
	 * causing #GP exception on attempt to apply the errata. The
	 * MSR write shall be done on host and persist globally
	 * anyway, so do not try to do it when under virtualization.
	 */
	switch (CPUID_TO_FAMILY(cpu_id)) {
	case 0x10:
	case 0x12:
		if ((cpu_feature2 & CPUID2_HV) == 0)
			wrmsr(0xc0011029, rdmsr(0xc0011029) | 1);
		break;
	}
}

/*
* Initialize special VIA features
*/
static void
init_via(void)
{
	u_int regs[4], val;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		return;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
	if (via_feature_xcrypt != 0)
		wrmsr(0x1107, rdmsr(0x1107) | (1 << 28));
}

/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	cr4 = rcr4();
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;

	/*
	 * Postpone enabling the SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables. The boot loader enables the U bit in
	 * its tables.
	 */
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
		cr4 |= CR4_SMEP;
	load_cr4(cr4);
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}
}

void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 *	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments. These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;

	/*
	 * Allow the CLFLUSH feature to be disabled manually via the
	 * hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;
}
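
For illustration only (this is not kernel code): the CLFLUSH line size
computed above is the stride a caller would use to write a buffer back from
the cache line by line.  A self-contained sketch, with a hypothetical
flush_buf() helper and raw inline assembly standing in for the kernel's
clflush()/mfence() wrappers, and the line size assumed to be a power of two:

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Write back every cache line covering [buf, buf + len), stepping
	 * by the given CLFLUSH line size in bytes.
	 */
	static void
	flush_buf(const void *buf, size_t len, unsigned int line_size)
	{
		const char *p = (const char *)((uintptr_t)buf & ~(uintptr_t)(line_size - 1));
		const char *end = (const char *)buf + len;

		__asm __volatile("mfence" ::: "memory");	/* order earlier stores */
		for (; p < end; p += line_size)
			__asm __volatile("clflush %0" : : "m" (*p));
		__asm __volatile("mfence" ::: "memory");	/* wait for the flushes */
	}

	int
	main(void)
	{
		static char buf[256];

		flush_buf(buf, sizeof(buf), 64);	/* 64 would come from cpu_clflush_line_size */
		return (0);
	}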