/*-
 * Copyright (c) 1995 Bruce D. Evans.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _X86_X86_VAR_H_
#define	_X86_X86_VAR_H_

/*
 * Miscellaneous machine-dependent declarations.
 */

/* Globals defined in the machine-dependent startup/identification code. */
extern	long	Maxmem;
extern	u_int	basemem;
extern	int	busdma_swi_pending;
/* CPUID-derived feature words and CPU identification state. */
extern	u_int	cpu_exthigh;
extern	u_int	cpu_feature;
extern	u_int	cpu_feature2;
extern	u_int	amd_feature;
extern	u_int	amd_feature2;
extern	u_int	amd_rascap;
extern	u_int	amd_pminfo;
extern	u_int	amd_extended_feature_extensions;
extern	u_int	via_feature_rng;
extern	u_int	via_feature_xcrypt;
extern	u_int	cpu_clflush_line_size;
extern	u_int	cpu_stdext_feature;
extern	u_int	cpu_stdext_feature2;
extern	u_int	cpu_stdext_feature3;
extern	uint64_t cpu_ia32_arch_caps;
extern	u_int	cpu_fxsr;
extern	u_int	cpu_high;
extern	u_int	cpu_id;
extern	u_int	cpu_max_ext_state_size;
extern	u_int	cpu_mxcsr_mask;
extern	u_int	cpu_procinfo;
extern	u_int	cpu_procinfo2;
extern	char	cpu_vendor[];
extern	u_int	cpu_vendor_id;
extern	u_int	cpu_mon_mwait_flags;
extern	u_int	cpu_mon_min_size;
extern	u_int	cpu_mon_max_size;
extern	u_int	cpu_maxphyaddr;
extern	u_int	cpu_power_eax;
extern	u_int	cpu_power_ebx;
extern	u_int	cpu_power_ecx;
extern	u_int	cpu_power_edx;
/* Hypervisor detection state (see identify_hypervisor()). */
extern	u_int	hv_base;
extern	u_int	hv_high;
extern	char	hv_vendor[];
extern	char	kstack[];
extern	char	sigcode[];
extern	int	szsigcode;
extern	int	workaround_erratum383;
/* User-mode segment selector values. */
extern	int	_udatasel;
extern	int	_ucodesel;
extern	int	_ucode32sel;
extern	int	_ufssel;
extern	int	_ugssel;
extern	int	use_xsave;
extern	uint64_t xsave_mask;
extern	u_int	max_apic_id;
extern	int	i386_read_exec;
extern	int	pti;
/*
 * NOTE(review): the integers below appear to gate speculative-execution
 * hardware-vulnerability workarounds (IBRS/IBPB, MDS, SSB, TAA, RNG DS) —
 * confirm against the hw_*_recalculate() implementations.
 */
extern	int	hw_ibrs_ibpb_active;
extern	int	hw_mds_disable;
extern	int	hw_ssb_active;
extern	int	x86_taa_enable;
extern	int	cpu_flush_rsb_ctxsw;
extern	int	x86_rngds_mitg_enable;
extern	int	cpu_amdc1e_bug;
extern	char	bootmethod[16];

/* Opaque struct forward declarations; the bodies are defined elsewhere. */
struct	pcb;
struct	thread;
struct	reg;
struct	fpreg;
struct	dbreg;
struct	dumperinfo;
struct	trapframe;

/*
 * The interface type of the interrupt handler entry point cannot be
 * expressed in C.  Use simplest non-variadic function type as an
 * approximation.
 */
typedef void alias_for_inthand_t(void);

bool	acpi_get_fadt_bootflags(uint16_t *flagsp);
void	*alloc_fpusave(int flags);
void	busdma_swi(void);
u_int	cpu_auxmsr(void);
vm_paddr_t cpu_getmaxphyaddr(void);
bool	cpu_mwait_usable(void);
void	cpu_probe_amdc1e(void);
void	cpu_setregs(void);
/* Hardware debug-register (DRx) watchpoint management. */
int	dbreg_set_watchpoint(vm_offset_t addr, vm_size_t size, int access);
int	dbreg_clr_watchpoint(vm_offset_t addr, vm_size_t size);
void	dbreg_list_watchpoints(void);
void	x86_clear_dbregs(struct pcb *pcb);
bool	disable_wp(void);
void	restore_wp(bool old_wp);
void	finishidentcpu(void);
void	identify_cpu1(void);
void	identify_cpu2(void);
void	identify_cpu_fixup_bsp(void);
void	identify_hypervisor(void);
void	initializecpu(void);
void	initializecpucache(void);
bool	fix_cpuid(void);
void	fillw(int /*u_short*/ pat, void *base, size_t cnt);
int	is_physical_memory(vm_paddr_t addr);
int	isa_nmi(int cd);
void	handle_ibrs_entry(void);
void	handle_ibrs_exit(void);
void	hw_ibrs_recalculate(bool all_cpus);
void	hw_mds_recalculate(void);
void	hw_ssb_recalculate(bool all_cpus);
void	x86_taa_recalculate(void);
void	x86_rngds_mitg_recalculate(bool all_cpus);
void	nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame);
void	nmi_call_kdb_smp(u_int type, struct trapframe *frame);
void	nmi_handle_intr(u_int type, struct trapframe *frame);
void	pagecopy(void *from, void *to);
void	printcpuinfo(void);
int	pti_get_default(void);
int	user_dbreg_trap(register_t dr6);
int	minidumpsys(struct dumperinfo *);
struct pcb *get_pcb_td(struct thread *td);
void	x86_set_fork_retval(struct thread *td);
uint64_t rdtsc_ordered(void);

/*
 * MSR ops for x86_msr_op():
 * the low bits select the operation to perform on the MSR.
 */
#define	MSR_OP_ANDNOT		0x00000001
#define	MSR_OP_OR		0x00000002
#define	MSR_OP_WRITE		0x00000003
#define	MSR_OP_READ		0x00000004

/*
 * Where and which execution mode: high bits select on which CPU(s) and in
 * what context (local, scheduled, or rendezvous) the MSR op runs.
 */
#define	MSR_OP_LOCAL		0x10000000
#define	MSR_OP_SCHED_ALL	0x20000000
#define	MSR_OP_SCHED_ONE	0x30000000
#define	MSR_OP_RENDEZVOUS_ALL	0x40000000
#define	MSR_OP_RENDEZVOUS_ONE	0x50000000
/*
 * Encodes a target CPU id into the op word at bits 8 and up; presumably
 * paired with the *_ONE modes above — confirm in x86_msr_op().
 */
#define	MSR_OP_CPUID(id)	((id) << 8)

void x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res);

#endif