/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _X86_APICVAR_H_
#define _X86_APICVAR_H_

/*
 * Local and I/O APIC variable definitions.
 */

/*
 * Layout of local APIC interrupt vectors:
 *
 *	0xff (255)  +-------------+
 *	            |             | 15 (Spurious / IPIs / Local Interrupts)
 *	0xf0 (240)  +-------------+
 *	            |             | 14 (I/O Interrupts / Timer)
 *	0xe0 (224)  +-------------+
 *	            |             | 13 (I/O Interrupts)
 *	0xd0 (208)  +-------------+
 *	            |             | 12 (I/O Interrupts)
 *	0xc0 (192)  +-------------+
 *	            |             | 11 (I/O Interrupts)
 *	0xb0 (176)  +-------------+
 *	            |             | 10 (I/O Interrupts)
 *	0xa0 (160)  +-------------+
 *	            |             | 9 (I/O Interrupts)
 *	0x90 (144)  +-------------+
 *	            |             | 8 (I/O Interrupts / System Calls)
 *	0x80 (128)  +-------------+
 *	            |             | 7 (I/O Interrupts)
 *	0x70 (112)  +-------------+
 *	            |             | 6 (I/O Interrupts)
 *	0x60 (96)   +-------------+
 *	            |             | 5 (I/O Interrupts)
 *	0x50 (80)   +-------------+
 *	            |             | 4 (I/O Interrupts)
 *	0x40 (64)   +-------------+
 *	            |             | 3 (I/O Interrupts)
 *	0x30 (48)   +-------------+
 *	            |             | 2 (ATPIC Interrupts)
 *	0x20 (32)   +-------------+
 *	            |             | 1 (Exceptions, traps, faults, etc.)
 *	0x10 (16)   +-------------+
 *	            |             | 0 (Exceptions, traps, faults, etc.)
 *	0x00 (0)    +-------------+
 *
 * Note: 0x80 needs to be handled specially and not allocated to an
 * I/O device!
 */

#define	xAPIC_MAX_APIC_ID	0xfe
#define	xAPIC_ID_ALL		0xff
#define	MAX_APIC_ID		0x200
#define	APIC_ID_ALL		0xffffffff

#define	IOAPIC_MAX_ID		xAPIC_MAX_APIC_ID

/* I/O Interrupts are used for external devices such as ISA, PCI, etc. */
#define	APIC_IO_INTS		(IDT_IO_INTS + 16)
#define	APIC_NUM_IOINTS		191

/* The timer interrupt is used for clock handling and drives hardclock, etc. */
#define	APIC_TIMER_INT		(APIC_IO_INTS + APIC_NUM_IOINTS)
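
/*
 * As a worked example of the layout above: with IDT_IO_INTS at its usual
 * x86 value of 32, APIC_IO_INTS is 48 (0x30), the 191 I/O interrupt
 * vectors run from 48 through 238, and APIC_TIMER_INT lands on 239
 * (0xef), the top of priority class 14 in the diagram.
 */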

/*
 ********************* !!! WARNING !!! ******************************
 * Each local APIC has an interrupt receive fifo that is two entries deep
 * for each interrupt priority class (the upper 4 bits of the interrupt
 * vector).  Once the fifo is full the APIC can no longer receive interrupts
 * for that class, and IPIs sent from other CPUs will be blocked.
 * To avoid deadlocks there should be no more than two IPI interrupts
 * pending at the same time.
 * Currently this is guaranteed by dividing the IPIs into two groups that
 * each have at most one IPI interrupt pending.  The first group is protected
 * by the smp_ipi_mtx and waits for the completion of the IPI (only one IPI
 * user at a time).  The second group uses a single interrupt and a bitmap
 * to avoid redundant IPI interrupts.
 */

/* Interrupts for local APIC LVT entries other than the timer. */
#define	APIC_LOCAL_INTS		240
#define	APIC_ERROR_INT		APIC_LOCAL_INTS
#define	APIC_THERMAL_INT	(APIC_LOCAL_INTS + 1)
#define	APIC_CMC_INT		(APIC_LOCAL_INTS + 2)
#define	APIC_IPI_INTS		(APIC_LOCAL_INTS + 3)

#define	IPI_RENDEZVOUS		(APIC_IPI_INTS)		/* Inter-CPU rendezvous. */
#define	IPI_INVLTLB		(APIC_IPI_INTS + 1)	/* TLB Shootdown IPIs */
#define	IPI_INVLPG		(APIC_IPI_INTS + 2)
#define	IPI_INVLRNG		(APIC_IPI_INTS + 3)
#define	IPI_INVLCACHE		(APIC_IPI_INTS + 4)
/* Vector to handle bitmap based IPIs */
#define	IPI_BITMAP_VECTOR	(APIC_IPI_INTS + 5)

/* IPIs handled by IPI_BITMAP_VECTOR */
#define	IPI_AST			0	/* Generate software trap. */
#define	IPI_PREEMPT		1
#define	IPI_HARDCLOCK		2
#define	IPI_BITMAP_LAST		IPI_HARDCLOCK
#define	IPI_IS_BITMAPED(x)	((x) <= IPI_BITMAP_LAST)
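
#if 0
/*
 * A minimal sketch of the bitmap scheme described in the warning above,
 * assuming a hypothetical per-CPU pending word (cpu_ipi_pending) and the
 * cpu_apic_ids[] CPU-to-APIC-ID map from the x86 SMP code: the sender
 * records the IPI in the bitmap and only raises IPI_BITMAP_VECTOR when no
 * bits were already pending, so at most one bitmap IPI is ever outstanding
 * per target CPU.
 */
static volatile u_int cpu_ipi_pending[MAXCPU];

static void
ipi_bitmap_send(u_int cpu, u_int ipi)
{
	u_int new, old;

	KASSERT(IPI_IS_BITMAPED(ipi), ("not a bitmapped IPI"));
	do {
		old = cpu_ipi_pending[cpu];
		new = old | (1u << ipi);
	} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu], old, new));
	if (old == 0)
		lapic_ipi_vectored(IPI_BITMAP_VECTOR, cpu_apic_ids[cpu]);
}
#endif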

#define	IPI_STOP	(APIC_IPI_INTS + 6)	/* Stop CPU until restarted. */
#define	IPI_SUSPEND	(APIC_IPI_INTS + 7)	/* Suspend CPU until restarted. */
#define	IPI_DYN_FIRST	(APIC_IPI_INTS + 8)
#define	IPI_DYN_LAST	(253)			/* IPIs allocated at runtime */

/*
 * IPI_STOP_HARD does not need to occupy a slot in the IPI vector space since
 * it is delivered using an NMI anyway.
 */
#define	IPI_NMI_FIRST	254
#define	IPI_TRACE	254			/* Interrupt for tracing. */
#define	IPI_STOP_HARD	255			/* Stop CPU with an NMI. */

/*
 * The spurious interrupt can share the priority class with the IPIs since
 * it is not a normal interrupt (it does not use the APIC's interrupt fifo).
 */
#define	APIC_SPURIOUS_INT 255

#ifndef LOCORE

#define	APIC_IPI_DEST_SELF	-1
#define	APIC_IPI_DEST_ALL	-2
#define	APIC_IPI_DEST_OTHERS	-3
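
/*
 * The destination shorthands above pair with lapic_ipi_vectored() declared
 * below; for instance, an illustrative broadcast of the rendezvous IPI to
 * every CPU but the sender would be:
 *
 *	lapic_ipi_vectored(IPI_RENDEZVOUS, APIC_IPI_DEST_OTHERS);
 */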

#define	APIC_BUS_UNKNOWN	-1
#define	APIC_BUS_ISA		0
#define	APIC_BUS_EISA		1
#define	APIC_BUS_PCI		2
#define	APIC_BUS_MAX		APIC_BUS_PCI

#define	IRQ_EXTINT		-1
#define	IRQ_NMI			-2
#define	IRQ_SMI			-3
#define	IRQ_DISABLED		-4

/*
 * An APIC enumerator is a pseudo bus driver that enumerates APICs including
 * CPUs and I/O APICs.
 */
struct apic_enumerator {
	const char *apic_name;
	int (*apic_probe)(void);
	int (*apic_probe_cpus)(void);
	int (*apic_setup_local)(void);
	int (*apic_setup_io)(void);
	SLIST_ENTRY(apic_enumerator) apic_next;
};
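
#if 0
/*
 * Illustrative sketch only: a platform back end (for example an ACPI MADT
 * or MP Table parser) fills in an enumerator and hands it to
 * apic_register_enumerator() early in boot.  The "foo" names and the
 * SYSINIT ordering used here are hypothetical.
 */
static int	foo_probe(void);
static int	foo_probe_cpus(void);
static int	foo_setup_local(void);
static int	foo_setup_io(void);

static struct apic_enumerator foo_enumerator = {
	.apic_name = "foo",
	.apic_probe = foo_probe,
	.apic_probe_cpus = foo_probe_cpus,
	.apic_setup_local = foo_setup_local,
	.apic_setup_io = foo_setup_io,
};

static void
foo_register(void *dummy __unused)
{

	apic_register_enumerator(&foo_enumerator);
}
SYSINIT(foo_register, SI_SUB_TUNABLES - 1, SI_ORDER_FIRST, foo_register, NULL);
#endif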

inthand_t
	IDTVEC(apic_isr1), IDTVEC(apic_isr2), IDTVEC(apic_isr3),
	IDTVEC(apic_isr4), IDTVEC(apic_isr5), IDTVEC(apic_isr6),
	IDTVEC(apic_isr7), IDTVEC(cmcint), IDTVEC(errorint),
	IDTVEC(spuriousint), IDTVEC(timerint),
	IDTVEC(apic_isr1_pti), IDTVEC(apic_isr2_pti), IDTVEC(apic_isr3_pti),
	IDTVEC(apic_isr4_pti), IDTVEC(apic_isr5_pti), IDTVEC(apic_isr6_pti),
	IDTVEC(apic_isr7_pti), IDTVEC(cmcint_pti), IDTVEC(errorint_pti),
	IDTVEC(spuriousint_pti), IDTVEC(timerint_pti);

extern vm_paddr_t lapic_paddr;
extern int *apic_cpuids;

void	apic_register_enumerator(struct apic_enumerator *enumerator);
void	*ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase);
int	ioapic_disable_pin(void *cookie, u_int pin);
int	ioapic_get_vector(void *cookie, u_int pin);
void	ioapic_register(void *cookie);
int	ioapic_remap_vector(void *cookie, u_int pin, int vector);
int	ioapic_set_bus(void *cookie, u_int pin, int bus_type);
int	ioapic_set_extint(void *cookie, u_int pin);
int	ioapic_set_nmi(void *cookie, u_int pin);
int	ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol);
int	ioapic_set_triggermode(void *cookie, u_int pin,
	    enum intr_trigger trigger);
int	ioapic_set_smi(void *cookie, u_int pin);
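
#if 0
/*
 * Illustrative sketch only: an enumerator typically creates each I/O APIC
 * from the base address, APIC ID, and interrupt base it discovered, adjusts
 * individual pins with the setters above, and then registers it.  The
 * address, IDs, and pin numbers below are made-up placeholders.
 */
static void
foo_setup_one_ioapic(void)
{
	void *cookie;

	/* 0xfec00000 is the conventional default I/O APIC base address. */
	cookie = ioapic_create(0xfec00000, 2, 0);
	ioapic_set_polarity(cookie, 9, INTR_POLARITY_HIGH);
	ioapic_set_triggermode(cookie, 9, INTR_TRIGGER_LEVEL);
	ioapic_set_bus(cookie, 9, APIC_BUS_ISA);
	ioapic_register(cookie);
}
#endif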

/*
 * Struct containing pointers to APIC functions whose
 * implementation is selected at run time.
 */
struct apic_ops {
	void	(*create)(u_int, int);
	void	(*init)(vm_paddr_t);
	void	(*xapic_mode)(void);
	bool	(*is_x2apic)(void);
	void	(*setup)(int);
	void	(*dump)(const char *);
	void	(*disable)(void);
	void	(*eoi)(void);
	int	(*id)(void);
	int	(*intr_pending)(u_int);
	void	(*set_logical_id)(u_int, u_int, u_int);
	u_int	(*cpuid)(u_int);

	/* Vectors */
	u_int	(*alloc_vector)(u_int, u_int);
	u_int	(*alloc_vectors)(u_int, u_int *, u_int, u_int);
	void	(*enable_vector)(u_int, u_int);
	void	(*disable_vector)(u_int, u_int);
	void	(*free_vector)(u_int, u_int, u_int);

	/* PMC */
	int	(*enable_pmc)(void);
	void	(*disable_pmc)(void);
	void	(*reenable_pmc)(void);

	/* CMC */
	void	(*enable_cmc)(void);

	/* AMD ELVT */
	int	(*enable_mca_elvt)(void);

	/* IPI */
	void	(*ipi_raw)(register_t, u_int);
	void	(*ipi_vectored)(u_int, int);
	int	(*ipi_wait)(int);
	int	(*ipi_alloc)(inthand_t *ipifunc);
	void	(*ipi_free)(int vector);

	/* LVT */
	int	(*set_lvt_mask)(u_int, u_int, u_char);
	int	(*set_lvt_mode)(u_int, u_int, u_int32_t);
	int	(*set_lvt_polarity)(u_int, u_int, enum intr_polarity);
	int	(*set_lvt_triggermode)(u_int, u_int, enum intr_trigger);
};

extern struct apic_ops apic_ops;
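
#if 0
/*
 * Illustrative sketch only: the local APIC driver supplies one
 * implementation of each operation and installs the table in apic_ops,
 * after which the inline wrappers below dispatch through it.  The
 * "myapic" names are hypothetical.
 */
struct apic_ops apic_ops = {
	.create		= myapic_create,
	.init		= myapic_init,
	.eoi		= myapic_eoi,
	.ipi_vectored	= myapic_ipi_vectored,
	/* ... and so on for the remaining members ... */
};
#endif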

static inline void
lapic_create(u_int apic_id, int boot_cpu)
{

	apic_ops.create(apic_id, boot_cpu);
}

static inline void
lapic_init(vm_paddr_t addr)
{

	apic_ops.init(addr);
}

static inline void
lapic_xapic_mode(void)
{

	apic_ops.xapic_mode();
}

static inline bool
lapic_is_x2apic(void)
{

	return (apic_ops.is_x2apic());
}

static inline void
lapic_setup(int boot)
{

	apic_ops.setup(boot);
}

static inline void
lapic_dump(const char *str)
{

	apic_ops.dump(str);
}

static inline void
lapic_disable(void)
{

	apic_ops.disable();
}

static inline void
lapic_eoi(void)
{

	apic_ops.eoi();
}

static inline int
lapic_id(void)
{

	return (apic_ops.id());
}

static inline int
lapic_intr_pending(u_int vector)
{

	return (apic_ops.intr_pending(vector));
}

/* XXX: UNUSED */
static inline void
lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
{

	apic_ops.set_logical_id(apic_id, cluster, cluster_id);
}

static inline u_int
apic_cpuid(u_int apic_id)
{

	return (apic_ops.cpuid(apic_id));
}

static inline u_int
apic_alloc_vector(u_int apic_id, u_int irq)
{

	return (apic_ops.alloc_vector(apic_id, irq));
}

static inline u_int
apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
{

	return (apic_ops.alloc_vectors(apic_id, irqs, count, align));
}

static inline void
apic_enable_vector(u_int apic_id, u_int vector)
{

	apic_ops.enable_vector(apic_id, vector);
}

static inline void
apic_disable_vector(u_int apic_id, u_int vector)
{

	apic_ops.disable_vector(apic_id, vector);
}

static inline void
apic_free_vector(u_int apic_id, u_int vector, u_int irq)
{

	apic_ops.free_vector(apic_id, vector, irq);
}

static inline int
lapic_enable_pmc(void)
{

	return (apic_ops.enable_pmc());
}

static inline void
lapic_disable_pmc(void)
{

	apic_ops.disable_pmc();
}

static inline void
lapic_reenable_pmc(void)
{

	apic_ops.reenable_pmc();
}

static inline void
lapic_enable_cmc(void)
{

	apic_ops.enable_cmc();
}

static inline int
lapic_enable_mca_elvt(void)
{

	return (apic_ops.enable_mca_elvt());
}

static inline void
lapic_ipi_raw(register_t icrlo, u_int dest)
{

	apic_ops.ipi_raw(icrlo, dest);
}

static inline void
lapic_ipi_vectored(u_int vector, int dest)
{

	apic_ops.ipi_vectored(vector, dest);
}

static inline int
lapic_ipi_wait(int delay)
{

	return (apic_ops.ipi_wait(delay));
}

static inline int
lapic_ipi_alloc(inthand_t *ipifunc)
{

	return (apic_ops.ipi_alloc(ipifunc));
}

static inline void
lapic_ipi_free(int vector)
{

	apic_ops.ipi_free(vector);
}

static inline int
lapic_set_lvt_mask(u_int apic_id, u_int lvt, u_char masked)
{

	return (apic_ops.set_lvt_mask(apic_id, lvt, masked));
}

static inline int
lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
{

	return (apic_ops.set_lvt_mode(apic_id, lvt, mode));
}

static inline int
lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
{

	return (apic_ops.set_lvt_polarity(apic_id, lvt, pol));
}

static inline int
lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
{

	return (apic_ops.set_lvt_triggermode(apic_id, lvt, trigger));
}

void	lapic_handle_cmc(void);
void	lapic_handle_error(void);
void	lapic_handle_intr(int vector, struct trapframe *frame);
void	lapic_handle_timer(struct trapframe *frame);

int	ioapic_get_rid(u_int apic_id, uint16_t *ridp);

extern int x2apic_mode;
extern int lapic_eoi_suppression;

#ifdef _SYS_SYSCTL_H_
SYSCTL_DECL(_hw_apic);
#endif

#endif /* !LOCORE */
#endif /* _X86_APICVAR_H_ */