Local APIC: add support for extended LVT entries found in AMD processors

The extended LVT entries can be used to configure interrupt delivery
for various processor-internal events that are capable of using this
feature.

All current processors that support the feature have four such entries.
All of the entries are masked after processor reset, but firmware may
already be using some of them.
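
For illustration only, a check like the following could tell whether
firmware has already unmasked one of the entries (the helper name is
made up; LAPIC_EXT_LVT0 and APIC_LVT_M are the constants used in the
patch below):

/*
 * Illustrative sketch, not part of this change: report whether the
 * extended LVT entry at 'idx' is currently unmasked, which would
 * suggest that firmware has claimed it.
 */
static bool
elvt_claimed_by_firmware(u_int idx)
{

        return ((lapic_read32(LAPIC_EXT_LVT0 + idx) & APIC_LVT_M) == 0);
}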

BIOS and Kernel Developer's Guides for some processor models do not assign
any particular names to the extended LVTs, while other BKDGs provide names
and suggested usage for them.
However, no processor model that supports the feature has a fixed
mapping between the LVTs and the processor events.  Any entry can be
assigned to any event.  The assignment is done by programming the offset
of an entry into the configuration bits that correspond to an event.
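
To make the direction of the mapping concrete, the programming pattern
generally looks like the sketch below.  EVENT_CFG_MSR,
EVENT_ELVT_OFF_SHIFT and EVENT_ELVT_OFF_VALID are placeholders for the
event-specific configuration register and its LVT offset field; each
event documents its own layout in the BKDG:

/*
 * Illustrative sketch, not part of this change: bind a processor event
 * to the extended LVT entry at 'offset'.  The MSR and field names are
 * placeholders, not real definitions.
 */
static void
event_bind_to_elvt(u_int offset)
{
        uint64_t cfg;

        cfg = rdmsr(EVENT_CFG_MSR);
        cfg |= (uint64_t)offset << EVENT_ELVT_OFF_SHIFT;
        cfg |= EVENT_ELVT_OFF_VALID;    /* some events have a "valid" bit */
        wrmsr(EVENT_CFG_MSR, cfg);
}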

This change does not expose the full flexibility that the feature offers.
It adds just a single method that configures a hardcoded extended LVT
entry to deliver APIC_CMC_INT.  The method is intended to be used with
the Machine Check Error Thresholding mechanism on supported processor
models.
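
A consumer of the new method is expected to look roughly like the sketch
below; the function name is hypothetical and the MCA-side programming,
which belongs to a separate change, is left abstract:

/*
 * Illustrative sketch, not part of this change: lapic_enable_mca_elvt()
 * returns the offset of the extended LVT entry that it configured
 * (APIC_ELVT_MCA), or -1 on failure.
 */
static void
amd_thresholding_try_enable(void)
{
        int offset;

        offset = lapic_enable_mca_elvt();
        if (offset < 0)
                return;
        /*
         * Program 'offset' into the LVT offset field of the MCi_MISC
         * thresholding registers and enable the threshold interrupt;
         * the exact field layout is described in the BKDG.
         */
}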

For reference, please see the BKDGs for families 10h - 16h, specifically
the descriptions of the APIC30, APIC400, and APIC[530:500] registers.
For a description of the Error Thresholding mechanism see, for example,
the BKDG for family 10h, section 2.12.1.6.
http://developer.amd.com/resources/developer-guides-manuals/

Thanks to jhb and kib for their suggestions.

Reviewed by:	kib
Discussed with:	jhb
MFC after:	5 weeks
Relnotes:	maybe
Differential Revision: https://reviews.freebsd.org/D9612

committed by avg on 2017-02-28 18:48:12 +00:00
parent 0a68260784
commit c81dcb14ca
3 changed files with 180 additions and 19 deletions


@@ -241,18 +241,33 @@ enum LAPIC_REGISTERS {
LAPIC_CCR_TIMER = 0x39,
LAPIC_DCR_TIMER = 0x3e,
LAPIC_SELF_IPI = 0x3f, /* Only in x2APIC */
LAPIC_EXT_FEATURES = 0x40, /* AMD */
LAPIC_EXT_CTRL = 0x41, /* AMD */
LAPIC_EXT_SEOI = 0x42, /* AMD */
LAPIC_EXT_IER0 = 0x48, /* AMD */
LAPIC_EXT_IER1 = 0x49, /* AMD */
LAPIC_EXT_IER2 = 0x4a, /* AMD */
LAPIC_EXT_IER3 = 0x4b, /* AMD */
LAPIC_EXT_IER4 = 0x4c, /* AMD */
LAPIC_EXT_IER5 = 0x4d, /* AMD */
LAPIC_EXT_IER6 = 0x4e, /* AMD */
LAPIC_EXT_IER7 = 0x4f, /* AMD */
LAPIC_EXT_LVT0 = 0x50, /* AMD */
LAPIC_EXT_LVT1 = 0x51, /* AMD */
LAPIC_EXT_LVT2 = 0x52, /* AMD */
LAPIC_EXT_LVT3 = 0x53, /* AMD */
};
/*
* The LAPIC_SELF_IPI register only exists in x2APIC mode.  The
* formula below is applicable only to reserve the memory region,
* i.e. for xAPIC mode, where LAPIC_SELF_IPI conveniently serves as
* the address just past the end of the region.
*/
#define LAPIC_MEM_REGION (LAPIC_SELF_IPI * 0x10)
#define LAPIC_MEM_MUL 0x10
/*
* Although some registers are available on AMD processors only,
* it's not a big waste to reserve them on all platforms.
* However, we need to watch out for this space being assigned for
* non-APIC purposes in future processor models.
*/
#define LAPIC_MEM_REGION ((LAPIC_EXT_LVT3 + 1) * LAPIC_MEM_MUL)
/******************************************************************************
* I/O APIC structure
*/
@@ -295,6 +310,7 @@ typedef struct IOAPIC ioapic_t;
#define APIC_VER_MAXLVT 0x00ff0000
#define MAXLVTSHIFT 16
#define APIC_VER_EOI_SUPPRESSION 0x01000000
#define APIC_VER_AMD_EXT_SPACE 0x80000000
/* fields in LDR */
#define APIC_LDR_RESERVED 0x00ffffff
@@ -418,6 +434,13 @@ typedef struct IOAPIC ioapic_t;
#define APIC_TDCR_128 0x0a
#define APIC_TDCR_1 0x0b
/* Constants related to AMD Extended APIC Features Register */
#define APIC_EXTF_ELVT_MASK 0x00ff0000
#define APIC_EXTF_ELVT_SHIFT 16
#define APIC_EXTF_EXTID_CAP 0x00000004
#define APIC_EXTF_SEIO_CAP 0x00000002
#define APIC_EXTF_IER_CAP 0x00000001
/* LVT table indices */
#define APIC_LVT_LINT0 0
#define APIC_LVT_LINT1 1
@@ -428,6 +451,13 @@ typedef struct IOAPIC ioapic_t;
#define APIC_LVT_CMCI 6
#define APIC_LVT_MAX APIC_LVT_CMCI
/* AMD extended LVT constants, seemingly assigned by fiat */
#define APIC_ELVT_IBS 0 /* Instruction based sampling */
#define APIC_ELVT_MCA 1 /* MCE thresholding */
#define APIC_ELVT_DEI 2 /* Deferred error interrupt */
#define APIC_ELVT_SBI 3 /* Sideband interface */
#define APIC_ELVT_MAX APIC_ELVT_SBI
/******************************************************************************
* I/O APIC defines
*/


@@ -232,6 +232,9 @@ struct apic_ops {
/* CMC */
void (*enable_cmc)(void);
/* AMD ELVT */
int (*enable_mca_elvt)(void);
/* IPI */
void (*ipi_raw)(register_t, u_int);
void (*ipi_vectored)(u_int, int);
@@ -396,6 +399,13 @@ lapic_enable_cmc(void)
apic_ops.enable_cmc();
}
static inline int
lapic_enable_mca_elvt(void)
{
return (apic_ops.enable_mca_elvt());
}
static inline void
lapic_ipi_raw(register_t icrlo, u_int dest)
{


@@ -122,6 +122,7 @@ struct lvt {
struct lapic {
struct lvt la_lvts[APIC_LVT_MAX + 1];
struct lvt la_elvts[APIC_ELVT_MAX + 1];
u_int la_id:8;
u_int la_cluster:4;
u_int la_cluster_id:2;
@@ -146,6 +147,14 @@ static struct lvt lvts[APIC_LVT_MAX + 1] = {
{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT }, /* CMCI */
};
/* Global defaults for AMD local APIC ELVT entries. */
static struct lvt elvts[APIC_ELVT_MAX + 1] = {
{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, APIC_CMC_INT },
{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
};
static inthand_t *ioint_handlers[] = {
NULL, /* 0 - 31 */
IDTVEC(apic_isr1), /* 32 - 63 */
@@ -319,6 +328,7 @@ static int native_lapic_enable_pmc(void);
static void native_lapic_disable_pmc(void);
static void native_lapic_reenable_pmc(void);
static void native_lapic_enable_cmc(void);
static int native_lapic_enable_mca_elvt(void);
static int native_lapic_set_lvt_mask(u_int apic_id, u_int lvt,
u_char masked);
static int native_lapic_set_lvt_mode(u_int apic_id, u_int lvt,
@@ -357,6 +367,7 @@ struct apic_ops apic_ops = {
.disable_pmc = native_lapic_disable_pmc,
.reenable_pmc = native_lapic_reenable_pmc,
.enable_cmc = native_lapic_enable_cmc,
.enable_mca_elvt = native_lapic_enable_mca_elvt,
#ifdef SMP
.ipi_raw = native_lapic_ipi_raw,
.ipi_vectored = native_lapic_ipi_vectored,
@@ -371,15 +382,8 @@ struct apic_ops apic_ops = {
};
static uint32_t
lvt_mode(struct lapic *la, u_int pin, uint32_t value)
lvt_mode_impl(struct lapic *la, struct lvt *lvt, u_int pin, uint32_t value)
{
struct lvt *lvt;
KASSERT(pin <= APIC_LVT_MAX, ("%s: pin %u out of range", __func__, pin));
if (la->la_lvts[pin].lvt_active)
lvt = &la->la_lvts[pin];
else
lvt = &lvts[pin];
value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
APIC_LVT_VECTOR);
@@ -411,6 +415,38 @@ lvt_mode(struct lapic *la, u_int pin, uint32_t value)
return (value);
}
static uint32_t
lvt_mode(struct lapic *la, u_int pin, uint32_t value)
{
struct lvt *lvt;
KASSERT(pin <= APIC_LVT_MAX,
("%s: pin %u out of range", __func__, pin));
if (la->la_lvts[pin].lvt_active)
lvt = &la->la_lvts[pin];
else
lvt = &lvts[pin];
return (lvt_mode_impl(la, lvt, pin, value));
}
static uint32_t
elvt_mode(struct lapic *la, u_int idx, uint32_t value)
{
struct lvt *elvt;
KASSERT(idx <= APIC_ELVT_MAX,
("%s: idx %u out of range", __func__, idx));
elvt = &la->la_elvts[idx];
KASSERT(elvt->lvt_active, ("%s: ELVT%u is not active", __func__, idx));
KASSERT(elvt->lvt_edgetrigger,
("%s: ELVT%u is not edge triggered", __func__, idx));
KASSERT(elvt->lvt_activehi,
("%s: ELVT%u is not active high", __func__, idx));
return (lvt_mode_impl(la, elvt, idx, value));
}
/*
* Map the local APIC and setup necessary interrupt vectors.
*/
@@ -583,6 +619,10 @@ native_lapic_create(u_int apic_id, int boot_cpu)
lapics[apic_id].la_lvts[i] = lvts[i];
lapics[apic_id].la_lvts[i].lvt_active = 0;
}
for (i = 0; i <= APIC_ELVT_MAX; i++) {
lapics[apic_id].la_elvts[i] = elvts[i];
lapics[apic_id].la_elvts[i].lvt_active = 0;
}
for (i = 0; i <= APIC_NUM_IOINTS; i++)
lapics[apic_id].la_ioint_irqs[i] = -1;
lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
@@ -602,18 +642,49 @@ native_lapic_create(u_int apic_id, int boot_cpu)
#endif
}
static inline uint32_t
amd_read_ext_features(void)
{
uint32_t version;
if (cpu_vendor_id != CPU_VENDOR_AMD)
return (0);
version = lapic_read32(LAPIC_VERSION);
if ((version & APIC_VER_AMD_EXT_SPACE) != 0)
return (lapic_read32(LAPIC_EXT_FEATURES));
else
return (0);
}
static inline uint32_t
amd_read_elvt_count(void)
{
uint32_t extf;
uint32_t count;
extf = amd_read_ext_features();
count = (extf & APIC_EXTF_ELVT_MASK) >> APIC_EXTF_ELVT_SHIFT;
count = min(count, APIC_ELVT_MAX + 1);
return (count);
}
/*
* Dump contents of local APIC registers
*/
static void
native_lapic_dump(const char* str)
{
uint32_t version;
uint32_t maxlvt;
uint32_t extf;
int elvt_count;
int i;
maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
version = lapic_read32(LAPIC_VERSION);
maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
lapic_read32(LAPIC_ID), lapic_read32(LAPIC_VERSION),
lapic_read32(LAPIC_ID), version,
lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
if ((cpu_feature2 & CPUID2_X2APIC) != 0)
printf(" x2APIC: %d", x2apic_mode);
@@ -628,6 +699,14 @@ native_lapic_dump(const char* str)
printf("\n");
if (maxlvt >= APIC_LVT_CMCI)
printf(" cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
extf = amd_read_ext_features();
if (extf != 0) {
printf(" AMD ext features: 0x%08x\n", extf);
elvt_count = amd_read_elvt_count();
for (i = 0; i < elvt_count; i++)
printf(" AMD elvt%d: 0x%08x\n", i,
lapic_read32(LAPIC_EXT_LVT0 + i));
}
}
static void
@@ -645,15 +724,19 @@ static void
native_lapic_setup(int boot)
{
struct lapic *la;
uint32_t version;
uint32_t maxlvt;
register_t saveintr;
char buf[MAXCOMLEN + 1];
int elvt_count;
int i;
saveintr = intr_disable();
la = &lapics[lapic_id()];
KASSERT(la->la_present, ("missing APIC structure"));
maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
version = lapic_read32(LAPIC_VERSION);
maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
/* Initialize the TPR to allow all interrupts. */
lapic_set_tpr(0);
@@ -718,6 +801,13 @@ native_lapic_setup(int boot)
lapic_read32(LAPIC_LVT_CMCI)));
}
elvt_count = amd_read_elvt_count();
for (i = 0; i < elvt_count; i++) {
if (la->la_elvts[i].lvt_active)
lapic_write32(LAPIC_EXT_LVT0 + i,
elvt_mode(la, i, lapic_read32(LAPIC_EXT_LVT0 + i)));
}
intr_restore(saveintr);
}
@@ -1311,6 +1401,37 @@ native_lapic_enable_cmc(void)
printf("lapic%u: CMCI unmasked\n", apic_id);
}
static int
native_lapic_enable_mca_elvt(void)
{
u_int apic_id;
uint32_t value;
int elvt_count;
#ifdef DEV_ATPIC
if (lapic_map == NULL)
return (-1);
#endif
apic_id = PCPU_GET(apic_id);
KASSERT(lapics[apic_id].la_present,
("%s: missing APIC %u", __func__, apic_id));
elvt_count = amd_read_elvt_count();
if (elvt_count <= APIC_ELVT_MCA)
return (-1);
value = lapic_read32(LAPIC_EXT_LVT0 + APIC_ELVT_MCA);
if ((value & APIC_LVT_M) == 0) {
printf("AMD MCE Thresholding Extended LVT is already active\n");
return (-1);
}
lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_masked = 0;
lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_active = 1;
if (bootverbose)
printf("lapic%u: MCE Thresholding ELVT unmasked\n", apic_id);
return (APIC_ELVT_MCA);
}
void
lapic_handle_error(void)
{