x86/xen: fallback when VCPUOP_send_nmi is not available
It has been reported that on some AWS instances VCPUOP_send_nmi returns -38 (ENOSYS). The hypercall is only available for HVM guests in Xen 4.7 and newer. Add a fallback to use the native NMI sending procedure when VCPUOP_send_nmi is not available, so that the NMI is not lost.

Reported and Tested by: avg
MFC after: 1 week
Fixes: b2802351c162 ('xen: fix dispatching of NMIs')
Sponsored by: Citrix Systems R&D
parent 779fd05344
commit ad15eeeaba
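For context, the shape of the change is a one-way runtime fallback: try the paravirtualized hypercall first and, on the first failure, permanently switch NMI IPIs over to the previously saved native handler, re-sending the failing NMI so it is not lost. Below is a minimal userland sketch of that pattern. It is an illustration only: mock_send_nmi_hypercall(), the XEN_ENOSYS constant, and the handler names are stand-ins invented for this sketch, not the kernel code in the diff.

/*
 * Minimal userland sketch of the one-way fallback pattern in this commit.
 * mock_send_nmi_hypercall() stands in for the kernel-only
 * HYPERVISOR_vcpu_op(VCPUOP_send_nmi, ...).
 */
#include <stdbool.h>
#include <stdio.h>

#define XEN_ENOSYS 38   /* Xen's errno values follow Linux; hence the -38 */

/* Saved native handler, analogous to native_ipi_vectored in the diff. */
static void (*native_send)(unsigned int vector, int dest);

static void
apic_send(unsigned int vector, int dest)
{
        printf("native APIC NMI: vector %u, dest %d\n", vector, dest);
}

/* Mock hypercall: behave like Xen < 4.7, where VCPUOP_send_nmi is missing. */
static int
mock_send_nmi_hypercall(int dest)
{
        (void)dest;
        return (-XEN_ENOSYS);
}

static void
send_nmi_ipi(unsigned int vector, int dest)
{
        /* Latches to false after the first failed hypercall. */
        static bool pvnmi = true;

        if (pvnmi) {
                int rc = mock_send_nmi_hypercall(dest);

                if (rc != 0) {
                        printf("hypercall failed (%d), switching to APIC\n", rc);
                        pvnmi = false;
                        /* Re-send via the native path so the NMI is not lost. */
                        native_send(vector, dest);
                }
                return;
        }
        native_send(vector, dest);
}

int
main(void)
{
        /* Save the native handler before installing the PV wrapper. */
        native_send = apic_send;

        send_nmi_ipi(2, 0);     /* first call: hypercall fails, falls back */
        send_nmi_ipi(2, 0);     /* later calls: go straight to native */
        return (0);
}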
@@ -107,6 +107,14 @@ static struct xen_ipi_handler xen_ipis[] =
 };
 #endif

+/*
+ * Save previous (native) handler as a fallback. Xen < 4.7 doesn't support
+ * VCPUOP_send_nmi for HVM guests, and thus we need a fallback in that case:
+ *
+ * https://lists.freebsd.org/archives/freebsd-xen/2022-January/000032.html
+ */
+void (*native_ipi_vectored)(u_int, int);
+
 /*------------------------------- Per-CPU Data -------------------------------*/
 #ifdef SMP
 DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
@@ -273,10 +281,11 @@ xen_pv_lapic_ipi_raw(register_t icrlo, u_int dest)
 }

 #define PCPU_ID_GET(id, field) (pcpu_find(id)->pc_##field)
-static void
+static int
 send_nmi(int dest)
 {
         unsigned int cpu;
+        int rc = 0;

         /*
          * NMIs are not routed over event channels, and instead delivered as on
@@ -286,24 +295,33 @@ send_nmi(int dest)
          */
         switch(dest) {
         case APIC_IPI_DEST_SELF:
-                HYPERVISOR_vcpu_op(VCPUOP_send_nmi, PCPU_GET(vcpu_id), NULL);
+                rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, PCPU_GET(vcpu_id), NULL);
                 break;
         case APIC_IPI_DEST_ALL:
-                CPU_FOREACH(cpu)
-                        HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
+                CPU_FOREACH(cpu) {
+                        rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
                             PCPU_ID_GET(cpu, vcpu_id), NULL);
+                        if (rc != 0)
+                                break;
+                }
                 break;
         case APIC_IPI_DEST_OTHERS:
-                CPU_FOREACH(cpu)
-                        if (cpu != PCPU_GET(cpuid))
-                                HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
+                CPU_FOREACH(cpu) {
+                        if (cpu != PCPU_GET(cpuid)) {
+                                rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
                                     PCPU_ID_GET(cpu, vcpu_id), NULL);
+                                if (rc != 0)
+                                        break;
+                        }
+                }
                 break;
         default:
-                HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
+                rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
                     PCPU_ID_GET(apic_cpuid(dest), vcpu_id), NULL);
                 break;
         }
+
+        return rc;
 }
 #undef PCPU_ID_GET

@@ -312,9 +330,21 @@ xen_pv_lapic_ipi_vectored(u_int vector, int dest)
 {
         xen_intr_handle_t *ipi_handle;
         int ipi_idx, to_cpu, self;
+        static bool pvnmi = true;

         if (vector >= IPI_NMI_FIRST) {
-                send_nmi(dest);
+                if (pvnmi) {
+                        int rc = send_nmi(dest);
+
+                        if (rc != 0) {
+                                printf(
+    "Sending NMI using hypercall failed (%d) switching to APIC\n", rc);
+                                pvnmi = false;
+                                native_ipi_vectored(vector, dest);
+                        }
+                } else
+                        native_ipi_vectored(vector, dest);
+
                 return;
         }

@@ -580,8 +610,8 @@ xen_setup_cpus(void)
                 xen_cpu_ipi_init(i);

         /* Set the xen pv ipi ops to replace the native ones */
-        if (xen_hvm_domain())
-                apic_ops.ipi_vectored = xen_pv_lapic_ipi_vectored;
+        native_ipi_vectored = apic_ops.ipi_vectored;
+        apic_ops.ipi_vectored = xen_pv_lapic_ipi_vectored;
 }

 /* Switch to using PV IPIs as soon as the vcpu_id is set. */
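A note on the design, as visible in the diff above: pvnmi is a function-local static, so the switch to the native path is a one-way latch. After the first failing hypercall the PV path is never retried, and the NMI that exposed the failure is immediately re-sent through the saved native_ipi_vectored handler, which is what ensures the NMI is not lost as described in the commit message.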