xen: update interface headers

This is a verbatim copy of the public headers from Xen 4.14.1.

No functional change intended.

Sponsored by: Citrix Systems R&D
This commit is contained in:
Roger Pau Monné 2020-12-30 12:31:12 +01:00
parent 8fc41ced9f
commit 5ed9deef6b
67 changed files with 8788 additions and 1205 deletions

View File

@ -61,15 +61,15 @@
*
* All memory which is shared with other entities in the system
* (including the hypervisor and other guests) must reside in memory
* which is mapped as Normal Inner-cacheable. This applies to:
* which is mapped as Normal Inner Write-Back Outer Write-Back Inner-Shareable.
* This applies to:
* - hypercall arguments passed via a pointer to guest memory.
* - memory shared via the grant table mechanism (including PV I/O
* rings etc).
* - memory shared with the hypervisor (struct shared_info, struct
* vcpu_info, the grant table, etc).
*
* Any Inner cache allocation strategy (Write-Back, Write-Through etc)
* is acceptable. There is no restriction on the Outer-cacheability.
* Any cache allocation hints are acceptable.
*/
/*
@ -173,7 +173,7 @@
typedef union { type *p; unsigned long q; } \
__guest_handle_ ## name; \
typedef union { type *p; uint64_aligned_t q; } \
__guest_handle_64_ ## name;
__guest_handle_64_ ## name
/*
* XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
@ -195,11 +195,22 @@
_sxghr_tmp->q = 0; \
_sxghr_tmp->p = val; \
} while ( 0 )
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
typedef uint64_t xen_pfn_t;
#define PRI_xen_pfn PRIx64
#define PRIu_xen_pfn PRIu64
/*
* Maximum number of virtual CPUs in legacy multi-processor guests.
* Only one. All other VCPUS must use VCPUOP_register_vcpu_info.
*/
#define XEN_LEGACY_MAX_VCPUS 1
typedef uint64_t xen_ulong_t;
#define PRI_xen_ulong PRIx64
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
# define __DECL_REG(n64, n32) union { \
@ -275,17 +286,6 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);
#undef __DECL_REG
typedef uint64_t xen_pfn_t;
#define PRI_xen_pfn PRIx64
/* Maximum number of virtual CPUs in legacy multi-processor guests. */
/* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */
#define XEN_LEGACY_MAX_VCPUS 1
typedef uint64_t xen_ulong_t;
#define PRI_xen_ulong PRIx64
#if defined(__XEN__) || defined(__XEN_TOOLS__)
struct vcpu_guest_context {
#define _VGCF_online 0
#define VGCF_online (1<<_VGCF_online)
@ -293,7 +293,7 @@ struct vcpu_guest_context {
struct vcpu_guest_core_regs user_regs; /* Core CPU registers */
uint32_t sctlr;
uint64_t sctlr;
uint64_t ttbcr, ttbr0, ttbr1;
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
@ -306,10 +306,16 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
#define XEN_DOMCTL_CONFIG_GIC_NATIVE 0
#define XEN_DOMCTL_CONFIG_GIC_V2 1
#define XEN_DOMCTL_CONFIG_GIC_V3 2
#define XEN_DOMCTL_CONFIG_TEE_NONE 0
#define XEN_DOMCTL_CONFIG_TEE_OPTEE 1
struct xen_arch_domainconfig {
/* IN/OUT */
uint8_t gic_version;
/* IN */
uint16_t tee_type;
/* IN */
uint32_t nr_spis;
/*
* OUT
@ -376,7 +382,7 @@ typedef uint64_t xen_callback_t;
#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)
#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)
#define SCTLR_GUEST_INIT 0x00c50078
#define SCTLR_GUEST_INIT xen_mk_ullong(0x00c50078)
/*
* Virtual machine platform (memory layout, interrupts)
@ -394,38 +400,45 @@ typedef uint64_t xen_callback_t;
*/
/* vGIC v2 mappings */
#define GUEST_GICD_BASE 0x03001000ULL
#define GUEST_GICD_SIZE 0x00001000ULL
#define GUEST_GICC_BASE 0x03002000ULL
#define GUEST_GICC_SIZE 0x00000100ULL
#define GUEST_GICD_BASE xen_mk_ullong(0x03001000)
#define GUEST_GICD_SIZE xen_mk_ullong(0x00001000)
#define GUEST_GICC_BASE xen_mk_ullong(0x03002000)
#define GUEST_GICC_SIZE xen_mk_ullong(0x00002000)
/* vGIC v3 mappings */
#define GUEST_GICV3_GICD_BASE 0x03001000ULL
#define GUEST_GICV3_GICD_SIZE 0x00010000ULL
#define GUEST_GICV3_GICD_BASE xen_mk_ullong(0x03001000)
#define GUEST_GICV3_GICD_SIZE xen_mk_ullong(0x00010000)
#define GUEST_GICV3_RDIST_STRIDE 0x20000ULL
#define GUEST_GICV3_RDIST_REGIONS 1
#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU127 */
#define GUEST_GICV3_GICR0_SIZE 0x01000000ULL
#define GUEST_GICV3_GICR0_BASE xen_mk_ullong(0x03020000) /* vCPU0..127 */
#define GUEST_GICV3_GICR0_SIZE xen_mk_ullong(0x01000000)
/* ACPI tables physical address */
#define GUEST_ACPI_BASE xen_mk_ullong(0x20000000)
#define GUEST_ACPI_SIZE xen_mk_ullong(0x02000000)
/* PL011 mappings */
#define GUEST_PL011_BASE xen_mk_ullong(0x22000000)
#define GUEST_PL011_SIZE xen_mk_ullong(0x00001000)
/*
* 16MB == 4096 pages reserved for guest to use as a region to map its
* grant table in.
*/
#define GUEST_GNTTAB_BASE 0x38000000ULL
#define GUEST_GNTTAB_SIZE 0x01000000ULL
#define GUEST_GNTTAB_BASE xen_mk_ullong(0x38000000)
#define GUEST_GNTTAB_SIZE xen_mk_ullong(0x01000000)
#define GUEST_MAGIC_BASE 0x39000000ULL
#define GUEST_MAGIC_SIZE 0x01000000ULL
#define GUEST_MAGIC_BASE xen_mk_ullong(0x39000000)
#define GUEST_MAGIC_SIZE xen_mk_ullong(0x01000000)
#define GUEST_RAM_BANKS 2
#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE 0xc0000000ULL
#define GUEST_RAM0_BASE xen_mk_ullong(0x40000000) /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE xen_mk_ullong(0xc0000000)
#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE 0xfe00000000ULL
#define GUEST_RAM1_BASE xen_mk_ullong(0x0200000000) /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE xen_mk_ullong(0xfe00000000)
#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */
/* Largest amount of actual RAM, not including holes */
@ -434,12 +447,17 @@ typedef uint64_t xen_callback_t;
#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }
/* Current supported guest VCPUs */
#define GUEST_MAX_VCPUS 128
/* Interrupts */
#define GUEST_TIMER_VIRT_PPI 27
#define GUEST_TIMER_PHYS_S_PPI 29
#define GUEST_TIMER_PHYS_NS_PPI 30
#define GUEST_EVTCHN_PPI 31
#define GUEST_VPL011_SPI 32
/* PSCI functions */
#define PSCI_cpu_suspend 0
#define PSCI_cpu_off 1

View File

@ -0,0 +1,66 @@
/*
* smccc.h
*
* SMC/HVC interface in accordance with SMC Calling Convention.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright 2017 (C) EPAM Systems
*/
#ifndef __XEN_PUBLIC_ARCH_ARM_SMCCC_H__
#define __XEN_PUBLIC_ARCH_ARM_SMCCC_H__
#include "public/xen.h"
/*
* Hypervisor Service version.
*
* We can't use XEN version here, because of SMCCC requirements:
* Major revision should change every time SMC/HVC function is removed.
* Minor revision should change every time SMC/HVC function is added.
* So, it is SMCCC protocol revision code, not XEN version.
*
* Those values are subjected to change, when interface will be extended.
*/
#define XEN_SMCCC_MAJOR_REVISION 0
#define XEN_SMCCC_MINOR_REVISION 1
/* Hypervisor Service UID. Randomly generated with uuidgen. */
#define XEN_SMCCC_UID XEN_DEFINE_UUID(0xa71812dc, 0xc698, 0x4369, 0x9acf, \
0x79, 0xd1, 0x8d, 0xde, 0xe6, 0x67)
/* Standard Service Service Call version. */
#define SSSC_SMCCC_MAJOR_REVISION 0
#define SSSC_SMCCC_MINOR_REVISION 1
/* Standard Service Call UID. Randomly generated with uuidgen. */
#define SSSC_SMCCC_UID XEN_DEFINE_UUID(0xf863386f, 0x4b39, 0x4cbd, 0x9220,\
0xce, 0x16, 0x41, 0xe5, 0x9f, 0x6f)
#endif /* __XEN_PUBLIC_ARCH_ARM_SMCCC_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:b
*/

View File

@ -0,0 +1,295 @@
/*
* arch-x86/cpufeatureset.h
*
* CPU featureset definitions
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2015, 2016 Citrix Systems, Inc.
*/
/*
* There are two expected ways of including this header.
*
* 1) The "default" case (expected from tools etc).
*
* Simply #include <public/arch-x86/cpufeatureset.h>
*
* In this circumstance, normal header guards apply and the includer shall get
* an enumeration in the XEN_X86_FEATURE_xxx namespace.
*
* 2) The special case where the includer provides XEN_CPUFEATURE() in scope.
*
* In this case, no inclusion guards apply and the caller is responsible for
* their XEN_CPUFEATURE() being appropriate in the included context.
*/
#ifndef XEN_CPUFEATURE
/*
* Includer has not provided a custom XEN_CPUFEATURE(). Arrange for normal
* header guards, an enum and constants in the XEN_X86_FEATURE_xxx namespace.
*/
#ifndef __XEN_PUBLIC_ARCH_X86_CPUFEATURESET_H__
#define __XEN_PUBLIC_ARCH_X86_CPUFEATURESET_H__
#define XEN_CPUFEATURESET_DEFAULT_INCLUDE
#define XEN_CPUFEATURE(name, value) XEN_X86_FEATURE_##name = value,
enum {
#endif /* __XEN_PUBLIC_ARCH_X86_CPUFEATURESET_H__ */
#endif /* !XEN_CPUFEATURE */
#ifdef XEN_CPUFEATURE
/*
* A featureset is a bitmap of x86 features, represented as a collection of
* 32bit words.
*
* Words are as specified in vendors programming manuals, and shall not
* contain any synthesied values. New words may be added to the end of
* featureset.
*
* All featureset words currently originate from leaves specified for the
* CPUID instruction, but this is not preclude other sources of information.
*/
/*
* Attribute syntax:
*
* Attributes for a particular feature are provided as characters before the
* first space in the comment immediately following the feature value. Note -
* none of these attributes form part of the Xen public ABI.
*
* Special: '!'
* This bit has special properties and is not a straight indication of a
* piece of new functionality. Xen will handle these differently,
* and may override toolstack settings completely.
*
* Applicability to guests: 'A', 'S' or 'H'
* 'A' = All guests.
* 'S' = All HVM guests (not PV guests).
* 'H' = HVM HAP guests (not PV or HVM Shadow guests).
* Upper case => Available by default
* Lower case => Can be opted-in to, but not available by default.
*/
/* Intel-defined CPU features, CPUID level 0x00000001.edx, word 0 */
XEN_CPUFEATURE(FPU, 0*32+ 0) /*A Onboard FPU */
XEN_CPUFEATURE(VME, 0*32+ 1) /*S Virtual Mode Extensions */
XEN_CPUFEATURE(DE, 0*32+ 2) /*A Debugging Extensions */
XEN_CPUFEATURE(PSE, 0*32+ 3) /*S Page Size Extensions */
XEN_CPUFEATURE(TSC, 0*32+ 4) /*A Time Stamp Counter */
XEN_CPUFEATURE(MSR, 0*32+ 5) /*A Model-Specific Registers, RDMSR, WRMSR */
XEN_CPUFEATURE(PAE, 0*32+ 6) /*A Physical Address Extensions */
XEN_CPUFEATURE(MCE, 0*32+ 7) /*A Machine Check Architecture */
XEN_CPUFEATURE(CX8, 0*32+ 8) /*A CMPXCHG8 instruction */
XEN_CPUFEATURE(APIC, 0*32+ 9) /*!A Onboard APIC */
XEN_CPUFEATURE(SEP, 0*32+11) /*A SYSENTER/SYSEXIT */
XEN_CPUFEATURE(MTRR, 0*32+12) /*S Memory Type Range Registers */
XEN_CPUFEATURE(PGE, 0*32+13) /*S Page Global Enable */
XEN_CPUFEATURE(MCA, 0*32+14) /*A Machine Check Architecture */
XEN_CPUFEATURE(CMOV, 0*32+15) /*A CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
XEN_CPUFEATURE(PAT, 0*32+16) /*A Page Attribute Table */
XEN_CPUFEATURE(PSE36, 0*32+17) /*S 36-bit PSEs */
XEN_CPUFEATURE(CLFLUSH, 0*32+19) /*A CLFLUSH instruction */
XEN_CPUFEATURE(DS, 0*32+21) /* Debug Store */
XEN_CPUFEATURE(ACPI, 0*32+22) /*A ACPI via MSR */
XEN_CPUFEATURE(MMX, 0*32+23) /*A Multimedia Extensions */
XEN_CPUFEATURE(FXSR, 0*32+24) /*A FXSAVE and FXRSTOR instructions */
XEN_CPUFEATURE(SSE, 0*32+25) /*A Streaming SIMD Extensions */
XEN_CPUFEATURE(SSE2, 0*32+26) /*A Streaming SIMD Extensions-2 */
XEN_CPUFEATURE(SS, 0*32+27) /*A CPU self snoop */
XEN_CPUFEATURE(HTT, 0*32+28) /*!A Hyper-Threading Technology */
XEN_CPUFEATURE(TM1, 0*32+29) /* Thermal Monitor 1 */
XEN_CPUFEATURE(PBE, 0*32+31) /* Pending Break Enable */
/* Intel-defined CPU features, CPUID level 0x00000001.ecx, word 1 */
XEN_CPUFEATURE(SSE3, 1*32+ 0) /*A Streaming SIMD Extensions-3 */
XEN_CPUFEATURE(PCLMULQDQ, 1*32+ 1) /*A Carry-less multiplication */
XEN_CPUFEATURE(DTES64, 1*32+ 2) /* 64-bit Debug Store */
XEN_CPUFEATURE(MONITOR, 1*32+ 3) /* Monitor/Mwait support */
XEN_CPUFEATURE(DSCPL, 1*32+ 4) /* CPL Qualified Debug Store */
XEN_CPUFEATURE(VMX, 1*32+ 5) /*S Virtual Machine Extensions */
XEN_CPUFEATURE(SMX, 1*32+ 6) /* Safer Mode Extensions */
XEN_CPUFEATURE(EIST, 1*32+ 7) /* Enhanced SpeedStep */
XEN_CPUFEATURE(TM2, 1*32+ 8) /* Thermal Monitor 2 */
XEN_CPUFEATURE(SSSE3, 1*32+ 9) /*A Supplemental Streaming SIMD Extensions-3 */
XEN_CPUFEATURE(FMA, 1*32+12) /*A Fused Multiply Add */
XEN_CPUFEATURE(CX16, 1*32+13) /*A CMPXCHG16B */
XEN_CPUFEATURE(XTPR, 1*32+14) /* Send Task Priority Messages */
XEN_CPUFEATURE(PDCM, 1*32+15) /* Perf/Debug Capability MSR */
XEN_CPUFEATURE(PCID, 1*32+17) /*H Process Context ID */
XEN_CPUFEATURE(DCA, 1*32+18) /* Direct Cache Access */
XEN_CPUFEATURE(SSE4_1, 1*32+19) /*A Streaming SIMD Extensions 4.1 */
XEN_CPUFEATURE(SSE4_2, 1*32+20) /*A Streaming SIMD Extensions 4.2 */
XEN_CPUFEATURE(X2APIC, 1*32+21) /*!A Extended xAPIC */
XEN_CPUFEATURE(MOVBE, 1*32+22) /*A movbe instruction */
XEN_CPUFEATURE(POPCNT, 1*32+23) /*A POPCNT instruction */
XEN_CPUFEATURE(TSC_DEADLINE, 1*32+24) /*S TSC Deadline Timer */
XEN_CPUFEATURE(AESNI, 1*32+25) /*A AES instructions */
XEN_CPUFEATURE(XSAVE, 1*32+26) /*A XSAVE/XRSTOR/XSETBV/XGETBV */
XEN_CPUFEATURE(OSXSAVE, 1*32+27) /*! OSXSAVE */
XEN_CPUFEATURE(AVX, 1*32+28) /*A Advanced Vector Extensions */
XEN_CPUFEATURE(F16C, 1*32+29) /*A Half-precision convert instruction */
XEN_CPUFEATURE(RDRAND, 1*32+30) /*!A Digital Random Number Generator */
XEN_CPUFEATURE(HYPERVISOR, 1*32+31) /*!A Running under some hypervisor */
/* AMD-defined CPU features, CPUID level 0x80000001.edx, word 2 */
XEN_CPUFEATURE(SYSCALL, 2*32+11) /*A SYSCALL/SYSRET */
XEN_CPUFEATURE(NX, 2*32+20) /*A Execute Disable */
XEN_CPUFEATURE(MMXEXT, 2*32+22) /*A AMD MMX extensions */
XEN_CPUFEATURE(FFXSR, 2*32+25) /*A FFXSR instruction optimizations */
XEN_CPUFEATURE(PAGE1GB, 2*32+26) /*H 1Gb large page support */
XEN_CPUFEATURE(RDTSCP, 2*32+27) /*A RDTSCP */
XEN_CPUFEATURE(LM, 2*32+29) /*A Long Mode (x86-64) */
XEN_CPUFEATURE(3DNOWEXT, 2*32+30) /*A AMD 3DNow! extensions */
XEN_CPUFEATURE(3DNOW, 2*32+31) /*A 3DNow! */
/* AMD-defined CPU features, CPUID level 0x80000001.ecx, word 3 */
XEN_CPUFEATURE(LAHF_LM, 3*32+ 0) /*A LAHF/SAHF in long mode */
XEN_CPUFEATURE(CMP_LEGACY, 3*32+ 1) /*!A If yes HyperThreading not valid */
XEN_CPUFEATURE(SVM, 3*32+ 2) /*S Secure virtual machine */
XEN_CPUFEATURE(EXTAPIC, 3*32+ 3) /* Extended APIC space */
XEN_CPUFEATURE(CR8_LEGACY, 3*32+ 4) /*S CR8 in 32-bit mode */
XEN_CPUFEATURE(ABM, 3*32+ 5) /*A Advanced bit manipulation */
XEN_CPUFEATURE(SSE4A, 3*32+ 6) /*A SSE-4A */
XEN_CPUFEATURE(MISALIGNSSE, 3*32+ 7) /*A Misaligned SSE mode */
XEN_CPUFEATURE(3DNOWPREFETCH, 3*32+ 8) /*A 3DNow prefetch instructions */
XEN_CPUFEATURE(OSVW, 3*32+ 9) /* OS Visible Workaround */
XEN_CPUFEATURE(IBS, 3*32+10) /* Instruction Based Sampling */
XEN_CPUFEATURE(XOP, 3*32+11) /*A extended AVX instructions */
XEN_CPUFEATURE(SKINIT, 3*32+12) /* SKINIT/STGI instructions */
XEN_CPUFEATURE(WDT, 3*32+13) /* Watchdog timer */
XEN_CPUFEATURE(LWP, 3*32+15) /* Light Weight Profiling */
XEN_CPUFEATURE(FMA4, 3*32+16) /*A 4 operands MAC instructions */
XEN_CPUFEATURE(NODEID_MSR, 3*32+19) /* NodeId MSR */
XEN_CPUFEATURE(TBM, 3*32+21) /*A trailing bit manipulations */
XEN_CPUFEATURE(TOPOEXT, 3*32+22) /* topology extensions CPUID leafs */
XEN_CPUFEATURE(DBEXT, 3*32+26) /*A data breakpoint extension */
XEN_CPUFEATURE(MONITORX, 3*32+29) /* MONITOR extension (MONITORX/MWAITX) */
/* Intel-defined CPU features, CPUID level 0x0000000D:1.eax, word 4 */
XEN_CPUFEATURE(XSAVEOPT, 4*32+ 0) /*A XSAVEOPT instruction */
XEN_CPUFEATURE(XSAVEC, 4*32+ 1) /*A XSAVEC/XRSTORC instructions */
XEN_CPUFEATURE(XGETBV1, 4*32+ 2) /*A XGETBV with %ecx=1 */
XEN_CPUFEATURE(XSAVES, 4*32+ 3) /*S XSAVES/XRSTORS instructions */
/* Intel-defined CPU features, CPUID level 0x00000007:0.ebx, word 5 */
XEN_CPUFEATURE(FSGSBASE, 5*32+ 0) /*A {RD,WR}{FS,GS}BASE instructions */
XEN_CPUFEATURE(TSC_ADJUST, 5*32+ 1) /*S TSC_ADJUST MSR available */
XEN_CPUFEATURE(SGX, 5*32+ 2) /* Software Guard extensions */
XEN_CPUFEATURE(BMI1, 5*32+ 3) /*A 1st bit manipulation extensions */
XEN_CPUFEATURE(HLE, 5*32+ 4) /*A Hardware Lock Elision */
XEN_CPUFEATURE(AVX2, 5*32+ 5) /*A AVX2 instructions */
XEN_CPUFEATURE(FDP_EXCP_ONLY, 5*32+ 6) /*! x87 FDP only updated on exception. */
XEN_CPUFEATURE(SMEP, 5*32+ 7) /*S Supervisor Mode Execution Protection */
XEN_CPUFEATURE(BMI2, 5*32+ 8) /*A 2nd bit manipulation extensions */
XEN_CPUFEATURE(ERMS, 5*32+ 9) /*A Enhanced REP MOVSB/STOSB */
XEN_CPUFEATURE(INVPCID, 5*32+10) /*H Invalidate Process Context ID */
XEN_CPUFEATURE(RTM, 5*32+11) /*A Restricted Transactional Memory */
XEN_CPUFEATURE(PQM, 5*32+12) /* Platform QoS Monitoring */
XEN_CPUFEATURE(NO_FPU_SEL, 5*32+13) /*! FPU CS/DS stored as zero */
XEN_CPUFEATURE(MPX, 5*32+14) /*s Memory Protection Extensions */
XEN_CPUFEATURE(PQE, 5*32+15) /* Platform QoS Enforcement */
XEN_CPUFEATURE(AVX512F, 5*32+16) /*A AVX-512 Foundation Instructions */
XEN_CPUFEATURE(AVX512DQ, 5*32+17) /*A AVX-512 Doubleword & Quadword Instrs */
XEN_CPUFEATURE(RDSEED, 5*32+18) /*A RDSEED instruction */
XEN_CPUFEATURE(ADX, 5*32+19) /*A ADCX, ADOX instructions */
XEN_CPUFEATURE(SMAP, 5*32+20) /*S Supervisor Mode Access Prevention */
XEN_CPUFEATURE(AVX512_IFMA, 5*32+21) /*A AVX-512 Integer Fused Multiply Add */
XEN_CPUFEATURE(CLFLUSHOPT, 5*32+23) /*A CLFLUSHOPT instruction */
XEN_CPUFEATURE(CLWB, 5*32+24) /*A CLWB instruction */
XEN_CPUFEATURE(AVX512PF, 5*32+26) /*A AVX-512 Prefetch Instructions */
XEN_CPUFEATURE(AVX512ER, 5*32+27) /*A AVX-512 Exponent & Reciprocal Instrs */
XEN_CPUFEATURE(AVX512CD, 5*32+28) /*A AVX-512 Conflict Detection Instrs */
XEN_CPUFEATURE(SHA, 5*32+29) /*A SHA1 & SHA256 instructions */
XEN_CPUFEATURE(AVX512BW, 5*32+30) /*A AVX-512 Byte and Word Instructions */
XEN_CPUFEATURE(AVX512VL, 5*32+31) /*A AVX-512 Vector Length Extensions */
/* Intel-defined CPU features, CPUID level 0x00000007:0.ecx, word 6 */
XEN_CPUFEATURE(PREFETCHWT1, 6*32+ 0) /*A PREFETCHWT1 instruction */
XEN_CPUFEATURE(AVX512_VBMI, 6*32+ 1) /*A AVX-512 Vector Byte Manipulation Instrs */
XEN_CPUFEATURE(UMIP, 6*32+ 2) /*S User Mode Instruction Prevention */
XEN_CPUFEATURE(PKU, 6*32+ 3) /*H Protection Keys for Userspace */
XEN_CPUFEATURE(OSPKE, 6*32+ 4) /*! OS Protection Keys Enable */
XEN_CPUFEATURE(AVX512_VBMI2, 6*32+ 6) /*A Additional AVX-512 Vector Byte Manipulation Instrs */
XEN_CPUFEATURE(CET_SS, 6*32+ 7) /* CET - Shadow Stacks */
XEN_CPUFEATURE(GFNI, 6*32+ 8) /*A Galois Field Instrs */
XEN_CPUFEATURE(VAES, 6*32+ 9) /*A Vector AES Instrs */
XEN_CPUFEATURE(VPCLMULQDQ, 6*32+10) /*A Vector Carry-less Multiplication Instrs */
XEN_CPUFEATURE(AVX512_VNNI, 6*32+11) /*A Vector Neural Network Instrs */
XEN_CPUFEATURE(AVX512_BITALG, 6*32+12) /*A Support for VPOPCNT[B,W] and VPSHUFBITQMB */
XEN_CPUFEATURE(AVX512_VPOPCNTDQ, 6*32+14) /*A POPCNT for vectors of DW/QW */
XEN_CPUFEATURE(TSXLDTRK, 6*32+16) /*a TSX load tracking suspend/resume insns */
XEN_CPUFEATURE(RDPID, 6*32+22) /*A RDPID instruction */
XEN_CPUFEATURE(CLDEMOTE, 6*32+25) /*A CLDEMOTE instruction */
XEN_CPUFEATURE(MOVDIRI, 6*32+27) /*a MOVDIRI instruction */
XEN_CPUFEATURE(MOVDIR64B, 6*32+28) /*a MOVDIR64B instruction */
XEN_CPUFEATURE(ENQCMD, 6*32+29) /* ENQCMD{,S} instructions */
/* AMD-defined CPU features, CPUID level 0x80000007.edx, word 7 */
XEN_CPUFEATURE(ITSC, 7*32+ 8) /* Invariant TSC */
XEN_CPUFEATURE(EFRO, 7*32+10) /* APERF/MPERF Read Only interface */
/* AMD-defined CPU features, CPUID level 0x80000008.ebx, word 8 */
XEN_CPUFEATURE(CLZERO, 8*32+ 0) /*A CLZERO instruction */
XEN_CPUFEATURE(RSTR_FP_ERR_PTRS, 8*32+ 2) /*A (F)X{SAVE,RSTOR} always saves/restores FPU Error pointers */
XEN_CPUFEATURE(WBNOINVD, 8*32+ 9) /* WBNOINVD instruction */
XEN_CPUFEATURE(IBPB, 8*32+12) /*A IBPB support only (no IBRS, used by AMD) */
XEN_CPUFEATURE(AMD_PPIN, 8*32+23) /* Protected Processor Inventory Number */
/* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A AVX512 Neural Network Instructions */
XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */
XEN_CPUFEATURE(SRBDS_CTRL, 9*32+ 9) /* MSR_MCU_OPT_CTRL and RNGDS_MITG_DIS. */
XEN_CPUFEATURE(MD_CLEAR, 9*32+10) /*A VERW clears microarchitectural buffers */
XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
XEN_CPUFEATURE(SERIALIZE, 9*32+14) /*a SERIALIZE insn */
XEN_CPUFEATURE(CET_IBT, 9*32+20) /* CET - Indirect Branch Tracking */
XEN_CPUFEATURE(IBRSB, 9*32+26) /*A IBRS and IBPB support (used by Intel) */
XEN_CPUFEATURE(STIBP, 9*32+27) /*A STIBP */
XEN_CPUFEATURE(L1D_FLUSH, 9*32+28) /*S MSR_FLUSH_CMD and L1D flush. */
XEN_CPUFEATURE(ARCH_CAPS, 9*32+29) /*a IA32_ARCH_CAPABILITIES MSR */
XEN_CPUFEATURE(CORE_CAPS, 9*32+30) /* IA32_CORE_CAPABILITIES MSR */
XEN_CPUFEATURE(SSBD, 9*32+31) /*A MSR_SPEC_CTRL.SSBD available */
/* Intel-defined CPU features, CPUID level 0x00000007:1.eax, word 10 */
XEN_CPUFEATURE(AVX512_BF16, 10*32+ 5) /*A AVX512 BFloat16 Instructions */
#endif /* XEN_CPUFEATURE */
/* Clean up from a default include. Close the enum (for C). */
#ifdef XEN_CPUFEATURESET_DEFAULT_INCLUDE
#undef XEN_CPUFEATURESET_DEFAULT_INCLUDE
#undef XEN_CPUFEATURE
};
#endif /* XEN_CPUFEATURESET_DEFAULT_INCLUDE */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -1,8 +1,8 @@
/******************************************************************************
* arch-x86/cpuid.h
*
*
* CPUID interface to Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -20,9 +20,9 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*
* Copyright (c) 2007 Citrix Systems, Inc.
*
*
* Authors:
* Keir Fraser <keir@xen.org>
*/
@ -73,18 +73,46 @@
#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
/*
* Leaf 4 (0x40000x03)
* Sub-leaf 0: EAX: bit 0: emulated tsc
* bit 1: host tsc is known to be reliable
* bit 2: RDTSCP instruction available
* EBX: tsc_mode: 0=default (emulate if necessary), 1=emulate,
* 2=no emulation, 3=no emulation + TSC_AUX support
* ECX: guest tsc frequency in kHz
* EDX: guest tsc incarnation (migration count)
* Sub-leaf 1: EAX: tsc offset low part
* EBX: tsc offset high part
* ECX: multiplicator for tsc->ns conversion
* EDX: shift amount for tsc->ns conversion
* Sub-leaf 2: EAX: host tsc frequency in kHz
*/
/*
* Leaf 5 (0x40000x04)
* HVM-specific features
* EAX: Features
* EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
* Sub-leaf 0: EAX: Features
* Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
* Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag)
*/
#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */
/* Memory mapped from other domains has valid IOMMU entries */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
#define XEN_CPUID_MAX_NUM_LEAVES 4
/*
* Leaf 6 (0x40000x05)
* PV-specific parameters
* Sub-leaf 0: EAX: max available sub-leaf
* Sub-leaf 0: EBX: bits 0-7: max machine address width
*/
/* Max. address width in bits taking memory hotplug into account. */
#define XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK (0xffu << 0)
#define XEN_CPUID_MAX_NUM_LEAVES 5
#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */

Binary file not shown.

View File

@ -1,7 +1,7 @@
/*
/*
* Structure definitions for HVM state that is held by Xen and must
* be saved along with the domain's memory and device-model state.
*
*
* Copyright (c) 2007 XenSource Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@ -26,8 +26,10 @@
#ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
#define __XEN_PUBLIC_HVM_SAVE_X86_H__
/*
* Save/restore header: general info about the save file.
#include "../../xen.h"
/*
* Save/restore header: general info about the save file.
*/
#define HVM_FILE_MAGIC 0x54381286
@ -43,10 +45,13 @@ struct hvm_save_header {
DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
/*
* Processor
*
* Compat: Pre-3.4 didn't have msr_tsc_aux
* Compat:
* - Pre-3.4 didn't have msr_tsc_aux
* - Pre-4.7 didn't have fpu_initialised
*/
struct hvm_hw_cpu {
@ -82,7 +87,7 @@ struct hvm_hw_cpu {
uint64_t dr2;
uint64_t dr3;
uint64_t dr6;
uint64_t dr7;
uint64_t dr7;
uint32_t cs_sel;
uint32_t ds_sel;
@ -132,7 +137,7 @@ struct hvm_hw_cpu {
uint64_t shadow_gs;
/* msr content saved/restored. */
uint64_t msr_flags;
uint64_t msr_flags; /* Obsolete, ignored. */
uint64_t msr_lstar;
uint64_t msr_star;
uint64_t msr_cstar;
@ -156,6 +161,11 @@ struct hvm_hw_cpu {
};
/* error code for pending event */
uint32_t error_code;
#define _XEN_X86_FPU_INITIALISED 0
#define XEN_X86_FPU_INITIALISED (1U<<_XEN_X86_FPU_INITIALISED)
uint32_t flags;
uint32_t pad0;
};
struct hvm_hw_cpu_compat {
@ -191,7 +201,7 @@ struct hvm_hw_cpu_compat {
uint64_t dr2;
uint64_t dr3;
uint64_t dr6;
uint64_t dr7;
uint64_t dr7;
uint32_t cs_sel;
uint32_t ds_sel;
@ -241,7 +251,7 @@ struct hvm_hw_cpu_compat {
uint64_t shadow_gs;
/* msr content saved/restored. */
uint64_t msr_flags;
uint64_t msr_flags; /* Obsolete, ignored. */
uint64_t msr_lstar;
uint64_t msr_star;
uint64_t msr_cstar;
@ -267,18 +277,26 @@ struct hvm_hw_cpu_compat {
uint32_t error_code;
};
static inline int _hvm_hw_fix_cpu(void *h) {
static inline int _hvm_hw_fix_cpu(void *h, uint32_t size) {
union hvm_hw_cpu_union {
struct hvm_hw_cpu nat;
struct hvm_hw_cpu_compat cmp;
} *ucpu = (union hvm_hw_cpu_union *)h;
/* If we copy from the end backwards, we should
* be able to do the modification in-place */
ucpu->nat.error_code = ucpu->cmp.error_code;
ucpu->nat.pending_event = ucpu->cmp.pending_event;
ucpu->nat.tsc = ucpu->cmp.tsc;
ucpu->nat.msr_tsc_aux = 0;
if ( size == sizeof(struct hvm_hw_cpu_compat) )
{
/*
* If we copy from the end backwards, we should
* be able to do the modification in-place.
*/
ucpu->nat.error_code = ucpu->cmp.error_code;
ucpu->nat.pending_event = ucpu->cmp.pending_event;
ucpu->nat.tsc = ucpu->cmp.tsc;
ucpu->nat.msr_tsc_aux = 0;
}
/* Mimic the old behaviour by unconditionally setting fpu_initialised. */
ucpu->nat.flags = XEN_X86_FPU_INITIALISED;
return 0;
}
@ -340,37 +358,50 @@ struct hvm_hw_vpic {
DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
/*
* IO-APIC
*/
#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
struct hvm_hw_vioapic {
uint64_t base_address;
uint32_t ioregsel;
uint32_t id;
union vioapic_redir_entry
{
uint64_t bits;
struct {
uint8_t vector;
uint8_t delivery_mode:3;
uint8_t dest_mode:1;
uint8_t delivery_status:1;
uint8_t polarity:1;
uint8_t remote_irr:1;
uint8_t trig_mode:1;
uint8_t mask:1;
uint8_t reserve:7;
uint8_t reserved[4];
uint8_t dest_id;
} fields;
} redirtbl[VIOAPIC_NUM_PINS];
union vioapic_redir_entry
{
uint64_t bits;
struct {
uint8_t vector;
uint8_t delivery_mode:3;
uint8_t dest_mode:1;
uint8_t delivery_status:1;
uint8_t polarity:1;
uint8_t remote_irr:1;
uint8_t trig_mode:1;
uint8_t mask:1;
uint8_t reserve:7;
uint8_t reserved[4];
uint8_t dest_id;
} fields;
};
#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
#define XEN_HVM_VIOAPIC(name, cnt) \
struct name { \
uint64_t base_address; \
uint32_t ioregsel; \
uint32_t id; \
union vioapic_redir_entry redirtbl[cnt]; \
}
XEN_HVM_VIOAPIC(hvm_hw_vioapic, VIOAPIC_NUM_PINS);
#ifndef __XEN__
#undef XEN_HVM_VIOAPIC
#else
#undef VIOAPIC_NUM_PINS
#endif
DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
/*
* LAPIC
*/
@ -390,6 +421,7 @@ struct hvm_hw_lapic_regs {
DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
/*
* IRQs
*/
@ -433,7 +465,7 @@ struct hvm_hw_pci_link {
DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
/*
/*
* PIT
*/
@ -458,9 +490,10 @@ struct hvm_hw_pit {
DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
/*
/*
* RTC
*/
*/
#define RTC_CMOS_SIZE 14
struct hvm_hw_rtc {
@ -469,10 +502,13 @@ struct hvm_hw_rtc {
/* Index register for 2-part operations */
uint8_t cmos_index;
uint8_t pad0;
/* RTC offset from host time */
int64_t rtc_offset;
};
DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
/*
* HPET
*/
@ -502,6 +538,7 @@ struct hvm_hw_hpet {
DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
/*
* PM timer
*/
@ -542,12 +579,11 @@ struct hvm_hw_cpu_xsave {
struct {
struct { char x[512]; } fpu_sse;
struct {
struct hvm_hw_cpu_xsave_hdr {
uint64_t xstate_bv; /* Updated by XRSTOR */
uint64_t reserved[7];
uint64_t xcomp_bv; /* Updated by XRSTOR{C,S} */
uint64_t reserved[6];
} xsave_hdr; /* The 64-byte header */
struct { char x[0]; } ymm; /* YMM */
} save_area;
};
@ -567,7 +603,13 @@ struct hvm_viridian_domain_context {
DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);
struct hvm_viridian_vcpu_context {
uint64_t apic_assist;
uint64_t vp_assist_msr;
uint8_t apic_assist_pending;
uint8_t _pad[7];
uint64_t simp_msr;
uint64_t sint_msr[16];
uint64_t stimer_config_msr[4];
uint64_t stimer_count_msr[4];
};
DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
@ -576,6 +618,7 @@ struct hvm_vmce_vcpu {
uint64_t caps;
uint64_t mci_ctl2_bank0;
uint64_t mci_ctl2_bank1;
uint64_t mcg_ext_ctl;
};
DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
@ -586,24 +629,21 @@ struct hvm_tsc_adjust {
DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
struct hvm_msr {
uint32_t count;
struct hvm_one_msr {
uint32_t index;
uint32_t _rsvd;
uint64_t val;
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
} msr[];
#elif defined(__GNUC__)
} msr[0];
#else
} msr[1 /* variable size */];
#endif
} msr[XEN_FLEX_ARRAY_DIM];
};
#define CPU_MSR_CODE 20
/*
/* Range 22 - 34 (inclusive) reserved for Amazon */
/*
* Largest type-code in use
*/
#define HVM_SAVE_CODE_MAX 20

View File

@ -35,11 +35,7 @@ struct xen_pmu_amd_ctxt {
uint32_t ctrls;
/* Counter MSRs */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint64_t regs[];
#elif defined(__GNUC__)
uint64_t regs[0];
#endif
uint64_t regs[XEN_FLEX_ARRAY_DIM];
};
typedef struct xen_pmu_amd_ctxt xen_pmu_amd_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_amd_ctxt_t);
@ -71,11 +67,7 @@ struct xen_pmu_intel_ctxt {
uint64_t debugctl;
/* Fixed and architectural counter MSRs */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint64_t regs[];
#elif defined(__GNUC__)
uint64_t regs[0];
#endif
uint64_t regs[XEN_FLEX_ARRAY_DIM];
};
typedef struct xen_pmu_intel_ctxt xen_pmu_intel_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_intel_ctxt_t);
@ -164,3 +156,4 @@ DEFINE_XEN_GUEST_HANDLE(xen_pmu_arch_t);
* indent-tabs-mode: nil
* End:
*/

View File

@ -1,11 +1,11 @@
/******************************************************************************
* arch-x86/mca.h
*
*
* Contributed by Advanced Micro Devices, Inc.
* Author: Christoph Egger <Christoph.Egger@amd.com>
*
* Guest OS machine check interface to x86 Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -88,6 +88,9 @@
#define XEN_MC_NOTDELIVERED 0x10
/* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */
/* Applicable to all mc_vcpuid fields below. */
#define XEN_MC_VCPUID_INVALID 0xffff
#ifndef __ASSEMBLY__
#define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */
@ -110,6 +113,7 @@ struct mcinfo_common {
uint16_t size; /* size of this struct in bytes */
};
#define MC_FLAG_CORRECTABLE (1 << 0)
#define MC_FLAG_UNCORRECTABLE (1 << 1)
#define MC_FLAG_RECOVERABLE (1 << 2)
@ -147,13 +151,14 @@ struct mcinfo_bank {
uint64_t mc_tsc;
};
struct mcinfo_msr {
uint64_t reg; /* MSR */
uint64_t value; /* MSR value */
};
/* contains mc information from other
* or additional mc MSRs */
* or additional mc MSRs */
struct mcinfo_extended {
struct mcinfo_common common;
@ -165,9 +170,9 @@ struct mcinfo_extended {
/*
* Currently Intel extended MSR (32/64) include all gp registers
* and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be
* useful at present. So expand this array to 16/32 to leave room.
* useful at present. So expand this array to 32 to leave room.
*/
struct mcinfo_msr mc_msr[sizeof(void *) * 4];
struct mcinfo_msr mc_msr[32];
};
/* Recovery Action flags. Giving recovery result information to DOM0 */
@ -190,10 +195,10 @@ struct mcinfo_extended {
/* L3 cache disable Action */
#define MC_ACTION_CACHE_SHRINK (0x1 << 2)
/* Below interface used between XEN/DOM0 for passing XEN's recovery action
* information to DOM0.
/* Below interface used between XEN/DOM0 for passing XEN's recovery action
* information to DOM0.
* usage Senario: After offlining broken page, XEN might pass its page offline
* recovery action result to DOM0. DOM0 will save the information in
* recovery action result to DOM0. DOM0 will save the information in
* non-volatile memory for further proactive actions, such as offlining the
* easy broken page earlier when doing next reboot.
*/
@ -226,6 +231,7 @@ struct mcinfo_recovery
} action_info;
};
#define MCINFO_HYPERCALLSIZE 1024
#define MCINFO_MAXSIZE 768
@ -240,7 +246,9 @@ typedef struct mc_info mc_info_t;
DEFINE_XEN_GUEST_HANDLE(mc_info_t);
#define __MC_MSR_ARRAYSIZE 8
#if __XEN_INTERFACE_VERSION__ <= 0x00040d00
#define __MC_NMSRS 1
#endif
#define MC_NCAPS 7 /* 7 CPU feature flag words */
#define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */
#define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */
@ -251,8 +259,8 @@ DEFINE_XEN_GUEST_HANDLE(mc_info_t);
#define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */
struct mcinfo_logical_cpu {
uint32_t mc_cpunr;
uint32_t mc_chipid;
uint32_t mc_cpunr;
uint32_t mc_chipid;
uint16_t mc_coreid;
uint16_t mc_threadid;
uint32_t mc_apicid;
@ -276,7 +284,8 @@ struct mcinfo_logical_cpu {
typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t;
DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t);
/*
/*
* OS's should use these instead of writing their own lookup function
* each with its own bugs and drawbacks.
* We use macros instead of static inline functions to allow guests
@ -307,8 +316,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t);
struct mcinfo_common *_mic; \
\
found = 0; \
(_ret) = NULL; \
if (_mi == NULL) break; \
(_ret) = NULL; \
if (_mi == NULL) break; \
_mic = x86_mcinfo_first(_mi); \
for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \
if (_mic->type == (_type)) { \
@ -320,6 +329,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t);
(_ret) = found ? _mic : NULL; \
} while (0)
/* Usecase 1
* Register machine check trap callback handler
* (already done via "set_trap_table" hypercall)
@ -339,8 +349,8 @@ struct xen_mc_fetch {
/* IN/OUT variables. */
uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT,
XEN_MC_ACK if ack'ing an earlier fetch */
/* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED,
XEN_MC_NODATA, XEN_MC_NOMATCH */
/* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED,
XEN_MC_NODATA, XEN_MC_NOMATCH */
uint32_t _pad0;
uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */
@ -350,6 +360,7 @@ struct xen_mc_fetch {
typedef struct xen_mc_fetch xen_mc_fetch_t;
DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t);
/* Usecase 4
* This tells the hypervisor to notify a DomU about the machine check error
*/
@ -371,30 +382,33 @@ DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t);
#define XEN_MC_physcpuinfo 3
struct xen_mc_physcpuinfo {
/* IN/OUT */
uint32_t ncpus;
uint32_t _pad0;
/* OUT */
XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info;
/* IN/OUT */
uint32_t ncpus;
uint32_t _pad0;
/* OUT */
XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info;
};
#define XEN_MC_msrinject 4
#define MC_MSRINJ_MAXMSRS 8
struct xen_mc_msrinject {
/* IN */
uint32_t mcinj_cpunr; /* target processor id */
uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */
uint32_t mcinj_count; /* 0 .. count-1 in array are valid */
uint32_t _pad0;
struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS];
/* IN */
uint32_t mcinj_cpunr; /* target processor id */
uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */
uint32_t mcinj_count; /* 0 .. count-1 in array are valid */
domid_t mcinj_domid; /* valid only if MC_MSRINJ_F_GPADDR is
present in mcinj_flags */
uint16_t _pad0;
struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS];
};
/* Flags for mcinj_flags above; bits 16-31 are reserved */
#define MC_MSRINJ_F_INTERPOSE 0x1
#define MC_MSRINJ_F_GPADDR 0x2
#define XEN_MC_mceinject 5
struct xen_mc_mceinject {
unsigned int mceinj_cpunr; /* target processor id */
unsigned int mceinj_cpunr; /* target processor id */
};
#if defined(__XEN__) || defined(__XEN_TOOLS__)
@ -402,12 +416,13 @@ struct xen_mc_mceinject {
#define XEN_MC_INJECT_TYPE_MASK 0x7
#define XEN_MC_INJECT_TYPE_MCE 0x0
#define XEN_MC_INJECT_TYPE_CMCI 0x1
#define XEN_MC_INJECT_TYPE_LMCE 0x2
#define XEN_MC_INJECT_CPU_BROADCAST 0x8
struct xen_mc_inject_v2 {
uint32_t flags;
struct xenctl_bitmap cpumap;
uint32_t flags;
struct xenctl_bitmap cpumap;
};
#endif

View File

@ -1,8 +1,8 @@
/******************************************************************************
* xen-x86_32.h
*
*
* Guest OS interface to x86 32-bit Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -58,34 +58,31 @@
#define __HYPERVISOR_VIRT_START_PAE 0xF5800000
#define __MACH2PHYS_VIRT_START_PAE 0xF5800000
#define __MACH2PHYS_VIRT_END_PAE 0xF6800000
#define HYPERVISOR_VIRT_START_PAE \
mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
#define MACH2PHYS_VIRT_START_PAE \
mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
#define MACH2PHYS_VIRT_END_PAE \
mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
#define HYPERVISOR_VIRT_START_PAE xen_mk_ulong(__HYPERVISOR_VIRT_START_PAE)
#define MACH2PHYS_VIRT_START_PAE xen_mk_ulong(__MACH2PHYS_VIRT_START_PAE)
#define MACH2PHYS_VIRT_END_PAE xen_mk_ulong(__MACH2PHYS_VIRT_END_PAE)
/* Non-PAE bounds are obsolete. */
#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
#define HYPERVISOR_VIRT_START_NONPAE \
mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
xen_mk_ulong(__HYPERVISOR_VIRT_START_NONPAE)
#define MACH2PHYS_VIRT_START_NONPAE \
mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
xen_mk_ulong(__MACH2PHYS_VIRT_START_NONPAE)
#define MACH2PHYS_VIRT_END_NONPAE \
mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
xen_mk_ulong(__MACH2PHYS_VIRT_END_NONPAE)
#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
#endif
#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
@ -112,22 +109,44 @@
#ifndef __ASSEMBLY__
#if defined(XEN_GENERATING_COMPAT_HEADERS)
/* nothing */
#elif defined(__XEN__) || defined(__XEN_TOOLS__)
/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax). */
#define __DECL_REG_LO8(which) union { \
uint32_t e ## which ## x; \
uint16_t which ## x; \
struct { \
uint8_t which ## l; \
uint8_t which ## h; \
}; \
}
#define __DECL_REG_LO16(name) union { \
uint32_t e ## name, _e ## name; \
uint16_t name; \
}
#else
/* Other sources must always use the proper 32-bit name (e.g., eax). */
#define __DECL_REG_LO8(which) uint32_t e ## which ## x
#define __DECL_REG_LO16(name) uint32_t e ## name
#endif
struct cpu_user_regs {
uint32_t ebx;
uint32_t ecx;
uint32_t edx;
uint32_t esi;
uint32_t edi;
uint32_t ebp;
uint32_t eax;
__DECL_REG_LO8(b);
__DECL_REG_LO8(c);
__DECL_REG_LO8(d);
__DECL_REG_LO16(si);
__DECL_REG_LO16(di);
__DECL_REG_LO16(bp);
__DECL_REG_LO8(a);
uint16_t error_code; /* private */
uint16_t entry_vector; /* private */
uint32_t eip;
__DECL_REG_LO16(ip);
uint16_t cs;
uint8_t saved_upcall_mask;
uint8_t _pad0;
uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
uint32_t esp;
__DECL_REG_LO16(flags); /* eflags.IF == !saved_upcall_mask */
__DECL_REG_LO16(sp);
uint16_t ss, _pad1;
uint16_t es, _pad2;
uint16_t ds, _pad3;
@ -137,6 +156,9 @@ struct cpu_user_regs {
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
#undef __DECL_REG_LO8
#undef __DECL_REG_LO16
/*
* Page-directory addresses above 4GB do not fit into architectural %cr3.
* When accessing %cr3, or equivalent field in vcpu_guest_context, guests

View File

@ -1,8 +1,8 @@
/******************************************************************************
* xen-x86_64.h
*
*
* Guest OS interface to x86 64-bit Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -44,11 +44,11 @@
*/
#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
#define FLAT_RING3_CS64 0xe033 /* GDT index 262 */
#define FLAT_RING3_DS32 0xe02b /* GDT index 261 */
#define FLAT_RING3_DS64 0x0000 /* NULL selector */
#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
#define FLAT_RING3_SS32 0xe02b /* GDT index 261 */
#define FLAT_RING3_SS64 0xe02b /* GDT index 261 */
#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
@ -76,12 +76,12 @@
#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
#define HYPERVISOR_VIRT_END xen_mk_ulong(__HYPERVISOR_VIRT_END)
#endif
#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
@ -130,7 +130,35 @@ struct iret_context {
/* Bottom of iret stack frame. */
};
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax/rax). */
#define __DECL_REG_LOHI(which) union { \
uint64_t r ## which ## x; \
uint32_t e ## which ## x; \
uint16_t which ## x; \
struct { \
uint8_t which ## l; \
uint8_t which ## h; \
}; \
}
#define __DECL_REG_LO8(name) union { \
uint64_t r ## name; \
uint32_t e ## name; \
uint16_t name; \
uint8_t name ## l; \
}
#define __DECL_REG_LO16(name) union { \
uint64_t r ## name; \
uint32_t e ## name; \
uint16_t name; \
}
#define __DECL_REG_HI(num) union { \
uint64_t r ## num; \
uint32_t r ## num ## d; \
uint16_t r ## num ## w; \
uint8_t r ## num ## b; \
}
#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
#define __DECL_REG(name) union { \
uint64_t r ## name, e ## name; \
@ -141,40 +169,51 @@ struct iret_context {
#define __DECL_REG(name) uint64_t r ## name
#endif
#ifndef __DECL_REG_LOHI
#define __DECL_REG_LOHI(name) __DECL_REG(name ## x)
#define __DECL_REG_LO8 __DECL_REG
#define __DECL_REG_LO16 __DECL_REG
#define __DECL_REG_HI(num) uint64_t r ## num
#endif
struct cpu_user_regs {
uint64_t r15;
uint64_t r14;
uint64_t r13;
uint64_t r12;
__DECL_REG(bp);
__DECL_REG(bx);
uint64_t r11;
uint64_t r10;
uint64_t r9;
uint64_t r8;
__DECL_REG(ax);
__DECL_REG(cx);
__DECL_REG(dx);
__DECL_REG(si);
__DECL_REG(di);
__DECL_REG_HI(15);
__DECL_REG_HI(14);
__DECL_REG_HI(13);
__DECL_REG_HI(12);
__DECL_REG_LO8(bp);
__DECL_REG_LOHI(b);
__DECL_REG_HI(11);
__DECL_REG_HI(10);
__DECL_REG_HI(9);
__DECL_REG_HI(8);
__DECL_REG_LOHI(a);
__DECL_REG_LOHI(c);
__DECL_REG_LOHI(d);
__DECL_REG_LO8(si);
__DECL_REG_LO8(di);
uint32_t error_code; /* private */
uint32_t entry_vector; /* private */
__DECL_REG(ip);
__DECL_REG_LO16(ip);
uint16_t cs, _pad0[1];
uint8_t saved_upcall_mask;
uint8_t _pad1[3];
__DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
__DECL_REG(sp);
__DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */
__DECL_REG_LO8(sp);
uint16_t ss, _pad2[3];
uint16_t es, _pad3[3];
uint16_t ds, _pad4[3];
uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
uint16_t fs, _pad5[3];
uint16_t gs, _pad6[3];
};
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
#undef __DECL_REG
#undef __DECL_REG_LOHI
#undef __DECL_REG_LO8
#undef __DECL_REG_LO16
#undef __DECL_REG_HI
#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)

View File

@ -1,8 +1,8 @@
/******************************************************************************
* arch-x86/xen.h
*
*
* Guest OS interface to x86 Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -54,13 +54,20 @@
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#if defined(__i386__)
# ifdef __XEN__
__DeFiNe__ __DECL_REG_LO8(which) uint32_t e ## which ## x
__DeFiNe__ __DECL_REG_LO16(name) union { uint32_t e ## name; }
# endif
#include "xen-x86_32.h"
# ifdef __XEN__
__UnDeF__ __DECL_REG_LO8
__UnDeF__ __DECL_REG_LO16
__DeFiNe__ __DECL_REG_LO8(which) e ## which ## x
__DeFiNe__ __DECL_REG_LO16(name) e ## name
# endif
#elif defined(__x86_64__)
#include "xen-x86_64.h"
#endif
@ -68,6 +75,7 @@
#ifndef __ASSEMBLY__
typedef unsigned long xen_pfn_t;
#define PRI_xen_pfn "lx"
#define PRIu_xen_pfn "lu"
#endif
#define XEN_HAVE_PV_GUEST_ENTRY 1
@ -94,6 +102,7 @@ typedef unsigned long xen_pfn_t;
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
/*
* ` enum neg_errnoval
* ` HYPERVISOR_update_descriptor(u64 pa, u64 desc);
@ -151,17 +160,15 @@ DEFINE_XEN_GUEST_HANDLE(trap_info_t);
typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
/*
* The following is all CPU context. Note that the fpu_ctxt block is filled
* The following is all CPU context. Note that the fpu_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
*
* Also note that when calling DOMCTL_setvcpucontext and VCPU_initialise
* for HVM and PVH guests, not all information in this structure is updated:
* Also note that when calling DOMCTL_setvcpucontext for HVM guests, not all
* information in this structure is updated, the fields read include: fpu_ctxt
* (if VGCT_I387_VALID is set), flags, user_regs and debugreg[*].
*
* - For HVM guests, the structures read include: fpu_ctxt (if
* VGCT_I387_VALID is set), flags, user_regs, debugreg[*]
*
* - PVH guests are the same as HVM guests, but additionally use ctrlreg[3] to
* set cr3. All other fields not used should be set to 0.
* Note: VCPUOP_initialise for HVM guests is non-symetric with
* DOMCTL_setvcpucontext, and uses struct vcpu_hvm_context from hvm/hvm_vcpu.h
*/
struct vcpu_guest_context {
/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
@ -267,10 +274,64 @@ typedef struct arch_shared_info arch_shared_info_t;
* XEN_DOMCTL_INTERFACE_VERSION.
*/
struct xen_arch_domainconfig {
char dummy;
#define _XEN_X86_EMU_LAPIC 0
#define XEN_X86_EMU_LAPIC (1U<<_XEN_X86_EMU_LAPIC)
#define _XEN_X86_EMU_HPET 1
#define XEN_X86_EMU_HPET (1U<<_XEN_X86_EMU_HPET)
#define _XEN_X86_EMU_PM 2
#define XEN_X86_EMU_PM (1U<<_XEN_X86_EMU_PM)
#define _XEN_X86_EMU_RTC 3
#define XEN_X86_EMU_RTC (1U<<_XEN_X86_EMU_RTC)
#define _XEN_X86_EMU_IOAPIC 4
#define XEN_X86_EMU_IOAPIC (1U<<_XEN_X86_EMU_IOAPIC)
#define _XEN_X86_EMU_PIC 5
#define XEN_X86_EMU_PIC (1U<<_XEN_X86_EMU_PIC)
#define _XEN_X86_EMU_VGA 6
#define XEN_X86_EMU_VGA (1U<<_XEN_X86_EMU_VGA)
#define _XEN_X86_EMU_IOMMU 7
#define XEN_X86_EMU_IOMMU (1U<<_XEN_X86_EMU_IOMMU)
#define _XEN_X86_EMU_PIT 8
#define XEN_X86_EMU_PIT (1U<<_XEN_X86_EMU_PIT)
#define _XEN_X86_EMU_USE_PIRQ 9
#define XEN_X86_EMU_USE_PIRQ (1U<<_XEN_X86_EMU_USE_PIRQ)
#define _XEN_X86_EMU_VPCI 10
#define XEN_X86_EMU_VPCI (1U<<_XEN_X86_EMU_VPCI)
#define XEN_X86_EMU_ALL (XEN_X86_EMU_LAPIC | XEN_X86_EMU_HPET | \
XEN_X86_EMU_PM | XEN_X86_EMU_RTC | \
XEN_X86_EMU_IOAPIC | XEN_X86_EMU_PIC | \
XEN_X86_EMU_VGA | XEN_X86_EMU_IOMMU | \
XEN_X86_EMU_PIT | XEN_X86_EMU_USE_PIRQ |\
XEN_X86_EMU_VPCI)
uint32_t emulation_flags;
};
/* Location of online VCPU bitmap. */
#define XEN_ACPI_CPU_MAP 0xaf00
#define XEN_ACPI_CPU_MAP_LEN ((HVM_MAX_VCPUS + 7) / 8)
/* GPE0 bit set during CPU hotplug */
#define XEN_ACPI_GPE0_CPUHP_BIT 2
#endif
/*
* Representations of architectural CPUID and MSR information. Used as the
* serialised version of Xen's internal representation.
*/
typedef struct xen_cpuid_leaf {
#define XEN_CPUID_NO_SUBLEAF 0xffffffffu
uint32_t leaf, subleaf;
uint32_t a, b, c, d;
} xen_cpuid_leaf_t;
DEFINE_XEN_GUEST_HANDLE(xen_cpuid_leaf_t);
typedef struct xen_msr_entry {
uint32_t idx;
uint32_t flags; /* Reserved MBZ. */
uint64_t val;
} xen_msr_entry_t;
DEFINE_XEN_GUEST_HANDLE(xen_msr_entry_t);
#endif /* !__ASSEMBLY__ */
/*
@ -303,6 +364,13 @@ struct xen_arch_domainconfig {
#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
#endif
/*
* Debug console IO port, also called "port E9 hack". Each character written
* to this IO port will be printed on the hypervisor console, subject to log
* level restrictions.
*/
#define XEN_HVM_DEBUGCONS_IOPORT 0xe9
#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
/*

View File

@ -1,8 +1,8 @@
/******************************************************************************
* arch-x86_32.h
*
*
* Guest OS interface to x86 32-bit Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the

View File

@ -1,8 +1,8 @@
/******************************************************************************
* arch-x86_64.h
*
*
* Guest OS interface to x86 64-bit Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the

255
sys/xen/interface/argo.h Normal file
View File

@ -0,0 +1,255 @@
/******************************************************************************
* Argo : Hypervisor-Mediated data eXchange
*
* Derived from v4v, the version 2 of v2v.
*
* Copyright (c) 2010, Citrix Systems
* Copyright (c) 2018-2019, BAE Systems
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __XEN_PUBLIC_ARGO_H__
#define __XEN_PUBLIC_ARGO_H__
#include "xen.h"
#define XEN_ARGO_DOMID_ANY DOMID_INVALID
/* The maximum size of an Argo ring is defined to be: 16MB (0x1000000 bytes). */
#define XEN_ARGO_MAX_RING_SIZE (0x1000000ULL)
/* Fixed-width type for "argo port" number. Nothing to do with evtchns. */
typedef uint32_t xen_argo_port_t;
/* gfn type: 64-bit fixed-width on all architectures */
typedef uint64_t xen_argo_gfn_t;
/*
* XEN_ARGO_MAXIOV : maximum number of iovs accepted in a single sendv.
* Caution is required if this value is increased: this determines the size of
* an array of xen_argo_iov_t structs on the hypervisor stack, so could cause
* stack overflow if the value is too large.
* The Linux Argo driver never passes more than two iovs.
*/
#define XEN_ARGO_MAXIOV 8U
typedef struct xen_argo_iov
{
XEN_GUEST_HANDLE(uint8) iov_hnd;
uint32_t iov_len;
uint32_t pad;
} xen_argo_iov_t;
typedef struct xen_argo_addr
{
xen_argo_port_t aport;
domid_t domain_id;
uint16_t pad;
} xen_argo_addr_t;
typedef struct xen_argo_send_addr
{
struct xen_argo_addr src;
struct xen_argo_addr dst;
} xen_argo_send_addr_t;
typedef struct xen_argo_ring
{
/* Guests should use atomic operations to access rx_ptr */
uint32_t rx_ptr;
/* Guests should use atomic operations to access tx_ptr */
uint32_t tx_ptr;
/*
* Header space reserved for later use. Align the start of the ring to a
* multiple of the message slot size.
*/
uint8_t reserved[56];
uint8_t ring[XEN_FLEX_ARRAY_DIM];
} xen_argo_ring_t;
typedef struct xen_argo_register_ring
{
xen_argo_port_t aport;
domid_t partner_id;
uint16_t pad;
uint32_t len;
} xen_argo_register_ring_t;
typedef struct xen_argo_unregister_ring
{
xen_argo_port_t aport;
domid_t partner_id;
uint16_t pad;
} xen_argo_unregister_ring_t;
/* Messages on the ring are padded to a multiple of this size. */
#define XEN_ARGO_MSG_SLOT_SIZE 0x10
/*
* Notify flags
*/
/* Ring exists */
#define XEN_ARGO_RING_EXISTS (1U << 0)
/* Ring is shared, not unicast */
#define XEN_ARGO_RING_SHARED (1U << 1)
/* Ring is empty */
#define XEN_ARGO_RING_EMPTY (1U << 2)
/* Sufficient space to queue space_required bytes might exist */
#define XEN_ARGO_RING_SUFFICIENT (1U << 3)
/* Insufficient ring size for space_required bytes */
#define XEN_ARGO_RING_EMSGSIZE (1U << 4)
/* Too many domains waiting for available space signals for this ring */
#define XEN_ARGO_RING_EBUSY (1U << 5)
typedef struct xen_argo_ring_data_ent
{
struct xen_argo_addr ring;
uint16_t flags;
uint16_t pad;
uint32_t space_required;
uint32_t max_message_size;
} xen_argo_ring_data_ent_t;
typedef struct xen_argo_ring_data
{
uint32_t nent;
uint32_t pad;
struct xen_argo_ring_data_ent data[XEN_FLEX_ARRAY_DIM];
} xen_argo_ring_data_t;
struct xen_argo_ring_message_header
{
uint32_t len;
struct xen_argo_addr source;
uint32_t message_type;
uint8_t data[XEN_FLEX_ARRAY_DIM];
};
/*
* Hypercall operations
*/
/*
* XEN_ARGO_OP_register_ring
*
* Register a ring using the guest-supplied memory pages.
* Also used to reregister an existing ring (eg. after resume from hibernate).
*
* The first argument struct indicates the port number for the ring to register
* and the partner domain, if any, that is to be allowed to send to the ring.
* A wildcard (XEN_ARGO_DOMID_ANY) may be supplied instead of a partner domid,
* and if the hypervisor has wildcard sender rings enabled, this will allow
* any domain (XSM notwithstanding) to send to the ring.
*
* The second argument is an array of guest frame numbers and the third argument
* indicates the size of the array. This operation only supports 4K-sized pages.
*
* arg1: XEN_GUEST_HANDLE(xen_argo_register_ring_t)
* arg2: XEN_GUEST_HANDLE(xen_argo_gfn_t)
* arg3: unsigned long npages
* arg4: unsigned long flags (32-bit value)
*/
#define XEN_ARGO_OP_register_ring 1
/* Register op flags */
/*
* Fail exist:
* If set, reject attempts to (re)register an existing established ring.
* If clear, reregistration occurs if the ring exists, with the new ring
* taking the place of the old, preserving tx_ptr if it remains valid.
*/
#define XEN_ARGO_REGISTER_FLAG_FAIL_EXIST 0x1
#ifdef __XEN__
/* Mask for all defined flags. */
#define XEN_ARGO_REGISTER_FLAG_MASK XEN_ARGO_REGISTER_FLAG_FAIL_EXIST
#endif
/*
* XEN_ARGO_OP_unregister_ring
*
* Unregister a previously-registered ring, ending communication.
*
* arg1: XEN_GUEST_HANDLE(xen_argo_unregister_ring_t)
* arg2: NULL
* arg3: 0 (ZERO)
* arg4: 0 (ZERO)
*/
#define XEN_ARGO_OP_unregister_ring 2
/*
* XEN_ARGO_OP_sendv
*
* Send a list of buffers contained in iovs.
*
* The send address struct specifies the source and destination addresses
* for the message being sent, which are used to find the destination ring:
* Xen first looks for a most-specific match with a registered ring with
* (id.addr == dst) and (id.partner == sending_domain) ;
* if that fails, it then looks for a wildcard match (aka multicast receiver)
* where (id.addr == dst) and (id.partner == DOMID_ANY).
*
* For each iov entry, send iov_len bytes from iov_base to the destination ring.
* If insufficient space exists in the destination ring, it will return -EAGAIN
* and Xen will notify the caller when sufficient space becomes available.
*
* The message type is a 32-bit data field available to communicate message
* context data (eg. kernel-to-kernel, rather than application layer).
*
* arg1: XEN_GUEST_HANDLE(xen_argo_send_addr_t) source and dest addresses
* arg2: XEN_GUEST_HANDLE(xen_argo_iov_t) iovs
* arg3: unsigned long niov
* arg4: unsigned long message type (32-bit value)
*/
#define XEN_ARGO_OP_sendv 3
/*
* XEN_ARGO_OP_notify
*
* Asks Xen for information about other rings in the system.
*
* ent->ring is the xen_argo_addr_t of the ring you want information on.
* Uses the same ring matching rules as XEN_ARGO_OP_sendv.
*
* ent->space_required : if this field is not null then Xen will check
* that there is space in the destination ring for this many bytes of payload.
* If the ring is too small for the requested space_required, it will set the
* XEN_ARGO_RING_EMSGSIZE flag on return.
* If sufficient space is available, it will set XEN_ARGO_RING_SUFFICIENT
* and CANCEL any pending notification for that ent->ring; otherwise it
* will schedule a notification event and the flag will not be set.
*
* These flags are set by Xen when notify replies:
* XEN_ARGO_RING_EXISTS ring exists
* XEN_ARGO_RING_SHARED ring is registered for wildcard partner
* XEN_ARGO_RING_EMPTY ring is empty
* XEN_ARGO_RING_SUFFICIENT sufficient space for space_required is there
* XEN_ARGO_RING_EMSGSIZE space_required is too large for the ring size
* XEN_ARGO_RING_EBUSY too many domains waiting for available space signals
*
* arg1: XEN_GUEST_HANDLE(xen_argo_ring_data_t) ring_data (may be NULL)
* arg2: NULL
* arg3: 0 (ZERO)
* arg4: 0 (ZERO)
*/
#define XEN_ARGO_OP_notify 4
#endif

View File

@ -0,0 +1,42 @@
#ifndef __XEN_DEVICE_TREE_DEFS_H__
#define __XEN_DEVICE_TREE_DEFS_H__
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/*
* The device tree compiler (DTC) is allocating the phandle from 1 to
* onwards. Reserve a high value for the GIC phandle.
*/
#define GUEST_PHANDLE_GIC (65000)
#define GUEST_ROOT_ADDRESS_CELLS 2
#define GUEST_ROOT_SIZE_CELLS 2
/**
* IRQ line type.
*
* DT_IRQ_TYPE_NONE - default, unspecified type
* DT_IRQ_TYPE_EDGE_RISING - rising edge triggered
* DT_IRQ_TYPE_EDGE_FALLING - falling edge triggered
* DT_IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered
* DT_IRQ_TYPE_LEVEL_HIGH - high level triggered
* DT_IRQ_TYPE_LEVEL_LOW - low level triggered
* DT_IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
* DT_IRQ_TYPE_SENSE_MASK - Mask for all the above bits
* DT_IRQ_TYPE_INVALID - Use to initialize the type
*/
#define DT_IRQ_TYPE_NONE 0x00000000
#define DT_IRQ_TYPE_EDGE_RISING 0x00000001
#define DT_IRQ_TYPE_EDGE_FALLING 0x00000002
#define DT_IRQ_TYPE_EDGE_BOTH \
(DT_IRQ_TYPE_EDGE_FALLING | DT_IRQ_TYPE_EDGE_RISING)
#define DT_IRQ_TYPE_LEVEL_HIGH 0x00000004
#define DT_IRQ_TYPE_LEVEL_LOW 0x00000008
#define DT_IRQ_TYPE_LEVEL_MASK \
(DT_IRQ_TYPE_LEVEL_LOW | DT_IRQ_TYPE_LEVEL_HIGH)
#define DT_IRQ_TYPE_SENSE_MASK 0x0000000f
#define DT_IRQ_TYPE_INVALID 0x00000010
#endif
#endif

View File

@ -1,8 +1,8 @@
/******************************************************************************
* dom0_ops.h
*
*
* Process command requests from domain-0 guest OS.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the

File diff suppressed because it is too large Load Diff

View File

@ -173,7 +173,9 @@
* The (non-default) location the initial phys-to-machine map should be
* placed at by the hypervisor (Dom0) or the tools (DomU).
* The kernel must be prepared for this mapping to be established using
* large pages, despite such otherwise not being available to guests.
* large pages, despite such otherwise not being available to guests. Note
* that these large pages may be misaligned in PFN space (they'll obviously
* be aligned in MFN and virtual address spaces).
* The kernel must also be able to handle the page table pages used for
* this mapping not being accessible through the initial mapping.
* (Only x86-64 supports this at present.)
@ -217,7 +219,7 @@
/*
* System information exported through crash notes.
*
* The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
* The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
* note in case of a system crash. This note will contain various
* information about the system, see xen/include/xen/elfcore.h.
*/
@ -226,13 +228,14 @@
/*
* System registers exported through crash notes.
*
* The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
* The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
* note per cpu in case of a system crash. This note is architecture
* specific and will contain registers not saved in the "CORE" note.
* See xen/include/xen/elfcore.h for more information.
*/
#define XEN_ELFNOTE_CRASH_REGS 0x1000002
/*
* xen dump-core none note.
* xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE

View File

@ -1,20 +1,49 @@
/*
* There are two expected ways of including this header.
*
* 1) The "default" case (expected from tools etc).
*
* Simply #include <public/errno.h>
*
* In this circumstance, normal header guards apply and the includer shall get
* an enumeration in the XEN_xxx namespace, appropriate for C or assembly.
*
* 2) The special case where the includer provides a XEN_ERRNO() in scope.
*
* In this case, no inclusion guards apply and the caller is responsible for
* their XEN_ERRNO() being appropriate in the included context. The header
* will unilaterally #undef XEN_ERRNO().
*/
#ifndef XEN_ERRNO
/*
* Includer has not provided a custom XEN_ERRNO(). Arrange for normal header
* guards, an automatic enum (for C code) and constants in the XEN_xxx
* namespace.
*/
#ifndef __XEN_PUBLIC_ERRNO_H__
#define __XEN_PUBLIC_ERRNO_H__
#define XEN_ERRNO_DEFAULT_INCLUDE
#ifndef __ASSEMBLY__
#define XEN_ERRNO(name, value) XEN_##name = value,
enum xen_errno {
#else /* !__ASSEMBLY__ */
#elif __XEN_INTERFACE_VERSION__ < 0x00040700
#define XEN_ERRNO(name, value) .equ XEN_##name, value
#endif /* __ASSEMBLY__ */
#endif /* __XEN_PUBLIC_ERRNO_H__ */
#endif /* !XEN_ERRNO */
/* ` enum neg_errnoval { [ -Efoo for each Efoo in the list below ] } */
/* ` enum errnoval { */
#endif /* __XEN_PUBLIC_ERRNO_H__ */
#ifdef XEN_ERRNO
/*
@ -41,6 +70,7 @@ XEN_ERRNO(ENOEXEC, 8) /* Exec format error */
XEN_ERRNO(EBADF, 9) /* Bad file number */
XEN_ERRNO(ECHILD, 10) /* No child processes */
XEN_ERRNO(EAGAIN, 11) /* Try again */
XEN_ERRNO(EWOULDBLOCK, 11) /* Operation would block. Aliases EAGAIN */
XEN_ERRNO(ENOMEM, 12) /* Out of memory */
XEN_ERRNO(EACCES, 13) /* Permission denied */
XEN_ERRNO(EFAULT, 14) /* Bad address */
@ -48,17 +78,21 @@ XEN_ERRNO(EBUSY, 16) /* Device or resource busy */
XEN_ERRNO(EEXIST, 17) /* File exists */
XEN_ERRNO(EXDEV, 18) /* Cross-device link */
XEN_ERRNO(ENODEV, 19) /* No such device */
XEN_ERRNO(EISDIR, 21) /* Is a directory */
XEN_ERRNO(EINVAL, 22) /* Invalid argument */
XEN_ERRNO(ENFILE, 23) /* File table overflow */
XEN_ERRNO(EMFILE, 24) /* Too many open files */
XEN_ERRNO(ENOSPC, 28) /* No space left on device */
XEN_ERRNO(EROFS, 30) /* Read-only file system */
XEN_ERRNO(EMLINK, 31) /* Too many links */
XEN_ERRNO(EDOM, 33) /* Math argument out of domain of func */
XEN_ERRNO(ERANGE, 34) /* Math result not representable */
XEN_ERRNO(EDEADLK, 35) /* Resource deadlock would occur */
XEN_ERRNO(EDEADLOCK, 35) /* Resource deadlock would occur. Aliases EDEADLK */
XEN_ERRNO(ENAMETOOLONG, 36) /* File name too long */
XEN_ERRNO(ENOLCK, 37) /* No record locks available */
XEN_ERRNO(ENOSYS, 38) /* Function not implemented */
XEN_ERRNO(ENOTEMPTY, 39) /* Directory not empty */
XEN_ERRNO(ENODATA, 61) /* No data available */
XEN_ERRNO(ETIME, 62) /* Timer expired */
XEN_ERRNO(EBADMSG, 74) /* Not a data message */
@ -68,6 +102,7 @@ XEN_ERRNO(EILSEQ, 84) /* Illegal byte sequence */
XEN_ERRNO(ERESTART, 85) /* Interrupted system call should be restarted */
#endif
XEN_ERRNO(ENOTSOCK, 88) /* Socket operation on non-socket */
XEN_ERRNO(EMSGSIZE, 90) /* Message too large. */
XEN_ERRNO(EOPNOTSUPP, 95) /* Operation not supported on transport endpoint */
XEN_ERRNO(EADDRINUSE, 98) /* Address already in use */
XEN_ERRNO(EADDRNOTAVAIL, 99) /* Cannot assign requested address */
@ -75,20 +110,17 @@ XEN_ERRNO(ENOBUFS, 105) /* No buffer space available */
XEN_ERRNO(EISCONN, 106) /* Transport endpoint is already connected */
XEN_ERRNO(ENOTCONN, 107) /* Transport endpoint is not connected */
XEN_ERRNO(ETIMEDOUT, 110) /* Connection timed out */
XEN_ERRNO(ECONNREFUSED, 111) /* Connection refused */
#undef XEN_ERRNO
#endif /* XEN_ERRNO */
#ifndef __XEN_PUBLIC_ERRNO_H__
#define __XEN_PUBLIC_ERRNO_H__
/* ` } */
/* Clean up from a default include. Close the enum (for C). */
#ifdef XEN_ERRNO_DEFAULT_INCLUDE
#undef XEN_ERRNO_DEFAULT_INCLUDE
#ifndef __ASSEMBLY__
};
#endif
#define XEN_EWOULDBLOCK XEN_EAGAIN /* Operation would block */
#define XEN_EDEADLOCK XEN_EDEADLK /* Resource deadlock would occur */
#endif /* __XEN_PUBLIC_ERRNO_H__ */
#endif /* XEN_ERRNO_DEFAULT_INCLUDE */

View File

@ -74,6 +74,9 @@
#define EVTCHNOP_init_control 11
#define EVTCHNOP_expand_array 12
#define EVTCHNOP_set_priority 13
#ifdef __XEN__
#define EVTCHNOP_reset_cont 14
#endif
/* ` } */
typedef uint32_t evtchn_port_t;
@ -85,7 +88,7 @@ DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
* is allocated in <dom> and returned as <port>.
* NOTES:
* 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
* 2. <rdom> may be DOMID_SELF, allowing loopback connections.
* 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
struct evtchn_alloc_unbound {
/* IN parameters */
@ -307,7 +310,7 @@ typedef struct evtchn_expand_array evtchn_expand_array_t;
*/
struct evtchn_set_priority {
/* IN parameters. */
uint32_t port;
evtchn_port_t port;
uint32_t priority;
};
typedef struct evtchn_set_priority evtchn_set_priority_t;

View File

@ -1,8 +1,8 @@
/******************************************************************************
* features.h
*
*
* Feature flags, reported by XENVER_get_features.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -102,6 +102,18 @@
/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
#define XENFEAT_memory_op_vnode_supported 13
/* arm: Hypervisor supports ARM SMC calling convention. */
#define XENFEAT_ARM_SMCCC_supported 14
/*
* x86/PVH: If set, ACPI RSDP can be placed at any address. Otherwise RSDP
* must be located in lower 1MB, as required by ACPI Specification for IA-PC
* systems.
* This feature flag is only consulted if XEN_ELFNOTE_GUEST_OS contains
* the "linux" string.
*/
#define XENFEAT_linux_rsdp_unrestricted 15
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */

View File

@ -43,7 +43,7 @@
* table are identified by grant references. A grant reference is an
* integer, which indexes into the grant table. It acts as a
* capability which the grantee can use to perform operations on the
* granters memory.
* granter's memory.
*
* This capability-based system allows shared-memory communications
* between unprivileged domains. A grant reference also encapsulates
@ -411,12 +411,13 @@ typedef struct gnttab_dump_table gnttab_dump_table_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
/*
* GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
* foreign domain has previously registered its interest in the transfer via
* <domid, ref>.
* GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain
* has previously registered its interest in the transfer via <domid, ref>.
*
* Note that, even if the transfer fails, the specified page no longer belongs
* to the calling domain *unless* the error is GNTST_bad_page.
*
* Note further that only PV guests can use this operation.
*/
struct gnttab_transfer {
/* IN parameters. */
@ -429,6 +430,7 @@ struct gnttab_transfer {
typedef struct gnttab_transfer gnttab_transfer_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
/*
* GNTTABOP_copy: Hypervisor based copy
 * source and destinations can be either MFNs or, for foreign domains,
@ -513,10 +515,9 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
/*
* GNTTABOP_set_version: Request a particular version of the grant
* table shared table structure. This operation can only be performed
* once in any given domain. It must be performed before any grants
* are activated; otherwise, the domain will be stuck with version 1.
* The only defined versions are 1 and 2.
* table shared table structure. This operation may be used to toggle
* between different versions, but must be performed while no grants
* are active. The only defined versions are 1 and 2.
*/
struct gnttab_set_version {
/* IN/OUT parameters */
@ -525,6 +526,7 @@ struct gnttab_set_version {
typedef struct gnttab_set_version gnttab_set_version_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t);
/*
* GNTTABOP_get_status_frames: Get the list of frames used to store grant
* status for <dom>. In grant format version 2, the status is separated
@ -586,9 +588,9 @@ struct gnttab_cache_flush {
} a;
uint16_t offset; /* offset from start of grant */
uint16_t length; /* size within the grant */
#define GNTTAB_CACHE_CLEAN (1<<0)
#define GNTTAB_CACHE_INVAL (1<<1)
#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
#define GNTTAB_CACHE_CLEAN (1u<<0)
#define GNTTAB_CACHE_INVAL (1u<<1)
#define GNTTAB_CACHE_SOURCE_GREF (1u<<31)
uint32_t op;
};
typedef struct gnttab_cache_flush gnttab_cache_flush_t;

View File

@ -0,0 +1,480 @@
/*
* Copyright (c) 2016, Citrix Systems Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
#define __XEN_PUBLIC_HVM_DM_OP_H__
#include "../xen.h"
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#include "../event_channel.h"
/* Tools builds may lack the Xen-internal typedef; fall back to uint64_t. */
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
/*
 * IOREQ Servers
 *
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
 * parameter...
 *
 * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
 * ioreq structures), or...
 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
 * ioreq ring), or...
 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
 * to request buffered I/O emulation).
 *
 * The following hypercalls facilitate the creation of IOREQ Servers for
 * 'secondary' emulators which are invoked to implement port I/O, memory, or
 * PCI config space ranges which they explicitly register.
 */
/* Opaque handle identifying an IOREQ Server within its target domain. */
typedef uint16_t ioservid_t;
/*
 * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
 * secondary emulator.
 *
 * The <id> handed back is unique for target domain. The value of
 * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
 * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
 * ioreq ring will not be allocated and hence all emulation requests to
 * this server will be synchronous.
 */
#define XEN_DMOP_create_ioreq_server 1
struct xen_dm_op_create_ioreq_server {
    /* IN - should server handle buffered ioreqs */
    uint8_t handle_bufioreq;
    uint8_t pad[3];
    /* OUT - server id */
    ioservid_t id;
};
/*
 * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
 * access IOREQ Server <id>.
 *
 * If the IOREQ Server is handling buffered emulation requests, the
 * emulator needs to bind to event channel <bufioreq_port> to listen for
 * them. (The event channels used for synchronous emulation requests are
 * specified in the per-CPU ioreq structures).
 * In addition, if the XENMEM_acquire_resource memory op cannot be used,
 * the emulator will need to map the synchronous ioreq structures and
 * buffered ioreq ring (if it exists) from guest memory. If <flags> does
 * not contain XEN_DMOP_no_gfns then these pages will be made available and
 * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
 * respectively. (If the IOREQ Server is not handling buffered emulation
 * only <ioreq_gfn> will be valid).
 *
 * NOTE: To access the synchronous ioreq structures and buffered ioreq
 *       ring, it is preferable to use the XENMEM_acquire_resource memory
 *       op specifying resource type XENMEM_resource_ioreq_server.
 */
#define XEN_DMOP_get_ioreq_server_info 2
struct xen_dm_op_get_ioreq_server_info {
    /* IN - server id */
    ioservid_t id;
    /* IN - flags */
    uint16_t flags;
#define _XEN_DMOP_no_gfns 0
#define XEN_DMOP_no_gfns (1u << _XEN_DMOP_no_gfns)
    /* OUT - buffered ioreq port */
    evtchn_port_t bufioreq_port;
    /* OUT - sync ioreq gfn (see block comment above) */
    uint64_aligned_t ioreq_gfn;
    /* OUT - buffered ioreq gfn (see block comment above) */
    uint64_aligned_t bufioreq_gfn;
};
/*
 * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
 *                                        emulation by the client of
 *                                        IOREQ Server <id>.
 * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
 *                                            previously registered for
 *                                            emulation by the client of
 *                                            IOREQ Server <id>.
 *
 * There are three types of I/O that can be emulated: port I/O, memory
 * accesses and PCI config space accesses. The <type> field denotes which
 * type of range the <start> and <end> (inclusive) fields are specifying.
 * PCI config space ranges are specified by segment/bus/device/function
 * values which should be encoded using the DMOP_PCI_SBDF helper macro
 * below.
 *
 * NOTE: unless an emulation request falls entirely within a range mapped
 * by a secondary emulator, it will not be passed to that emulator.
 */
#define XEN_DMOP_map_io_range_to_ioreq_server 3
#define XEN_DMOP_unmap_io_range_from_ioreq_server 4
struct xen_dm_op_ioreq_server_range {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;
    /* IN - type of range */
    uint32_t type;
# define XEN_DMOP_IO_RANGE_PORT 0 /* I/O port range */
# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define XEN_DMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */
    /* IN - inclusive start and end of range */
    uint64_aligned_t start, end;
};
/* Encode segment/bus/device/function for a XEN_DMOP_IO_RANGE_PCI range. */
#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
    ((((s) & 0xffff) << 16) | \
     (((b) & 0xff) << 8) | \
     (((d) & 0x1f) << 3) | \
     ((f) & 0x07))
/*
 * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
 *
 * The IOREQ Server will not be passed any emulation requests until it is
 * in the enabled state.
 * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
 * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
 * is in the enabled state.
 */
#define XEN_DMOP_set_ioreq_server_state 5
struct xen_dm_op_set_ioreq_server_state {
    /* IN - server id */
    ioservid_t id;
    /* IN - enabled? (boolean) */
    uint8_t enabled;
    uint8_t pad;
};
/*
 * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
 *
 * Any registered I/O ranges will be automatically deregistered.
 */
#define XEN_DMOP_destroy_ioreq_server 6
struct xen_dm_op_destroy_ioreq_server {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;
};
/*
 * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
 *                            range.
 *
 * NOTE: The bitmap passed back to the caller is passed in a
 *       secondary buffer.
 */
#define XEN_DMOP_track_dirty_vram 7
struct xen_dm_op_track_dirty_vram {
    /* IN - number of pages to be tracked */
    uint32_t nr;
    uint32_t pad;
    /* IN - first pfn to track */
    uint64_aligned_t first_pfn;
};
/*
 * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
 *                              PCI INTx pins.
 */
#define XEN_DMOP_set_pci_intx_level 8
struct xen_dm_op_set_pci_intx_level {
    /* IN - PCI INTx identification (domain:bus:device:intx) */
    uint16_t domain;
    uint8_t bus, device, intx;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t level;
};
/*
 * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
 *                             ISA IRQ lines.
 */
#define XEN_DMOP_set_isa_irq_level 9
struct xen_dm_op_set_isa_irq_level {
    /* IN - ISA IRQ (0-15) */
    uint8_t isa_irq;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t level;
};
/*
 * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
 */
#define XEN_DMOP_set_pci_link_route 10
struct xen_dm_op_set_pci_link_route {
    /* PCI INTx line (0-3) */
    uint8_t link;
    /* ISA IRQ (1-15) or 0 -> disable link */
    uint8_t isa_irq;
};
/*
 * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
 *                           an emulator.
 *
 * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
 * @nr_extents entries.
 *
 * On error, @nr_extents will contain the index+1 of the extent that
 * had the error. It is not defined if or which pages may have been
 * marked as dirty, in this event.
 */
#define XEN_DMOP_modified_memory 11
struct xen_dm_op_modified_memory {
    /*
     * IN - Number of extents to be processed
     * OUT - returns n+1 for failing extent
     */
    uint32_t nr_extents;
    /* IN/OUT - Must be set to 0 */
    uint32_t opaque;
};
/* One entry of the extent array carried in DMOP buf 1 (see above). */
struct xen_dm_op_modified_memory_extent {
    /* IN - number of contiguous pages modified */
    uint32_t nr;
    uint32_t pad;
    /* IN - first pfn modified */
    uint64_aligned_t first_pfn;
};
/*
 * XEN_DMOP_set_mem_type: Notify that a region of memory is to be treated
 *                        in a specific way. (See definition of
 *                        hvmmem_type_t).
 *
 * NOTE: In the event of a continuation (return code -ERESTART), the
 *       @first_pfn is set to the value of the pfn of the remaining
 *       region and @nr reduced to the size of the remaining region.
 */
#define XEN_DMOP_set_mem_type 12
struct xen_dm_op_set_mem_type {
    /* IN - number of contiguous pages */
    uint32_t nr;
    /* IN - new hvmmem_type_t of region */
    uint16_t mem_type;
    uint16_t pad;
    /* IN - first pfn in region */
    uint64_aligned_t first_pfn;
};
/*
 * XEN_DMOP_inject_event: Inject an event into a VCPU, which will
 * get taken up when it is next scheduled.
 *
 * Note that the caller should know enough of the state of the CPU before
 * injecting, to know what the effect of injecting the event will be.
 */
#define XEN_DMOP_inject_event 13
struct xen_dm_op_inject_event {
    /* IN - index of vCPU */
    uint32_t vcpuid;
    /* IN - interrupt vector */
    uint8_t vector;
    /* IN - event type (DMOP_EVENT_* ) */
    uint8_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define XEN_DMOP_EVENT_ext_int 0 /* external interrupt */
# define XEN_DMOP_EVENT_nmi 2 /* nmi */
# define XEN_DMOP_EVENT_hw_exc 3 /* hardware exception */
# define XEN_DMOP_EVENT_sw_int 4 /* software interrupt (CD nn) */
# define XEN_DMOP_EVENT_pri_sw_exc 5 /* ICEBP (F1) */
# define XEN_DMOP_EVENT_sw_exc 6 /* INT3 (CC), INTO (CE) */
    /* IN - instruction length */
    uint8_t insn_len;
    uint8_t pad0;
    /* IN - error code (or ~0 to skip) */
    uint32_t error_code;
    uint32_t pad1;
    /* IN - type-specific extra data (%cr2 for #PF, pending_dbg for #DB) */
    uint64_aligned_t cr2;
};
/*
 * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
 */
#define XEN_DMOP_inject_msi 14
struct xen_dm_op_inject_msi {
    /* IN - MSI data (lower 32 bits) */
    uint32_t data;
    uint32_t pad;
    /* IN - MSI address (0xfeexxxxx) */
    uint64_aligned_t addr;
};
/*
 * XEN_DMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
 *                                         to specific memory type <type>
 *                                         for specific accesses <flags>
 *
 * For now, flags only accept the value of XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
 * which means only write operations are to be forwarded to an ioreq server.
 * Support for the emulation of read operations can be added when an ioreq
 * server has such requirement in future.
 */
#define XEN_DMOP_map_mem_type_to_ioreq_server 15
struct xen_dm_op_map_mem_type_to_ioreq_server {
    ioservid_t id;      /* IN - ioreq server id */
    uint16_t type;      /* IN - memory type */
    uint32_t flags;     /* IN - types of accesses to be forwarded to the
                           ioreq server. flags with 0 means to unmap the
                           ioreq server */
#define XEN_DMOP_IOREQ_MEM_ACCESS_READ (1u << 0)
#define XEN_DMOP_IOREQ_MEM_ACCESS_WRITE (1u << 1)
    uint64_t opaque;    /* IN/OUT - only used for hypercall continuation,
                           has to be set to zero by the caller */
};
/*
 * XEN_DMOP_remote_shutdown : Declare a shutdown for another domain
 *                            Identical to SCHEDOP_remote_shutdown
 */
#define XEN_DMOP_remote_shutdown 16
struct xen_dm_op_remote_shutdown {
    uint32_t reason;       /* SHUTDOWN_* => enum sched_shutdown_reason */
                           /* (Other reason values are not blocked) */
};
/*
 * XEN_DMOP_relocate_memory : Relocate GFNs for the specified guest.
 *                            Identical to XENMEM_add_to_physmap with
 *                            space == XENMAPSPACE_gmfn_range.
 */
#define XEN_DMOP_relocate_memory 17
struct xen_dm_op_relocate_memory {
    /* All fields are IN/OUT, with their OUT state undefined. */
    /* Number of GFNs to process. */
    uint32_t size;
    uint32_t pad;
    /* Starting GFN to relocate. */
    uint64_aligned_t src_gfn;
    /* Starting GFN where GFNs should be relocated. */
    uint64_aligned_t dst_gfn;
};
/*
 * XEN_DMOP_pin_memory_cacheattr : Pin caching type of RAM space.
 *                                 Identical to XEN_DOMCTL_pin_mem_cacheattr.
 */
#define XEN_DMOP_pin_memory_cacheattr 18
struct xen_dm_op_pin_memory_cacheattr {
    uint64_aligned_t start; /* Start gfn. */
    uint64_aligned_t end;   /* End gfn. */
/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
#define XEN_DMOP_MEM_CACHEATTR_UC 0
#define XEN_DMOP_MEM_CACHEATTR_WC 1
#define XEN_DMOP_MEM_CACHEATTR_WT 4
#define XEN_DMOP_MEM_CACHEATTR_WP 5
#define XEN_DMOP_MEM_CACHEATTR_WB 6
#define XEN_DMOP_MEM_CACHEATTR_UCM 7
#define XEN_DMOP_DELETE_MEM_CACHEATTR (~(uint32_t)0)
    uint32_t type; /* XEN_DMOP_MEM_CACHEATTR_* */
    uint32_t pad;
};
/*
 * Operation container passed in @bufs[0] of HYPERVISOR_dm_op; the @op
 * code (XEN_DMOP_*) selects which member of the union is meaningful.
 */
struct xen_dm_op {
    uint32_t op;
    uint32_t pad;
    union {
        struct xen_dm_op_create_ioreq_server create_ioreq_server;
        struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
        struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
        struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
        struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
        struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
        struct xen_dm_op_track_dirty_vram track_dirty_vram;
        struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
        struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
        struct xen_dm_op_set_pci_link_route set_pci_link_route;
        struct xen_dm_op_modified_memory modified_memory;
        struct xen_dm_op_set_mem_type set_mem_type;
        struct xen_dm_op_inject_event inject_event;
        struct xen_dm_op_inject_msi inject_msi;
        struct xen_dm_op_map_mem_type_to_ioreq_server
                map_mem_type_to_ioreq_server;
        struct xen_dm_op_remote_shutdown remote_shutdown;
        struct xen_dm_op_relocate_memory relocate_memory;
        struct xen_dm_op_pin_memory_cacheattr pin_memory_cacheattr;
    } u;
};
#endif /* __XEN__ || __XEN_TOOLS__ */
/* Descriptor for one hypercall buffer: guest handle plus its size. */
struct xen_dm_op_buf {
    XEN_GUEST_HANDLE(void) h;
    xen_ulong_t size;
};
typedef struct xen_dm_op_buf xen_dm_op_buf_t;
DEFINE_XEN_GUEST_HANDLE(xen_dm_op_buf_t);
/* ` enum neg_errnoval
 * ` HYPERVISOR_dm_op(domid_t domid,
 * `                  unsigned int nr_bufs,
 * `                  xen_dm_op_buf_t bufs[])
 * `
 *
 * @domid is the domain the hypercall operates on.
 * @nr_bufs is the number of buffers in the @bufs array.
 * @bufs points to an array of buffers where @bufs[0] contains a struct
 * xen_dm_op, describing the specific device model operation and its
 * parameters.
 * @bufs[1..] may be referenced in the parameters for the purposes of
 * passing extra information to or from the domain.
 */
#endif /* __XEN_PUBLIC_HVM_DM_OP_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -23,6 +23,8 @@
#ifndef __XEN_PUBLIC_HVM_E820_H__
#define __XEN_PUBLIC_HVM_E820_H__
#include "../xen.h"
/* E820 location in HVM virtual address space. */
#define HVM_E820_PAGE 0x00090000
#define HVM_E820_NR_OFFSET 0x000001E8
@ -30,6 +32,7 @@
#define HVM_BELOW_4G_RAM_END 0xF0000000
#define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END
#define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
#define HVM_BELOW_4G_MMIO_LENGTH ((xen_mk_ullong(1) << 32) - \
HVM_BELOW_4G_MMIO_START)
#endif /* __XEN_PUBLIC_HVM_E820_H__ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
* hvm/hvm_info_table.h
*
*
* HVM parameter and information table, written into guest memory map.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,6 +34,14 @@
/* Maximum we can support with current vLAPIC ID mapping. */
#define HVM_MAX_VCPUS 128
/*
* In some cases SMP HVM guests may require knowledge of Xen's idea of vCPU ids
* for their vCPUs. For example, HYPERVISOR_vcpu_op and some EVTCHNOP_*
* hypercalls take vcpu id as a parameter. It is valid for HVM guests to assume
* that Xen's vCPU id always equals to ACPI (not APIC!) id in MADT table which
* is always present for SMP guests.
*/
struct hvm_info_table {
char signature[8]; /* "HVM INFO" */
uint32_t length;

View File

@ -32,12 +32,33 @@
#define HVMOP_get_param 1
/* Get/set one per-domain HVM parameter; <index> selects the parameter. */
struct xen_hvm_param {
    domid_t domid; /* IN */
    uint16_t pad;
    uint32_t index; /* IN */
    uint64_t value; /* IN/OUT */
};
typedef struct xen_hvm_param xen_hvm_param_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
/* altp2m: set the #VE suppress bit for a single gfn in view <view>. */
struct xen_hvm_altp2m_suppress_ve {
    uint16_t view;
    uint8_t suppress_ve; /* Boolean type. */
    uint8_t pad1;
    uint32_t pad2;
    uint64_t gfn;
};
/*
 * altp2m: set the #VE suppress bit for a gfn range [first_gfn, last_gfn].
 * NOTE(review): first_error/first_error_gfn appear to report the first
 * failing gfn on partial failure — confirm against the altp2m docs.
 */
struct xen_hvm_altp2m_suppress_ve_multi {
    uint16_t view;
    uint8_t suppress_ve; /* Boolean type. */
    uint8_t pad1;
    int32_t first_error; /* Should be set to 0. */
    uint64_t first_gfn; /* Value may be updated. */
    uint64_t last_gfn;
    uint64_t first_error_gfn; /* Gfn of the first error. */
};
#if __XEN_INTERFACE_VERSION__ < 0x00040900
/* Set the logical level of one of a domain's PCI INTx wires. */
#define HVMOP_set_pci_intx_level 2
struct xen_hvm_set_pci_intx_level {
@ -76,64 +97,38 @@ struct xen_hvm_set_pci_link_route {
typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5
/*
* hvmmem_type_t should not be defined when generating the corresponding
* compat header. This will ensure that the improperly named HVMMEM_(*)
* values are defined only once.
*/
#ifndef XEN_GENERATING_COMPAT_HEADERS
typedef enum {
HVMMEM_ram_rw, /* Normal read/write guest RAM */
HVMMEM_ram_ro, /* Read-only; writes are discarded */
HVMMEM_mmio_dm, /* Reads and write go to the device model */
HVMMEM_mmio_write_dm /* Read-only; writes go to the device model */
#if __XEN_INTERFACE_VERSION__ < 0x00040700
HVMMEM_mmio_write_dm, /* Read-only; writes go to the device model */
#else
HVMMEM_unused, /* Placeholder; setting memory to this type
will fail for code after 4.7.0 */
#endif
HVMMEM_ioreq_server /* Memory type claimed by an ioreq server; type
changes to this value are only allowed after
an ioreq server has claimed its ownership.
Only pages with HVMMEM_ram_rw are allowed to
change to this type; conversely, pages with
this type are only allowed to be changed back
to HVMMEM_ram_rw. */
} hvmmem_type_t;
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Track dirty VRAM. */
#define HVMOP_track_dirty_vram 6
struct xen_hvm_track_dirty_vram {
/* Domain to be tracked. */
domid_t domid;
/* Number of pages to track. */
uint32_t nr;
/* First pfn to track. */
uint64_aligned_t first_pfn;
/* OUT variable. */
/* Dirty bitmap buffer. */
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
};
typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
/* Notify that some pages got modified by the Device Model. */
#define HVMOP_modified_memory 7
struct xen_hvm_modified_memory {
/* Domain to be updated. */
domid_t domid;
/* Number of pages. */
uint32_t nr;
/* First pfn. */
uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
#define HVMOP_set_mem_type 8
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
/* Domain to be updated. */
domid_t domid;
/* Memory type */
uint16_t hvmmem_type;
/* Number of pages. */
uint32_t nr;
/* First pfn. */
uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* XEN_GENERATING_COMPAT_HEADERS */
/* Hint from PV drivers for pagetable destruction. */
#define HVMOP_pagetable_dying 9
@ -172,38 +167,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
/* Deprecated by XENMEM_access_op_get_access */
#define HVMOP_get_mem_access 13
#define HVMOP_inject_trap 14
/* Inject a trap into a VCPU, which will get taken up on the next
* scheduling of it. Note that the caller should know enough of the
* state of the CPU before injecting, to know what the effect of
* injecting the trap will be.
*/
struct xen_hvm_inject_trap {
/* Domain to be queried. */
domid_t domid;
/* VCPU */
uint32_t vcpuid;
/* Vector number */
uint32_t vector;
/* Trap type (HVMOP_TRAP_*) */
uint32_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define HVMOP_TRAP_ext_int 0 /* external interrupt */
# define HVMOP_TRAP_nmi 2 /* nmi */
# define HVMOP_TRAP_hw_exc 3 /* hardware exception */
# define HVMOP_TRAP_sw_int 4 /* software interrupt (CD nn) */
# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
# define HVMOP_TRAP_sw_exc 6 /* INT3 (CC), INTO (CE) */
/* Error code, or ~0u to skip */
uint32_t error_code;
/* Instruction length */
uint32_t insn_len;
/* CR2 for page faults */
uint64_aligned_t cr2;
};
typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#define HVMOP_get_mem_type 15
@ -223,51 +186,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/*
 * MSI injection for emulated devices.
 *
 * Injects the MSI described by <addr>/<data> into domain <domid>.
 */
#define HVMOP_inject_msi 16
struct xen_hvm_inject_msi {
/* Domain to be injected */
domid_t domid;
/* Data -- lower 32 bits */
uint32_t data;
/* Address (0xfeexxxxx) */
uint64_t addr;
};
typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
/*
* IOREQ Servers
*
* The interface between an I/O emulator an Xen is called an IOREQ Server.
* A domain supports a single 'legacy' IOREQ Server which is instantiated if
* parameter...
*
* HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
* ioreq structures), or...
* HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
* ioreq ring), or...
* HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
* to request buffered I/O emulation).
*
* The following hypercalls facilitate the creation of IOREQ Servers for
* 'secondary' emulators which are invoked to implement port I/O, memory, or
* PCI config space ranges which they explicitly register.
* Definitions relating to DMOP_create_ioreq_server. (Defined here for
* backwards compatibility).
*/
typedef uint16_t ioservid_t;
/*
* HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
* emulator servicing domain <domid>.
*
* The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
* the buffered ioreq ring will not be allocated and hence all emulation
 * requests to this server will be synchronous.
*/
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
domid_t domid; /* IN - domain to be serviced */
#define HVM_IOREQSRV_BUFIOREQ_OFF 0
#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
/*
@ -275,107 +198,6 @@ struct xen_hvm_create_ioreq_server {
* the pointer pair gets read atomically:
*/
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
ioservid_t id; /* OUT - server id */
};
typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
/*
* HVMOP_get_ioreq_server_info: Get all the information necessary to access
* IOREQ Server <id>.
*
* The emulator needs to map the synchronous ioreq structures and buffered
* ioreq ring (if it exists) that Xen uses to request emulation. These are
* hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
* respectively. In addition, if the IOREQ Server is handling buffered
* emulation requests, the emulator needs to bind to event channel
* <bufioreq_port> to listen for them. (The event channels used for
* synchronous emulation requests are specified in the per-CPU ioreq
* structures in <ioreq_pfn>).
* If the IOREQ Server is not handling buffered emulation requests then the
* values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
*/
#define HVMOP_get_ioreq_server_info 18
struct xen_hvm_get_ioreq_server_info {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
evtchn_port_t bufioreq_port; /* OUT - buffered ioreq port (0 if the server
* does not handle buffered requests) */
uint64_aligned_t ioreq_pfn; /* OUT - sync ioreq pfn */
uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn (0 if the server
* does not handle buffered requests) */
};
typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
/*
* HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
* for emulation by the client of IOREQ
* Server <id>
* HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
* for emulation by the client of IOREQ
* Server <id>
*
* There are three types of I/O that can be emulated: port I/O, memory accesses
* and PCI config space accesses. The <type> field denotes which type of range
* the <start> and <end> (inclusive) fields are specifying.
* PCI config space ranges are specified by segment/bus/device/function values
* which should be encoded using the HVMOP_PCI_SBDF helper macro below.
*
* NOTE: unless an emulation request falls entirely within a range mapped
* by a secondary emulator, it will not be passed to that emulator.
*/
#define HVMOP_map_io_range_to_ioreq_server 19
#define HVMOP_unmap_io_range_from_ioreq_server 20
struct xen_hvm_io_range {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
uint32_t type; /* IN - type of range */
# define HVMOP_IO_RANGE_PORT 0 /* I/O port range */
# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define HVMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */
uint64_aligned_t start, end; /* IN - inclusive start and end of range */
};
typedef struct xen_hvm_io_range xen_hvm_io_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
/*
 * Encode a PCI segment/bus/device/function into the value expected in the
 * <start>/<end> fields for ranges of type HVMOP_IO_RANGE_PCI.
 */
#define HVMOP_PCI_SBDF(s,b,d,f) \
((((s) & 0xffff) << 16) | \
(((b) & 0xff) << 8) | \
(((d) & 0x1f) << 3) | \
((f) & 0x07))
/*
 * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
 * <domid>.
 *
 * Any registered I/O ranges will be automatically deregistered.
 */
#define HVMOP_destroy_ioreq_server 21
struct xen_hvm_destroy_ioreq_server {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id to destroy */
};
typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
/*
 * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
 * domain <domid>.
 *
 * The IOREQ Server will not be passed any emulation requests until it is in the
 * enabled state.
 * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
 * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
 * the enabled state.
 */
#define HVMOP_set_ioreq_server_state 22
struct xen_hvm_set_ioreq_server_state {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
uint8_t enabled; /* IN - enabled? */
};
typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
@ -421,27 +243,62 @@ struct xen_hvm_altp2m_vcpu_enable_notify {
typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
/* Disable altp2m event notifications for the given vCPU. */
struct xen_hvm_altp2m_vcpu_disable_notify {
uint32_t vcpu_id;
};
typedef struct xen_hvm_altp2m_vcpu_disable_notify xen_hvm_altp2m_vcpu_disable_notify_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_disable_notify_t);
/* View identification, used by the HVMOP_altp2m view management ops. */
struct xen_hvm_altp2m_view {
/* IN/OUT variable */
uint16_t view;
/* Create view only: default access type
 * NOTE: currently ignored */
uint16_t hvmmem_default_access; /* xenmem_access_t */
};
typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
#if __XEN_INTERFACE_VERSION__ < 0x00040a00
struct xen_hvm_altp2m_set_mem_access {
/* view */
uint16_t view;
/* Memory type */
uint16_t hvmmem_access; /* xenmem_access_t */
uint16_t access; /* xenmem_access_t */
uint32_t pad;
/* gfn */
uint64_t gfn;
};
typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
/* Get/set the access of a single gfn within an altp2m view. */
struct xen_hvm_altp2m_mem_access {
/* view */
uint16_t view;
/* Memory type */
uint16_t access; /* xenmem_access_t */
uint32_t pad;
/* gfn */
uint64_t gfn;
};
typedef struct xen_hvm_altp2m_mem_access xen_hvm_altp2m_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_mem_access_t);
/* Set access for an array of pages (HVMOP_altp2m_set_mem_access_multi). */
struct xen_hvm_altp2m_set_mem_access_multi {
/* view */
uint16_t view;
uint16_t pad;
/* Number of pages */
uint32_t nr;
/*
 * Used for continuation purposes.
 * Must be set to zero upon initial invocation.
 */
uint64_t opaque;
/* List of pfns to set access for */
XEN_GUEST_HANDLE(const_uint64) pfn_list;
/* Corresponding list of access settings for pfn_list */
XEN_GUEST_HANDLE(const_uint8) access_list;
};
struct xen_hvm_altp2m_change_gfn {
/* view */
@ -456,13 +313,24 @@ struct xen_hvm_altp2m_change_gfn {
typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
/* Get the altp2m view index currently active on the given vCPU. */
struct xen_hvm_altp2m_get_vcpu_p2m_idx {
uint32_t vcpu_id;
uint16_t altp2m_idx; /* OUT - active p2m index for <vcpu_id> */
};
/* Set the visibility of a given altp2m view (HVMOP_altp2m_set_visibility). */
struct xen_hvm_altp2m_set_visibility {
uint16_t altp2m_idx;
uint8_t visible;
uint8_t pad;
};
struct xen_hvm_altp2m_op {
uint32_t version; /* HVMOP_ALTP2M_INTERFACE_VERSION */
uint32_t cmd;
/* Get/set the altp2m state for a domain */
#define HVMOP_altp2m_get_domain_state 1
#define HVMOP_altp2m_set_domain_state 2
/* Set the current VCPU to receive altp2m event notifications */
/* Set a given VCPU to receive altp2m event notifications */
#define HVMOP_altp2m_vcpu_enable_notify 3
/* Create a new view */
#define HVMOP_altp2m_create_p2m 4
@ -474,15 +342,40 @@ struct xen_hvm_altp2m_op {
#define HVMOP_altp2m_set_mem_access 7
/* Change a p2m entry to have a different gfn->mfn mapping */
#define HVMOP_altp2m_change_gfn 8
/* Set access for an array of pages */
#define HVMOP_altp2m_set_mem_access_multi 9
/* Set the "Suppress #VE" bit on a page */
#define HVMOP_altp2m_set_suppress_ve 10
/* Get the "Suppress #VE" bit of a page */
#define HVMOP_altp2m_get_suppress_ve 11
/* Get the access of a page of memory from a certain view */
#define HVMOP_altp2m_get_mem_access 12
/* Disable altp2m event notifications for a given VCPU */
#define HVMOP_altp2m_vcpu_disable_notify 13
/* Get the active vcpu p2m index */
#define HVMOP_altp2m_get_p2m_idx 14
/* Set the "Suppress #VE" bit for a range of pages */
#define HVMOP_altp2m_set_suppress_ve_multi 15
/* Set visibility for a given altp2m view */
#define HVMOP_altp2m_set_visibility 16
domid_t domain;
uint16_t pad1;
uint32_t pad2;
union {
struct xen_hvm_altp2m_domain_state domain_state;
struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
struct xen_hvm_altp2m_view view;
struct xen_hvm_altp2m_set_mem_access set_mem_access;
struct xen_hvm_altp2m_change_gfn change_gfn;
struct xen_hvm_altp2m_domain_state domain_state;
struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
struct xen_hvm_altp2m_view view;
#if __XEN_INTERFACE_VERSION__ < 0x00040a00
struct xen_hvm_altp2m_set_mem_access set_mem_access;
#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
struct xen_hvm_altp2m_mem_access mem_access;
struct xen_hvm_altp2m_change_gfn change_gfn;
struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
struct xen_hvm_altp2m_suppress_ve suppress_ve;
struct xen_hvm_altp2m_suppress_ve_multi suppress_ve_multi;
struct xen_hvm_altp2m_vcpu_disable_notify disable_notify;
struct xen_hvm_altp2m_get_vcpu_p2m_idx get_vcpu_p2m_idx;
struct xen_hvm_altp2m_set_visibility set_visibility;
uint8_t pad[64];
} u;
};

View File

@ -0,0 +1,144 @@
/*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2015, Roger Pau Monne <roger.pau@citrix.com>
*/
#ifndef __XEN_PUBLIC_HVM_HVM_VCPU_H__
#define __XEN_PUBLIC_HVM_HVM_VCPU_H__
#include "../xen.h"
/*
 * Initial register state for an HVM vCPU started in 32-bit (or, via
 * EFER.LME/LMA, compatibility) mode; selected with VCPU_HVM_MODE_32B
 * in struct vcpu_hvm_context.
 */
struct vcpu_hvm_x86_32 {
uint32_t eax;
uint32_t ecx;
uint32_t edx;
uint32_t ebx;
uint32_t esp;
uint32_t ebp;
uint32_t esi;
uint32_t edi;
uint32_t eip;
uint32_t eflags;
uint32_t cr0;
uint32_t cr3;
uint32_t cr4;
uint32_t pad1; /* Explicit padding: aligns the following uint64_t. */
/*
 * EFER should only be used to set the NXE bit (if required)
 * when starting a vCPU in 32bit mode with paging enabled or
 * to set the LME/LMA bits in order to start the vCPU in
 * compatibility mode.
 */
uint64_t efer;
uint32_t cs_base;
uint32_t ds_base;
uint32_t ss_base;
uint32_t es_base;
uint32_t tr_base;
uint32_t cs_limit;
uint32_t ds_limit;
uint32_t ss_limit;
uint32_t es_limit;
uint32_t tr_limit;
uint16_t cs_ar;
uint16_t ds_ar;
uint16_t ss_ar;
uint16_t es_ar;
uint16_t tr_ar;
uint16_t pad2[3]; /* Explicit padding: structure size is a multiple of 8. */
};
/*
* The layout of the _ar fields of the segment registers is the
* following:
*
* Bits [0,3]: type (bits 40-43).
* Bit 4: s (descriptor type, bit 44).
* Bit [5,6]: dpl (descriptor privilege level, bits 45-46).
* Bit 7: p (segment-present, bit 47).
* Bit 8: avl (available for system software, bit 52).
* Bit 9: l (64-bit code segment, bit 53).
* Bit 10: db (meaning depends on the segment, bit 54).
* Bit 11: g (granularity, bit 55)
* Bits [12,15]: unused, must be blank.
*
 * A more complete description of the meaning of these fields can be
* obtained from the Intel SDM, Volume 3, section 3.4.5.
*/
/*
 * Initial register state for an HVM vCPU started directly in long mode;
 * selected with VCPU_HVM_MODE_64B in struct vcpu_hvm_context.
 */
struct vcpu_hvm_x86_64 {
uint64_t rax;
uint64_t rcx;
uint64_t rdx;
uint64_t rbx;
uint64_t rsp;
uint64_t rbp;
uint64_t rsi;
uint64_t rdi;
uint64_t rip;
uint64_t rflags;
uint64_t cr0;
uint64_t cr3;
uint64_t cr4;
uint64_t efer;
/*
 * Using VCPU_HVM_MODE_64B implies that the vCPU is launched
 * directly in long mode, so the cached parts of the segment
 * registers get set to match that environment.
 *
 * If the user wants to launch the vCPU in compatibility mode
 * the 32-bit structure should be used instead.
 */
};
/*
 * Initial state of an HVM vCPU: <mode> selects which member of the
 * cpu_regs union is valid.
 */
struct vcpu_hvm_context {
#define VCPU_HVM_MODE_32B 0 /* 32bit fields of the structure will be used. */
#define VCPU_HVM_MODE_64B 1 /* 64bit fields of the structure will be used. */
uint32_t mode;
uint32_t pad; /* Explicit padding: aligns cpu_regs to 8 bytes. */
/* CPU registers. */
union {
struct vcpu_hvm_x86_32 x86_32;
struct vcpu_hvm_x86_64 x86_64;
} cpu_regs;
};
typedef struct vcpu_hvm_context vcpu_hvm_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_hvm_context_t);
#endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -69,8 +69,15 @@
#define HVM_XS_SYSTEM_PRODUCT_NAME "bios-strings/system-product-name"
#define HVM_XS_SYSTEM_VERSION "bios-strings/system-version"
#define HVM_XS_SYSTEM_SERIAL_NUMBER "bios-strings/system-serial-number"
#define HVM_XS_BASEBOARD_MANUFACTURER "bios-strings/baseboard-manufacturer"
#define HVM_XS_BASEBOARD_PRODUCT_NAME "bios-strings/baseboard-product-name"
#define HVM_XS_BASEBOARD_VERSION "bios-strings/baseboard-version"
#define HVM_XS_BASEBOARD_SERIAL_NUMBER "bios-strings/baseboard-serial-number"
#define HVM_XS_BASEBOARD_ASSET_TAG "bios-strings/baseboard-asset-tag"
#define HVM_XS_BASEBOARD_LOCATION_IN_CHASSIS "bios-strings/baseboard-location-in-chassis"
#define HVM_XS_ENCLOSURE_MANUFACTURER "bios-strings/enclosure-manufacturer"
#define HVM_XS_ENCLOSURE_SERIAL_NUMBER "bios-strings/enclosure-serial-number"
#define HVM_XS_ENCLOSURE_ASSET_TAG "bios-strings/enclosure-asset-tag"
#define HVM_XS_BATTERY_MANUFACTURER "bios-strings/battery-manufacturer"
#define HVM_XS_BATTERY_DEVICE_NAME "bios-strings/battery-device-name"

View File

@ -1,7 +1,7 @@
/*
* ioreq.h: I/O request definitions for device models
* Copyright (c) 2004, Intel Corporation.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -45,7 +45,7 @@
*
* For I/O type IOREQ_TYPE_PCI_CONFIG, the physical address is formatted
* as follows:
*
*
* 63....48|47..40|39..35|34..32|31........0
* SEGMENT |BUS |DEV |FN |OFFSET
*/
@ -57,7 +57,7 @@ struct ioreq {
uint32_t vp_eport; /* evtchn for notifications to/from device model */
uint16_t _pad0;
uint8_t state:4;
uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
* of the real data to use. */
uint8_t dir:1; /* 1=read, 0=write */
uint8_t df:1;
@ -99,18 +99,23 @@ struct buffered_iopage {
typedef struct buffered_iopage buffered_iopage_t;
/*
* ACPI Control/Event register locations. Location is controlled by a
* ACPI Control/Event register locations. Location is controlled by a
* version number in HVM_PARAM_ACPI_IOPORTS_LOCATION.
*/
/* Version 0 (default): Traditional Xen locations. */
/*
* Version 0 (default): Traditional (obsolete) Xen locations.
*
* These are now only used for compatibility with VMs migrated
* from older Xen versions.
*/
#define ACPI_PM1A_EVT_BLK_ADDRESS_V0 0x1f40
#define ACPI_PM1A_CNT_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x04)
#define ACPI_PM_TMR_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x08)
#define ACPI_GPE0_BLK_ADDRESS_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0 + 0x20)
#define ACPI_GPE0_BLK_LEN_V0 0x08
/* Version 1: Locations preferred by modern Qemu. */
/* Version 1: Locations preferred by modern Qemu (including Qemu-trad). */
#define ACPI_PM1A_EVT_BLK_ADDRESS_V1 0xb000
#define ACPI_PM1A_CNT_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x04)
#define ACPI_PM_TMR_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x08)
@ -124,6 +129,7 @@ typedef struct buffered_iopage buffered_iopage_t;
#define ACPI_GPE0_BLK_ADDRESS ACPI_GPE0_BLK_ADDRESS_V0
#define ACPI_GPE0_BLK_LEN ACPI_GPE0_BLK_LEN_V0
#endif /* _IOREQ_H_ */
/*

View File

@ -25,22 +25,66 @@
#include "hvm_op.h"
/* These parameters are deprecated and their meaning is undefined. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#define HVM_PARAM_PAE_ENABLED 4
#define HVM_PARAM_DM_DOMAIN 13
#define HVM_PARAM_MEMORY_EVENT_CR0 20
#define HVM_PARAM_MEMORY_EVENT_CR3 21
#define HVM_PARAM_MEMORY_EVENT_CR4 22
#define HVM_PARAM_MEMORY_EVENT_INT3 23
#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#define HVM_PARAM_MEMORY_EVENT_MSR 30
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
/*
* Parameter space for HVMOP_{set,get}_param.
*/
#define HVM_PARAM_CALLBACK_IRQ 0
#define HVM_PARAM_CALLBACK_IRQ_TYPE_MASK xen_mk_ullong(0xFF00000000000000)
/*
* How should CPU0 event-channel notifications be delivered?
* val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
* val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
* Domain = val[47:32], Bus = val[31:16],
* DevFn = val[15: 8], IntX = val[ 1: 0]
* val[63:56] == 2: val[7:0] is a vector number, check for
* XENFEAT_hvm_callback_vector to know if this delivery
* method is available.
*
* If val == 0 then CPU0 event-channel notifications are not delivered.
* If val != 0, val[63:56] encodes the type, as follows:
*/
#define HVM_PARAM_CALLBACK_IRQ 0
#define HVM_PARAM_CALLBACK_TYPE_GSI 0
/*
* val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
* and disables all notifications.
*/
#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
/*
* val[55:0] is a delivery PCI INTx line:
* Domain = val[47:32], Bus = val[31:16] DevFn = val[15:8], IntX = val[1:0]
*/
#if defined(__i386__) || defined(__x86_64__)
#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
/*
* val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
* if this delivery method is available.
*/
#elif defined(__arm__) || defined(__aarch64__)
#define HVM_PARAM_CALLBACK_TYPE_PPI 2
/*
* val[55:16] needs to be zero.
* val[15:8] is interrupt flag of the PPI used by event-channel:
* bit 8: the PPI is edge(1) or level(0) triggered
* bit 9: the PPI is active low(1) or high(0)
* val[7:0] is a PPI number used by event-channel.
* This is only used by ARM/ARM64 and masking/eoi the interrupt associated to
* the notification is handled by the interrupt controller.
*/
#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK 0xFF00
#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL 2
#endif
/*
* These are not used by Xen. They are here for convenience of HVM-guest
@ -49,12 +93,9 @@
#define HVM_PARAM_STORE_PFN 1
#define HVM_PARAM_STORE_EVTCHN 2
#define HVM_PARAM_PAE_ENABLED 4
#define HVM_PARAM_IOREQ_PFN 5
#define HVM_PARAM_BUFIOREQ_PFN 6
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#if defined(__i386__) || defined(__x86_64__)
@ -98,11 +139,41 @@
#define _HVMPV_reference_tsc 3
#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
/* Use Hypercall for remote TLB flush */
#define _HVMPV_hcall_remote_tlb_flush 4
#define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush)
/* Use APIC assist */
#define _HVMPV_apic_assist 5
#define HVMPV_apic_assist (1 << _HVMPV_apic_assist)
/* Enable crash MSRs */
#define _HVMPV_crash_ctl 6
#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
/* Enable SYNIC MSRs */
#define _HVMPV_synic 7
#define HVMPV_synic (1 << _HVMPV_synic)
/* Enable STIMER MSRs */
#define _HVMPV_stimer 8
#define HVMPV_stimer (1 << _HVMPV_stimer)
/* Use Synthetic Cluster IPI Hypercall */
#define _HVMPV_hcall_ipi 9
#define HVMPV_hcall_ipi (1 << _HVMPV_hcall_ipi)
#define HVMPV_feature_mask \
(HVMPV_base_freq | \
HVMPV_no_freq | \
HVMPV_time_ref_count | \
HVMPV_reference_tsc)
(HVMPV_base_freq | \
HVMPV_no_freq | \
HVMPV_time_ref_count | \
HVMPV_reference_tsc | \
HVMPV_hcall_remote_tlb_flush | \
HVMPV_apic_assist | \
HVMPV_crash_ctl | \
HVMPV_synic | \
HVMPV_stimer | \
HVMPV_hcall_ipi)
#endif
@ -137,9 +208,6 @@
/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
#define HVM_PARAM_IDENT_PT 12
/* Device Model domain, defaults to 0. */
#define HVM_PARAM_DM_DOMAIN 13
/* ACPI S state: currently support S0 and S3 on x86. */
#define HVM_PARAM_ACPI_S_STATE 14
@ -164,14 +232,6 @@
*/
#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
/* Deprecated */
#define HVM_PARAM_MEMORY_EVENT_CR0 20
#define HVM_PARAM_MEMORY_EVENT_CR3 21
#define HVM_PARAM_MEMORY_EVENT_CR4 22
#define HVM_PARAM_MEMORY_EVENT_INT3 23
#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
#define HVM_PARAM_MEMORY_EVENT_MSR 30
/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM 24
@ -189,9 +249,57 @@
/* Location of the VM Generation ID in guest physical address space. */
#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
/* Boolean: Enable altp2m */
/*
* Set mode for altp2m:
* disabled: don't activate altp2m (default)
* mixed: allow access to all altp2m ops for both in-guest and external tools
* external: allow access to external privileged tools only
* limited: guest only has limited access (ie. control VMFUNC and #VE)
*
* Note that 'mixed' mode has not been evaluated for safety from a
* security perspective. Before using this mode in a
* security-critical environment, each subop should be evaluated for
* safety, with unsafe subops blacklisted in XSM.
*/
#define HVM_PARAM_ALTP2M 35
#define XEN_ALTP2M_disabled 0
#define XEN_ALTP2M_mixed 1
#define XEN_ALTP2M_external 2
#define XEN_ALTP2M_limited 3
#define HVM_NR_PARAMS 36
/*
* Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
* save/restore. This is a workaround for a hardware limitation that
* does not allow the full FIP/FDP and FCS/FDS to be restored.
*
* Valid values are:
*
* 8: save/restore 64-bit FIP/FDP and clear FCS/FDS (default if CPU
* has FPCSDS feature).
*
* 4: save/restore 32-bit FIP/FDP, FCS/FDS, and clear upper 32-bits of
* FIP/FDP.
*
* 0: allow hypervisor to choose based on the value of FIP/FDP
* (default if CPU does not have FPCSDS).
*
* If FPCSDS (bit 13 in CPUID leaf 0x7, subleaf 0x0) is set, the CPU
* never saves FCS/FDS and this parameter should be left at the
* default of 8.
*/
#define HVM_PARAM_X87_FIP_WIDTH 36
/*
* TSS (and its size) used on Intel when CR0.PE=0. The address occupies
* the low 32 bits, while the size is in the high 32 ones.
*/
#define HVM_PARAM_VM86_TSS_SIZED 37
/* Enable MCA capabilities. */
#define HVM_PARAM_MCA_CAP 38
#define XEN_HVM_MCA_CAP_LMCE (xen_mk_ullong(1) << 0)
#define XEN_HVM_MCA_CAP_MASK XEN_HVM_MCA_CAP_LMCE
#define HVM_NR_PARAMS 39
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */

View File

@ -1,7 +1,7 @@
/*
* pvdrivers.h: Register of PV drivers product numbers.
* Copyright (c) 2012, Citrix Systems Inc.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -26,9 +26,9 @@
/*
* This is the master registry of product numbers for
* PV drivers.
* PV drivers.
* If you need a new product number allocating, please
* post to xen-devel@lists.xensource.com. You should NOT use
* post to xen-devel@lists.xenproject.org. You should NOT use
* a product number without allocating one.
* If you maintain a separate versioning and distribution path
* for PV drivers you should have a separate product number so

View File

@ -1,9 +1,9 @@
/*
/*
* hvm/save.h
*
* Structure definitions for HVM state that is held by Xen and must
* be saved along with the domain's memory and device-model state.
*
*
* Copyright (c) 2007 XenSource Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@ -29,13 +29,13 @@
#define __XEN_PUBLIC_HVM_SAVE_H__
/*
* Structures in this header *must* have the same layout in 32bit
* and 64bit environments: this means that all fields must be explicitly
* sized types and aligned to their sizes, and the structs must be
* Structures in this header *must* have the same layout in 32bit
* and 64bit environments: this means that all fields must be explicitly
* sized types and aligned to their sizes, and the structs must be
* a multiple of eight bytes long.
*
* Only the state necessary for saving and restoring (i.e. fields
* that are analogous to actual hardware state) should go in this file.
* Only the state necessary for saving and restoring (i.e. fields
* that are analogous to actual hardware state) should go in this file.
* Internal mechanisms should be kept in Xen-private headers.
*/
@ -43,7 +43,7 @@
#error "Anonymous structs/unions are a GNU extension."
#endif
/*
/*
* Each entry is preceded by a descriptor giving its type and length
*/
struct hvm_save_descriptor {
@ -52,9 +52,10 @@ struct hvm_save_descriptor {
uint32_t length; /* In bytes, *not* including this descriptor */
};
/*
* Each entry has a datatype associated with it: for example, the CPU state
* is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU),
/*
* Each entry has a datatype associated with it: for example, the CPU state
* is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU),
* and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU).
* DECLARE_HVM_SAVE_TYPE binds these things together with some type-system
* ugliness.
@ -62,21 +63,23 @@ struct hvm_save_descriptor {
#ifdef __XEN__
# define DECLARE_HVM_SAVE_TYPE_COMPAT(_x, _code, _type, _ctype, _fix) \
static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h) { return _fix(h); } \
struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];}; \
struct __HVM_SAVE_TYPE_COMPAT_##_x { _ctype t; }
static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h, uint32_t size) \
{ return _fix(h, size); } \
struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];}; \
struct __HVM_SAVE_TYPE_COMPAT_##_x { _ctype t; }
# include <xen/lib.h> /* BUG() */
# define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h) { BUG(); return -1; } \
static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h, uint32_t size) \
{ BUG(); return -1; } \
struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}; \
struct __HVM_SAVE_TYPE_COMPAT_##_x { _type t; }
struct __HVM_SAVE_TYPE_COMPAT_##_x { _type t; }
#else
# define DECLARE_HVM_SAVE_TYPE_COMPAT(_x, _code, _type, _ctype, _fix) \
struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];}
struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];}
# define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}
struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}
#endif
#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
@ -88,11 +91,11 @@ struct hvm_save_descriptor {
# define HVM_SAVE_LENGTH_COMPAT(_x) (sizeof (HVM_SAVE_TYPE_COMPAT(_x)))
# define HVM_SAVE_HAS_COMPAT(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->cpt)-1)
# define HVM_SAVE_FIX_COMPAT(_x, _dst) __HVM_SAVE_FIX_COMPAT_##_x(_dst)
# define HVM_SAVE_FIX_COMPAT(_x, _dst, _size) __HVM_SAVE_FIX_COMPAT_##_x(_dst, _size)
#endif
/*
 * The series of save records is terminated by a zero-type, zero-length
/*
 * The series of save records is terminated by a zero-type, zero-length
* descriptor.
*/

129
sys/xen/interface/hypfs.h Normal file
View File

@ -0,0 +1,129 @@
/******************************************************************************
* Xen Hypervisor Filesystem
*
* Copyright (c) 2019, SUSE Software Solutions Germany GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __XEN_PUBLIC_HYPFS_H__
#define __XEN_PUBLIC_HYPFS_H__
#include "xen.h"
/*
* Definitions for the __HYPERVISOR_hypfs_op hypercall.
*/
/* Highest version number of the hypfs interface currently defined. */
#define XEN_HYPFS_VERSION 1
/* Maximum length of a path in the filesystem. */
#define XEN_HYPFS_MAX_PATHLEN 1024
/* Descriptor of a hypfs entry, as returned by XEN_HYPFS_OP_read. */
struct xen_hypfs_direntry {
uint8_t type;
#define XEN_HYPFS_TYPE_DIR 0
#define XEN_HYPFS_TYPE_BLOB 1
#define XEN_HYPFS_TYPE_STRING 2
#define XEN_HYPFS_TYPE_UINT 3
#define XEN_HYPFS_TYPE_INT 4
#define XEN_HYPFS_TYPE_BOOL 5
uint8_t encoding;
#define XEN_HYPFS_ENC_PLAIN 0
#define XEN_HYPFS_ENC_GZIP 1
uint16_t pad; /* Returned as 0. */
uint32_t content_len; /* Current length of data. */
uint32_t max_write_len; /* Max. length for writes (0 if read-only). */
};
/*
 * One element of a directory's contents; a directory read returns a
 * sequence of these chained via off_next (see XEN_HYPFS_OP_read).
 */
struct xen_hypfs_dirlistentry {
struct xen_hypfs_direntry e;
/* Offset in bytes to next entry (0 == this is the last entry). */
uint16_t off_next;
/* Zero terminated entry name, possibly with some padding for alignment. */
char name[XEN_FLEX_ARRAY_DIM];
};
/*
* Hypercall operations.
*/
/*
* XEN_HYPFS_OP_get_version
*
* Read highest interface version supported by the hypervisor.
*
* arg1 - arg4: all 0/NULL
*
* Possible return values:
* >0: highest supported interface version
* <0: negative Xen errno value
*/
#define XEN_HYPFS_OP_get_version 0
/*
* XEN_HYPFS_OP_read
*
* Read a filesystem entry.
*
* Returns the direntry and contents of an entry in the buffer supplied by the
* caller (struct xen_hypfs_direntry with the contents following directly
* after it).
* The data buffer must be at least the size of the direntry returned. If the
* data buffer was not large enough for all the data -ENOBUFS and no entry
* data is returned, but the direntry will contain the needed size for the
* returned data.
* The format of the contents is according to its entry type and encoding.
* The contents of a directory are multiple struct xen_hypfs_dirlistentry
* items.
*
* arg1: XEN_GUEST_HANDLE(path name)
* arg2: length of path name (including trailing zero byte)
* arg3: XEN_GUEST_HANDLE(data buffer written by hypervisor)
* arg4: data buffer size
*
* Possible return values:
* 0: success
* <0 : negative Xen errno value
*/
#define XEN_HYPFS_OP_read 1
/*
* XEN_HYPFS_OP_write_contents
*
* Write contents of a filesystem entry.
*
* Writes an entry with the contents of a buffer supplied by the caller.
* The data type and encoding can't be changed. The size can be changed only
* for blobs and strings.
*
* arg1: XEN_GUEST_HANDLE(path name)
* arg2: length of path name (including trailing zero byte)
* arg3: XEN_GUEST_HANDLE(content buffer read by hypervisor)
* arg4: content buffer size
*
* Possible return values:
* 0: success
* <0 : negative Xen errno value
*/
#define XEN_HYPFS_OP_write_contents 2
#endif /* __XEN_PUBLIC_HYPFS_H__ */

View File

@ -0,0 +1,49 @@
/*
* 9pfs.h -- Xen 9PFS transport
*
* Refer to docs/misc/9pfs.markdown for the specification
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
*/
#ifndef __XEN_PUBLIC_IO_9PFS_H__
#define __XEN_PUBLIC_IO_9PFS_H__
#include "../grant_table.h"
#include "ring.h"
/*
* See docs/misc/9pfs.markdown in xen.git for the full specification:
* https://xenbits.xen.org/docs/unstable/misc/9pfs.html
*/
DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs);
#endif
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -89,15 +89,23 @@
* Values: string
*
* A free formatted string providing sufficient information for the
* backend driver to open the backing device. (e.g. the path to the
* file or block device representing the backing store.)
* hotplug script to attach the device and provide a suitable
* handler (ie: a block device) for blkback to use.
*
* physical-device
* Values: "MAJOR:MINOR"
* Notes: 11
*
* MAJOR and MINOR are the major number and minor number of the
* backing device respectively.
*
* physical-device-path
* Values: path string
*
* A string that contains the absolute path to the disk image. On
* NetBSD and Linux this is always a block device, while on FreeBSD
* it can be either a block device or a regular file.
*
* type
* Values: "file", "phy", "tap"
*
@ -110,7 +118,7 @@
*
* The underlying storage is not affected by the direct IO memory
* lifetime bug. See:
* http://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
* https://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
*
* Therefore this option gives the backend permission to use
* O_DIRECT, notwithstanding that bug.
@ -208,10 +216,9 @@
* Default Value: 1
*
* This optional property, set by the toolstack, instructs the backend
* to offer discard to the frontend. If the property is missing the
* backend should offer discard if the backing storage actually supports
* it. This optional property, set by the toolstack, requests that the
* backend offer, or not offer, discard to the frontend.
* to offer (or not to offer) discard to the frontend. If the property
* is missing the backend should offer discard if the backing storage
* actually supports it.
*
* discard-alignment
* Values: <uint32_t>
@ -247,18 +254,26 @@
* sector-size
* Values: <uint32_t>
*
* The logical sector size, in bytes, of the backend device.
* The logical block size, in bytes, of the underlying storage. This
* must be a power of two with a minimum value of 512.
*
* NOTE: Because of implementation bugs in some frontends this must be
* set to 512, unless the frontend advertizes a non-zero value
* in its "feature-large-sector-size" xenbus node. (See below).
*
* physical-sector-size
* Values: <uint32_t>
* Default Value: <"sector-size">
*
* The physical sector size, in bytes, of the backend device.
* The physical block size, in bytes, of the backend storage. This
* must be an integer multiple of "sector-size".
*
* sectors
* Values: <uint64_t>
*
* The size of the backend device, expressed in units of its logical
* sector size ("sector-size").
* The size of the backend device, expressed in units of "sector-size".
* The product of "sector-size" and "sectors" must also be an integer
* multiple of "physical-sector-size", if that node is present.
*
*****************************************************************************
* Frontend XenBus Nodes
@ -314,6 +329,8 @@
* The size of the frontend allocated request ring buffer in units of
* machine pages. The value must be a power of 2.
*
*--------------------------------- Features ---------------------------------
*
* feature-persistent
* Values: 0/1 (boolean)
* Default Value: 0
@ -324,7 +341,7 @@
* access (even when it should be read-only). If the frontend hits the
* maximum number of allowed persistently mapped grants, it can fallback
* to non persistent mode. This will cause a performance degradation,
* since the backend driver will still try to map those grants
 * since the backend driver will still try to map those grants
* persistently. Since the persistent grants protocol is compatible with
* the previous protocol, a frontend driver can choose to work in
* persistent mode even when the backend doesn't support it.
@ -335,6 +352,17 @@
* decides to limit the maximum number of persistently mapped grants
* to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
*
* feature-large-sector-size
* Values: 0/1 (boolean)
* Default Value: 0
*
* A value of "1" indicates that the frontend will correctly supply and
* interpret all sector-based quantities in terms of the "sector-size"
* value supplied in the backend info, whatever that may be set to.
* If this node is not present or its value is "0" then it is assumed
* that the frontend requires that the logical block size is 512 as it
* is hardcoded (which is the case in some frontend implementations).
*
*------------------------- Virtual Device Properties -------------------------
*
* device-type
@ -391,6 +419,55 @@
* than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
*(10) The discard-secure property may be present and will be set to 1 if the
* backing device supports secure discard.
*(11) Only used by Linux and NetBSD.
*/
/*
* Multiple hardware queues/rings:
* If supported, the backend will write the key "multi-queue-max-queues" to
* the directory for that vbd, and set its value to the maximum supported
* number of queues.
* Frontends that are aware of this feature and wish to use it can write the
* key "multi-queue-num-queues" with the number they wish to use, which must be
* greater than zero, and no more than the value reported by the backend in
* "multi-queue-max-queues".
*
* For frontends requesting just one queue, the usual event-channel and
* ring-ref keys are written as before, simplifying the backend processing
* to avoid distinguishing between a frontend that doesn't understand the
* multi-queue feature, and one that does, but requested only one queue.
*
* Frontends requesting two or more queues must not write the toplevel
* event-channel and ring-ref keys, instead writing those keys under sub-keys
* having the name "queue-N" where N is the integer ID of the queue/ring for
* which those keys belong. Queues are indexed from zero.
* For example, a frontend with two queues must write the following set of
* queue-related keys:
*
* /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
* /local/domain/1/device/vbd/0/queue-0 = ""
* /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
* /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
* /local/domain/1/device/vbd/0/queue-1 = ""
* /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
* /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
*
* It is also possible to use multiple queues/rings together with
* feature multi-page ring buffer.
* For example, a frontend requests two queues/rings and the size of each ring
* buffer is two pages must write the following set of related keys:
*
* /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
* /local/domain/1/device/vbd/0/ring-page-order = "1"
* /local/domain/1/device/vbd/0/queue-0 = ""
* /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
* /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
* /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
* /local/domain/1/device/vbd/0/queue-1 = ""
* /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
* /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
* /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
*
*/
/*
@ -551,12 +628,11 @@
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
/*
* NB. first_sect and last_sect in blkif_request_segment, as well as
* sector_number in blkif_request, are always expressed in 512-byte units.
* However they must be properly aligned to the real sector size of the
* physical disk, which is reported in the "physical-sector-size" node in
* the backend xenbus info. Also the xenbus "sectors" node is expressed in
* 512-byte units.
* NB. 'first_sect' and 'last_sect' in blkif_request_segment, as well as
* 'sector_number' in blkif_request, blkif_request_discard and
* blkif_request_indirect are sector-based quantities. See the description
* of the "feature-large-sector-size" frontend xenbus node above for
* more information.
*/
struct blkif_request_segment {
grant_ref_t gref; /* reference to I/O buffer frame */

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,8 @@
/******************************************************************************
* console.h
*
*
* Console I/O interface for Xen guest OSes.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -38,6 +38,11 @@ struct xencons_interface {
XENCONS_RING_IDX out_cons, out_prod;
};
#ifdef XEN_WANT_FLEX_CONSOLE_RING
#include "ring.h"
DEFINE_XEN_FLEX_RING(xencons);
#endif
#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
/*

View File

@ -0,0 +1,872 @@
/******************************************************************************
* displif.h
*
* Unified display device I/O interface for Xen guest OSes.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2016-2017 EPAM Systems Inc.
*
* Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
* Oleksandr Grytsov <oleksandr_grytsov@epam.com>
*/
#ifndef __XEN_PUBLIC_IO_DISPLIF_H__
#define __XEN_PUBLIC_IO_DISPLIF_H__
#include "ring.h"
#include "../grant_table.h"
/*
******************************************************************************
* Protocol version
******************************************************************************
*/
#define XENDISPL_PROTOCOL_VERSION "1"
/*
******************************************************************************
* Main features provided by the protocol
******************************************************************************
* This protocol aims to provide a unified protocol which fits more
* sophisticated use-cases than a framebuffer device can handle. At the
* moment basic functionality is supported with the intention to be extended:
* o multiple dynamically allocated/destroyed framebuffers
* o buffers of arbitrary sizes
* o buffer allocation at either back or front end
* o better configuration options including multiple display support
*
* Note: existing fbif can be used together with displif running at the
* same time, e.g. on Linux one provides framebuffer and another DRM/KMS
*
* Note: display resolution (XenStore's "resolution" property) defines
* visible area of the virtual display. At the same time resolution of
* the display and frame buffers may differ: buffers can be smaller, equal
* or bigger than the visible area. This is to enable use-cases, where backend
* may do some post-processing of the display and frame buffers supplied,
* e.g. those buffers can be just a part of the final composition.
*
******************************************************************************
* Direction of improvements
******************************************************************************
* Future extensions to the existing protocol may include:
* o display/connector cloning
* o allocation of objects other than display buffers
* o plane/overlay support
* o scaling support
* o rotation support
*
******************************************************************************
* Feature and Parameter Negotiation
******************************************************************************
*
* Front->back notifications: when enqueuing a new request, sending a
* notification can be made conditional on xendispl_req (i.e., the generic
* hold-off mechanism provided by the ring macros). Backends must set
* xendispl_req appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
*
* Back->front notifications: when enqueuing a new response, sending a
* notification can be made conditional on xendispl_resp (i.e., the generic
* hold-off mechanism provided by the ring macros). Frontends must set
* xendispl_resp appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
*
* The two halves of a para-virtual display driver utilize nodes within
* XenStore to communicate capabilities and to negotiate operating parameters.
* This section enumerates these nodes which reside in the respective front and
* backend portions of XenStore, following the XenBus convention.
*
* All data in XenStore is stored as strings. Nodes specifying numeric
* values are encoded in decimal. Integer value ranges listed below are
* expressed as fixed sized integer types capable of storing the conversion
 * of a properly formatted node string, without loss of information.
*
******************************************************************************
* Example configuration
******************************************************************************
*
* Note: depending on the use-case backend can expose more display connectors
* than the underlying HW physically has by employing SW graphics compositors
*
* This is an example of backend and frontend configuration:
*
*--------------------------------- Backend -----------------------------------
*
* /local/domain/0/backend/vdispl/1/0/frontend-id = "1"
* /local/domain/0/backend/vdispl/1/0/frontend = "/local/domain/1/device/vdispl/0"
* /local/domain/0/backend/vdispl/1/0/state = "4"
* /local/domain/0/backend/vdispl/1/0/versions = "1,2"
*
*--------------------------------- Frontend ----------------------------------
*
* /local/domain/1/device/vdispl/0/backend-id = "0"
* /local/domain/1/device/vdispl/0/backend = "/local/domain/0/backend/vdispl/1/0"
* /local/domain/1/device/vdispl/0/state = "4"
* /local/domain/1/device/vdispl/0/version = "1"
* /local/domain/1/device/vdispl/0/be-alloc = "1"
*
*-------------------------- Connector 0 configuration ------------------------
*
* /local/domain/1/device/vdispl/0/0/resolution = "1920x1080"
* /local/domain/1/device/vdispl/0/0/req-ring-ref = "2832"
* /local/domain/1/device/vdispl/0/0/req-event-channel = "15"
* /local/domain/1/device/vdispl/0/0/evt-ring-ref = "387"
* /local/domain/1/device/vdispl/0/0/evt-event-channel = "16"
*
*-------------------------- Connector 1 configuration ------------------------
*
* /local/domain/1/device/vdispl/0/1/resolution = "800x600"
* /local/domain/1/device/vdispl/0/1/req-ring-ref = "2833"
* /local/domain/1/device/vdispl/0/1/req-event-channel = "17"
* /local/domain/1/device/vdispl/0/1/evt-ring-ref = "388"
* /local/domain/1/device/vdispl/0/1/evt-event-channel = "18"
*
******************************************************************************
* Backend XenBus Nodes
******************************************************************************
*
*----------------------------- Protocol version ------------------------------
*
* versions
* Values: <string>
*
* List of XENDISPL_LIST_SEPARATOR separated protocol versions supported
* by the backend. For example "1,2,3".
*
******************************************************************************
* Frontend XenBus Nodes
******************************************************************************
*
*-------------------------------- Addressing ---------------------------------
*
* dom-id
* Values: <uint16_t>
*
* Domain identifier.
*
* dev-id
* Values: <uint16_t>
*
* Device identifier.
*
* conn-idx
* Values: <uint8_t>
*
 * Zero-based contiguous index of the connector.
* /local/domain/<dom-id>/device/vdispl/<dev-id>/<conn-idx>/...
*
*----------------------------- Protocol version ------------------------------
*
* version
* Values: <string>
*
* Protocol version, chosen among the ones supported by the backend.
*
*------------------------- Backend buffer allocation -------------------------
*
* be-alloc
* Values: "0", "1"
*
* If value is set to "1", then backend can be a buffer provider/allocator
* for this domain during XENDISPL_OP_DBUF_CREATE operation (see below
* for negotiation).
* If value is not "1" or omitted frontend must allocate buffers itself.
*
*----------------------------- Connector settings ----------------------------
*
* unique-id
* Values: <string>
*
* After device instance initialization each connector is assigned a
* unique ID, so it can be identified by the backend by this ID.
* This can be UUID or such.
*
* resolution
* Values: <width, uint32_t>x<height, uint32_t>
*
* Width and height of the connector in pixels separated by
* XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the
* display.
*
*------------------ Connector Request Transport Parameters -------------------
*
* This communication path is used to deliver requests from frontend to backend
* and get the corresponding responses from backend to frontend,
* set up per connector.
*
* req-event-channel
* Values: <uint32_t>
*
* The identifier of the Xen connector's control event channel
* used to signal activity in the ring buffer.
*
* req-ring-ref
* Values: <uint32_t>
*
* The Xen grant reference granting permission for the backend to map
* a sole page of connector's control ring buffer.
*
*------------------- Connector Event Transport Parameters --------------------
*
* This communication path is used to deliver asynchronous events from backend
* to frontend, set up per connector.
*
* evt-event-channel
* Values: <uint32_t>
*
* The identifier of the Xen connector's event channel
* used to signal activity in the ring buffer.
*
* evt-ring-ref
* Values: <uint32_t>
*
* The Xen grant reference granting permission for the backend to map
* a sole page of connector's event ring buffer.
*/
/*
******************************************************************************
* STATE DIAGRAMS
******************************************************************************
*
* Tool stack creates front and back state nodes with initial state
* XenbusStateInitialising.
* Tool stack creates and sets up frontend display configuration
* nodes per domain.
*
*-------------------------------- Normal flow --------------------------------
*
* Front Back
* ================================= =====================================
* XenbusStateInitialising XenbusStateInitialising
* o Query backend device identification
* data.
* o Open and validate backend device.
* |
* |
* V
* XenbusStateInitWait
*
* o Query frontend configuration
* o Allocate and initialize
* event channels per configured
* connector.
* o Publish transport parameters
* that will be in effect during
* this connection.
* |
* |
* V
* XenbusStateInitialised
*
* o Query frontend transport parameters.
* o Connect to the event channels.
* |
* |
* V
* XenbusStateConnected
*
* o Create and initialize OS
* virtual display connectors
* as per configuration.
* |
* |
* V
* XenbusStateConnected
*
* XenbusStateUnknown
* XenbusStateClosed
* XenbusStateClosing
* o Remove virtual display device
* o Remove event channels
* |
* |
* V
* XenbusStateClosed
*
*------------------------------- Recovery flow -------------------------------
*
* In case of frontend unrecoverable errors backend handles that as
* if frontend goes into the XenbusStateClosed state.
*
* In case of backend unrecoverable errors frontend tries removing
* the virtualized device. If this is possible at the moment of error,
* then frontend goes into the XenbusStateInitialising state and is ready for
* new connection with backend. If the virtualized device is still in use and
* cannot be removed, then frontend goes into the XenbusStateReconfiguring state
* until either the virtualized device is removed or backend initiates a new
* connection. On the virtualized device removal frontend goes into the
* XenbusStateInitialising state.
*
* Note on XenbusStateReconfiguring state of the frontend: if backend has
* unrecoverable errors then frontend cannot send requests to the backend
* and thus cannot provide functionality of the virtualized device anymore.
* After backend is back to normal the virtualized device may still hold some
* state: configuration in use, allocated buffers, client application state etc.
* In most cases, this will require frontend to implement complex recovery
* reconnect logic. Instead, by going into XenbusStateReconfiguring state,
* frontend will make sure no new clients of the virtualized device are
* accepted, allow existing client(s) to exit gracefully by signaling error
* state etc.
* Once all the clients are gone frontend can reinitialize the virtualized
* device and get into XenbusStateInitialising state again signaling the
* backend that a new connection can be made.
*
* There are multiple conditions possible under which frontend will go from
* XenbusStateReconfiguring into XenbusStateInitialising, some of them are OS
* specific. For example:
* 1. The underlying OS framework may provide callbacks to signal that the last
* client of the virtualized device has gone and the device can be removed
* 2. Frontend can schedule a deferred work (timer/tasklet/workqueue)
* to periodically check if this is the right time to re-try removal of
* the virtualized device.
* 3. By any other means.
*
******************************************************************************
* REQUEST CODES
******************************************************************************
* Request codes [0; 15] are reserved and must not be used
*/
#define XENDISPL_OP_DBUF_CREATE 0x10
#define XENDISPL_OP_DBUF_DESTROY 0x11
#define XENDISPL_OP_FB_ATTACH 0x12
#define XENDISPL_OP_FB_DETACH 0x13
#define XENDISPL_OP_SET_CONFIG 0x14
#define XENDISPL_OP_PG_FLIP 0x15
/*
******************************************************************************
* EVENT CODES
******************************************************************************
*/
#define XENDISPL_EVT_PG_FLIP 0x00
/*
******************************************************************************
* XENSTORE FIELD AND PATH NAME STRINGS, HELPERS
******************************************************************************
*/
#define XENDISPL_DRIVER_NAME "vdispl"
#define XENDISPL_LIST_SEPARATOR ","
#define XENDISPL_RESOLUTION_SEPARATOR "x"
#define XENDISPL_FIELD_BE_VERSIONS "versions"
#define XENDISPL_FIELD_FE_VERSION "version"
#define XENDISPL_FIELD_REQ_RING_REF "req-ring-ref"
#define XENDISPL_FIELD_REQ_CHANNEL "req-event-channel"
#define XENDISPL_FIELD_EVT_RING_REF "evt-ring-ref"
#define XENDISPL_FIELD_EVT_CHANNEL "evt-event-channel"
#define XENDISPL_FIELD_RESOLUTION "resolution"
#define XENDISPL_FIELD_BE_ALLOC "be-alloc"
#define XENDISPL_FIELD_UNIQUE_ID "unique-id"
/*
******************************************************************************
* STATUS RETURN CODES
******************************************************************************
*
* Status return code is zero on success and -XEN_EXX on failure.
*
******************************************************************************
* Assumptions
******************************************************************************
* o usage of grant reference 0 as invalid grant reference:
* grant reference 0 is valid, but never exposed to a PV driver,
* because of the fact it is already in use/reserved by the PV console.
* o all references in this document to page sizes must be treated
* as pages of size XEN_PAGE_SIZE unless otherwise noted.
*
******************************************************************************
* Description of the protocol between frontend and backend driver
******************************************************************************
*
* The two halves of a Para-virtual display driver communicate with
* each other using shared pages and event channels.
* Shared page contains a ring with request/response packets.
*
* All reserved fields in the structures below must be 0.
* Display buffers's cookie of value 0 is treated as invalid.
* Framebuffer's cookie of value 0 is treated as invalid.
*
* For all request/response/event packets that use cookies:
* dbuf_cookie - uint64_t, unique to guest domain value used by the backend
* to map remote display buffer to its local one
* fb_cookie - uint64_t, unique to guest domain value used by the backend
* to map remote framebuffer to its local one
*
*---------------------------------- Requests ---------------------------------
*
* All requests/responses, which are not connector specific, must be sent over
* control ring of the connector which has the index value of 0:
* /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
*
* All request packets have the same length (64 octets)
* All request packets have common header:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | operation | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* id - uint16_t, private guest value, echoed in response
* operation - uint8_t, operation code, XENDISPL_OP_???
*
* Request dbuf creation - request creation of a display buffer.
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id |_OP_DBUF_CREATE | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | dbuf_cookie low 32-bit | 12
* +----------------+----------------+----------------+----------------+
* | dbuf_cookie high 32-bit | 16
* +----------------+----------------+----------------+----------------+
* | width | 20
* +----------------+----------------+----------------+----------------+
* | height | 24
* +----------------+----------------+----------------+----------------+
* | bpp | 28
* +----------------+----------------+----------------+----------------+
* | buffer_sz | 32
* +----------------+----------------+----------------+----------------+
* | flags | 36
* +----------------+----------------+----------------+----------------+
* | gref_directory | 40
* +----------------+----------------+----------------+----------------+
* | reserved | 44
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* Must be sent over control ring of the connector which has the index
* value of 0:
* /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
* All unused bits in flags field must be set to 0.
*
* An attempt to create multiple display buffers with the same dbuf_cookie is
* an error. dbuf_cookie can be re-used after destroying the corresponding
* display buffer.
*
* Width and height of the display buffers can be smaller, equal or bigger
* than the connector's resolution. Depth/pixel format of the individual
* buffers can differ as well.
*
* width - uint32_t, width in pixels
* height - uint32_t, height in pixels
* bpp - uint32_t, bits per pixel
* buffer_sz - uint32_t, buffer size to be allocated, octets
* flags - uint32_t, flags of the operation
* o XENDISPL_DBUF_FLG_REQ_ALLOC - if set, then backend is requested
* to allocate the buffer with the parameters provided in this request.
* Page directory is handled as follows:
* Frontend on request:
 *   o allocates pages for the directory (gref_directory,
 *     gref_dir_next_page(s))
* o grants permissions for the pages of the directory to the backend
* o sets gref_dir_next_page fields
* Backend on response:
* o grants permissions for the pages of the buffer allocated to
* the frontend
* o fills in page directory with grant references
* (gref[] in struct xendispl_page_directory)
* gref_directory - grant_ref_t, a reference to the first shared page
* describing shared buffer references. At least one page exists. If shared
* buffer size (buffer_sz) exceeds what can be addressed by this single page,
* then reference to the next page must be supplied (see gref_dir_next_page
* below)
*/
#define XENDISPL_DBUF_FLG_REQ_ALLOC (1 << 0)
/*
 * Payload of a XENDISPL_OP_DBUF_CREATE request: parameters of the display
 * buffer to be created.  See the wire-format diagram and field description
 * above for the full semantics of each member.
 */
struct xendispl_dbuf_create_req {
    uint64_t dbuf_cookie;       /* Unique (to guest domain) buffer cookie. */
    uint32_t width;             /* Width in pixels. */
    uint32_t height;            /* Height in pixels. */
    uint32_t bpp;               /* Bits per pixel. */
    uint32_t buffer_sz;         /* Buffer size to be allocated, octets. */
    uint32_t flags;             /* XENDISPL_DBUF_FLG_*; unused bits are 0. */
    grant_ref_t gref_directory; /* First page of the page directory. */
};
/*
* Shared page for XENDISPL_OP_DBUF_CREATE buffer descriptor (gref_directory in
* the request) employs a list of pages, describing all pages of the shared
* data buffer:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | gref_dir_next_page | 4
* +----------------+----------------+----------------+----------------+
* | gref[0] | 8
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | gref[i] | i*4+8
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | gref[N - 1] | N*4+8
* +----------------+----------------+----------------+----------------+
*
* gref_dir_next_page - grant_ref_t, reference to the next page describing
* page directory. Must be 0 if there are no more pages in the list.
* gref[i] - grant_ref_t, reference to a shared page of the buffer
* allocated at XENDISPL_OP_DBUF_CREATE
*
* Number of grant_ref_t entries in the whole page directory is not
* passed, but instead can be calculated as:
* num_grefs_total = (XENDISPL_OP_DBUF_CREATE.buffer_sz + XEN_PAGE_SIZE - 1) /
* XEN_PAGE_SIZE
*/
/*
 * One page of the display buffer's page directory: a singly linked list
 * of pages, each holding grant references of the shared buffer pages
 * (layout documented above).
 */
struct xendispl_page_directory {
    grant_ref_t gref_dir_next_page; /* next directory page, 0 if last */
    grant_ref_t gref[1];            /* Variable length */
};
/*
* Request dbuf destruction - destroy a previously allocated display buffer:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id |_OP_DBUF_DESTROY| reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | dbuf_cookie low 32-bit | 12
* +----------------+----------------+----------------+----------------+
* | dbuf_cookie high 32-bit | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* Must be sent over control ring of the connector which has the index
* value of 0:
* /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
*/
/*
 * XENDISPL_OP_DBUF_DESTROY request payload: destroy a previously
 * allocated display buffer.
 */
struct xendispl_dbuf_destroy_req {
    uint64_t dbuf_cookie; /* cookie of the display buffer to destroy */
};
/*
* Request framebuffer attachment - request attachment of a framebuffer to
* previously created display buffer.
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | _OP_FB_ATTACH | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | dbuf_cookie low 32-bit | 12
* +----------------+----------------+----------------+----------------+
* | dbuf_cookie high 32-bit | 16
* +----------------+----------------+----------------+----------------+
* | fb_cookie low 32-bit | 20
* +----------------+----------------+----------------+----------------+
* | fb_cookie high 32-bit | 24
* +----------------+----------------+----------------+----------------+
* | width | 28
* +----------------+----------------+----------------+----------------+
* | height | 32
* +----------------+----------------+----------------+----------------+
* | pixel_format | 36
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* Must be sent over control ring of the connector which has the index
* value of 0:
* /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
* Width and height can be smaller, equal or bigger than the connector's
* resolution.
*
* An attempt to create multiple frame buffers with the same fb_cookie is
* an error. fb_cookie can be re-used after destroying the corresponding
* frame buffer.
*
* width - uint32_t, width in pixels
* height - uint32_t, height in pixels
* pixel_format - uint32_t, pixel format of the framebuffer, FOURCC code
*/
/*
 * XENDISPL_OP_FB_ATTACH request payload: attach a framebuffer to a
 * previously created display buffer. Re-using an fb_cookie before the
 * corresponding framebuffer is destroyed is an error (see above).
 */
struct xendispl_fb_attach_req {
    uint64_t dbuf_cookie;  /* display buffer to attach to */
    uint64_t fb_cookie;    /* uniquely identifies this framebuffer */
    uint32_t width;        /* width in pixels */
    uint32_t height;       /* height in pixels */
    uint32_t pixel_format; /* pixel format of the framebuffer, FOURCC code */
};
/*
* Request framebuffer detach - detach a previously
* attached framebuffer from the display buffer in request:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | _OP_FB_DETACH | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | fb_cookie low 32-bit | 12
* +----------------+----------------+----------------+----------------+
* | fb_cookie high 32-bit | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* Must be sent over control ring of the connector which has the index
* value of 0:
* /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
*/
/*
 * XENDISPL_OP_FB_DETACH request payload: detach a previously attached
 * framebuffer from its display buffer.
 */
struct xendispl_fb_detach_req {
    uint64_t fb_cookie; /* cookie of the framebuffer to detach */
};
/*
* Request configuration set/reset - request to set or reset
* the configuration/mode of the display:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | _OP_SET_CONFIG | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | fb_cookie low 32-bit | 12
* +----------------+----------------+----------------+----------------+
* | fb_cookie high 32-bit | 16
* +----------------+----------------+----------------+----------------+
* | x | 20
* +----------------+----------------+----------------+----------------+
* | y | 24
* +----------------+----------------+----------------+----------------+
* | width | 28
* +----------------+----------------+----------------+----------------+
* | height | 32
* +----------------+----------------+----------------+----------------+
* | bpp | 40
* +----------------+----------------+----------------+----------------+
* | reserved | 44
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* Pass all zeros to reset, otherwise command is treated as
* configuration set.
* Framebuffer's cookie defines which framebuffer/dbuf must be
* displayed while enabling display (applying configuration).
* x, y, width and height are bound by the connector's resolution and must not
* exceed it.
*
* x - uint32_t, starting position in pixels by X axis
* y - uint32_t, starting position in pixels by Y axis
* width - uint32_t, width in pixels
* height - uint32_t, height in pixels
* bpp - uint32_t, bits per pixel
*/
/*
 * XENDISPL_OP_SET_CONFIG request payload: set (or, with all fields zero,
 * reset) the display configuration/mode. x, y, width and height are bound
 * by the connector's resolution (see above).
 */
struct xendispl_set_config_req {
    uint64_t fb_cookie; /* framebuffer to display while enabling display */
    uint32_t x;         /* starting position in pixels by X axis */
    uint32_t y;         /* starting position in pixels by Y axis */
    uint32_t width;     /* width in pixels */
    uint32_t height;    /* height in pixels */
    uint32_t bpp;       /* bits per pixel */
};
/*
* Request page flip - request to flip a page identified by the framebuffer
* cookie:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | _OP_PG_FLIP | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | fb_cookie low 32-bit | 12
* +----------------+----------------+----------------+----------------+
* | fb_cookie high 32-bit | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*/
/*
 * XENDISPL_OP_PG_FLIP request payload: flip the page identified by the
 * framebuffer cookie.
 */
struct xendispl_page_flip_req {
    uint64_t fb_cookie; /* framebuffer to flip */
};
/*
*---------------------------------- Responses --------------------------------
*
* All response packets have the same length (64 octets)
*
* All response packets have common header:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | status | 8
* +----------------+----------------+----------------+----------------+
* | reserved | 12
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*
* id - uint16_t, private guest value, echoed from request
* status - int32_t, response status, zero on success and -XEN_EXX on failure
*
*----------------------------------- Events ----------------------------------
*
* Events are sent via a shared page allocated by the front and propagated by
* evt-event-channel/evt-ring-ref XenStore entries
* All event packets have the same length (64 octets)
* All event packets have common header:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | type | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
*
* id - uint16_t, event id, may be used by front
* type - uint8_t, type of the event
*
*
* Page flip complete event - event from back to front on page flip completed:
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | id | _EVT_PG_FLIP | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | fb_cookie low 32-bit | 12
* +----------------+----------------+----------------+----------------+
* | fb_cookie high 32-bit | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 64
* +----------------+----------------+----------------+----------------+
*/
/*
 * XENDISPL_EVT_PG_FLIP event payload: sent from back to front when a
 * page flip has completed.
 */
struct xendispl_pg_flip_evt {
    uint64_t fb_cookie; /* framebuffer whose flip completed */
};
/*
 * Request packet, 64 octets on the wire: common header followed by an
 * operation-specific payload (layouts documented in the diagrams above).
 */
struct xendispl_req {
    uint16_t id;         /* private guest value, echoed in the response */
    uint8_t operation;   /* XENDISPL_OP_??? */
    uint8_t reserved[5];
    union {
        struct xendispl_dbuf_create_req dbuf_create;
        struct xendispl_dbuf_destroy_req dbuf_destroy;
        struct xendispl_fb_attach_req fb_attach;
        struct xendispl_fb_detach_req fb_detach;
        struct xendispl_set_config_req set_config;
        struct xendispl_page_flip_req pg_flip;
        uint8_t reserved[56]; /* pads the packet to 64 octets */
    } op;
};
/* Response packet, 64 octets on the wire (layout documented above). */
struct xendispl_resp {
    uint16_t id;          /* private guest value, echoed from request */
    uint8_t operation;    /* XENDISPL_OP_???, copied from the request */
    uint8_t reserved;
    int32_t status;       /* zero on success, -XEN_EXX on failure */
    uint8_t reserved1[56]; /* pads the packet to 64 octets */
};
/*
 * Event packet, 64 octets, delivered back to front via the shared event
 * page (layout documented above).
 */
struct xendispl_evt {
    uint16_t id;         /* event id, may be used by front */
    uint8_t type;        /* XENDISPL_EVT_??? */
    uint8_t reserved[5];
    union {
        struct xendispl_pg_flip_evt pg_flip;
        uint8_t reserved[56]; /* pads the packet to 64 octets */
    } op;
};
/*
 * Instantiate the standard Xen shared-ring types (xen_displif_*) for the
 * request/response protocol — see public/io/ring.h.
 */
DEFINE_RING_TYPES(xen_displif, struct xendispl_req, struct xendispl_resp);
/*
******************************************************************************
* Back to front events delivery
******************************************************************************
* In order to deliver asynchronous events from back to front a shared page is
* allocated by front and its granted reference propagated to back via
* XenStore entries (evt-ring-ref/evt-event-channel).
* This page has a common header used by both front and back to synchronize
* access and control event's ring buffer, while back being a producer of the
* events and front being a consumer. The rest of the page after the header
* is used for event packets.
*
* Upon reception of an event(s) front may confirm its reception
* for either each event, group of events or none.
*/
/*
 * Header of the shared event page (allocated by the front): synchronizes
 * access to the event ring that follows it, with back as producer and
 * front as consumer (see the description above).
 */
struct xendispl_event_page {
    uint32_t in_cons;     /* consumer index, updated by front */
    uint32_t in_prod;     /* producer index, updated by back */
    uint8_t reserved[56];
};
/* Size of the shared event page, octets. */
#define XENDISPL_EVENT_PAGE_SIZE 4096
/* Offset of the event ring within the event page, past the header. */
#define XENDISPL_IN_RING_OFFS (sizeof(struct xendispl_event_page))
/* Octets of the event page available for event packets. */
#define XENDISPL_IN_RING_SIZE (XENDISPL_EVENT_PAGE_SIZE - XENDISPL_IN_RING_OFFS)
/* Ring capacity, in struct xendispl_evt entries. */
#define XENDISPL_IN_RING_LEN (XENDISPL_IN_RING_SIZE / sizeof(struct xendispl_evt))
/* Pointer to the first event packet in a given event page's ring. */
#define XENDISPL_IN_RING(page) \
    ((struct xendispl_evt *)((char *)(page) + XENDISPL_IN_RING_OFFS))
/* Event packet at index idx, wrapping modulo the ring length. */
#define XENDISPL_IN_RING_REF(page, idx) \
    (XENDISPL_IN_RING((page))[(idx) % XENDISPL_IN_RING_LEN])
#endif /* __XEN_PUBLIC_IO_DISPLIF_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -1,8 +1,8 @@
/******************************************************************************
* fsif.h
*
*
* Interface to FS level split device drivers.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -120,7 +120,7 @@ struct fsif_list_request {
#define ERROR_SIZE 32 /* 32 bits for the error mask */
#define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT)
#define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE)
#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE)
#define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT)
struct fsif_chmod_request {
@ -136,6 +136,7 @@ struct fsif_sync_request {
uint32_t fd;
};
/* FS operation request */
struct fsif_request {
uint8_t type; /* Type of the request */
@ -187,4 +188,5 @@ DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response);
#define STATE_CLOSING "closing"
#define STATE_CLOSED "closed"
#endif

View File

@ -26,46 +26,475 @@
#ifndef __XEN_PUBLIC_IO_KBDIF_H__
#define __XEN_PUBLIC_IO_KBDIF_H__
/* In events (backend -> frontend) */
/*
*****************************************************************************
* Feature and Parameter Negotiation
*****************************************************************************
*
* The two halves of a para-virtual driver utilize nodes within
* XenStore to communicate capabilities and to negotiate operating parameters.
* This section enumerates these nodes which reside in the respective front and
* backend portions of XenStore, following XenBus convention.
*
* All data in XenStore is stored as strings. Nodes specifying numeric
* values are encoded in decimal. Integer value ranges listed below are
* expressed as fixed sized integer types capable of storing the conversion
 * of a properly formatted node string, without loss of information.
*
*****************************************************************************
* Backend XenBus Nodes
*****************************************************************************
*
*---------------------------- Features supported ----------------------------
*
* Capable backend advertises supported features by publishing
* corresponding entries in XenStore and puts 1 as the value of the entry.
* If a feature is not supported then 0 must be set or feature entry omitted.
*
* feature-disable-keyboard
* Values: <uint>
*
* If there is no need to expose a virtual keyboard device by the
* frontend then this must be set to 1.
*
* feature-disable-pointer
* Values: <uint>
*
* If there is no need to expose a virtual pointer device by the
* frontend then this must be set to 1.
*
* feature-abs-pointer
* Values: <uint>
*
* Backends, which support reporting of absolute coordinates for pointer
* device should set this to 1.
*
* feature-multi-touch
* Values: <uint>
*
* Backends, which support reporting of multi-touch events
* should set this to 1.
*
* feature-raw-pointer
* Values: <uint>
*
* Backends, which support reporting raw (unscaled) absolute coordinates
* for pointer devices should set this to 1. Raw (unscaled) values have
* a range of [0, 0x7fff].
*
*----------------------- Device Instance Parameters ------------------------
*
* unique-id
* Values: <string>
*
* After device instance initialization it is assigned a unique ID,
* so every instance of the frontend can be identified by the backend
* by this ID. This can be UUID or such.
*
*------------------------- Pointer Device Parameters ------------------------
*
* width
* Values: <uint>
*
* Maximum X coordinate (width) to be used by the frontend
* while reporting input events, pixels, [0; UINT32_MAX].
*
* height
* Values: <uint>
*
* Maximum Y coordinate (height) to be used by the frontend
* while reporting input events, pixels, [0; UINT32_MAX].
*
*----------------------- Multi-touch Device Parameters ----------------------
*
* multi-touch-num-contacts
* Values: <uint>
*
* Number of simultaneous touches reported.
*
* multi-touch-width
* Values: <uint>
*
* Width of the touch area to be used by the frontend
* while reporting input events, pixels, [0; UINT32_MAX].
*
* multi-touch-height
* Values: <uint>
*
* Height of the touch area to be used by the frontend
* while reporting input events, pixels, [0; UINT32_MAX].
*
*****************************************************************************
* Frontend XenBus Nodes
*****************************************************************************
*
*------------------------------ Feature request -----------------------------
*
* Capable frontend requests features from backend via setting corresponding
* entries to 1 in XenStore. Requests for features not advertised as supported
* by the backend have no effect.
*
* request-abs-pointer
* Values: <uint>
*
* Request backend to report absolute pointer coordinates
* (XENKBD_TYPE_POS) instead of relative ones (XENKBD_TYPE_MOTION).
*
* request-multi-touch
* Values: <uint>
*
* Request backend to report multi-touch events.
*
* request-raw-pointer
* Values: <uint>
*
* Request backend to report raw unscaled absolute pointer coordinates.
* This option is only valid if request-abs-pointer is also set.
* Raw unscaled coordinates have the range [0, 0x7fff]
*
*----------------------- Request Transport Parameters -----------------------
*
* event-channel
* Values: <uint>
*
* The identifier of the Xen event channel used to signal activity
* in the ring buffer.
*
* page-gref
* Values: <uint>
*
* The Xen grant reference granting permission for the backend to map
* a sole page in a single page sized event ring buffer.
*
* page-ref
* Values: <uint>
*
* OBSOLETE, not recommended for use.
* PFN of the shared page.
*/
/*
* EVENT CODES.
*/
#define XENKBD_TYPE_MOTION 1
#define XENKBD_TYPE_RESERVED 2
#define XENKBD_TYPE_KEY 3
#define XENKBD_TYPE_POS 4
#define XENKBD_TYPE_MTOUCH 5
/* Multi-touch event sub-codes */
#define XENKBD_MT_EV_DOWN 0
#define XENKBD_MT_EV_UP 1
#define XENKBD_MT_EV_MOTION 2
#define XENKBD_MT_EV_SYN 3
#define XENKBD_MT_EV_SHAPE 4
#define XENKBD_MT_EV_ORIENT 5
/*
* CONSTANTS, XENSTORE FIELD AND PATH NAME STRINGS, HELPERS.
*/
#define XENKBD_DRIVER_NAME "vkbd"
#define XENKBD_FIELD_FEAT_DSBL_KEYBRD "feature-disable-keyboard"
#define XENKBD_FIELD_FEAT_DSBL_POINTER "feature-disable-pointer"
#define XENKBD_FIELD_FEAT_ABS_POINTER "feature-abs-pointer"
#define XENKBD_FIELD_FEAT_RAW_POINTER "feature-raw-pointer"
#define XENKBD_FIELD_FEAT_MTOUCH "feature-multi-touch"
#define XENKBD_FIELD_REQ_ABS_POINTER "request-abs-pointer"
#define XENKBD_FIELD_REQ_RAW_POINTER "request-raw-pointer"
#define XENKBD_FIELD_REQ_MTOUCH "request-multi-touch"
#define XENKBD_FIELD_RING_GREF "page-gref"
#define XENKBD_FIELD_EVT_CHANNEL "event-channel"
#define XENKBD_FIELD_WIDTH "width"
#define XENKBD_FIELD_HEIGHT "height"
#define XENKBD_FIELD_MT_WIDTH "multi-touch-width"
#define XENKBD_FIELD_MT_HEIGHT "multi-touch-height"
#define XENKBD_FIELD_MT_NUM_CONTACTS "multi-touch-num-contacts"
#define XENKBD_FIELD_UNIQUE_ID "unique-id"
/* OBSOLETE, not recommended for use */
#define XENKBD_FIELD_RING_REF "page-ref"
/*
*****************************************************************************
* Description of the protocol between frontend and backend driver.
*****************************************************************************
*
* The two halves of a Para-virtual driver communicate with
* each other using a shared page and an event channel.
* Shared page contains a ring with event structures.
*
* All reserved fields in the structures below must be 0.
*
*****************************************************************************
* Backend to frontend events
*****************************************************************************
*
* Frontends should ignore unknown in events.
* All event packets have the same length (40 octets)
* All event packets have common header:
*
* 0 octet
* +-----------------+
* | type |
* +-----------------+
* type - uint8_t, event code, XENKBD_TYPE_???
*
*
* Pointer relative movement event
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MOTION | reserved | 4
* +----------------+----------------+----------------+----------------+
* | rel_x | 8
* +----------------+----------------+----------------+----------------+
* | rel_y | 12
* +----------------+----------------+----------------+----------------+
* | rel_z | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
* rel_x - int32_t, relative X motion
* rel_y - int32_t, relative Y motion
* rel_z - int32_t, relative Z motion (wheel)
*/
/* Pointer movement event */
#define XENKBD_TYPE_MOTION 1
/* Event type 2 currently not used */
/* Key event (includes pointer buttons) */
#define XENKBD_TYPE_KEY 3
/*
* Pointer position event
* Capable backend sets feature-abs-pointer in xenstore.
 * Frontend requests it instead of XENKBD_TYPE_MOTION by setting
 * request-abs-pointer in xenstore.
*/
#define XENKBD_TYPE_POS 4
struct xenkbd_motion
{
uint8_t type; /* XENKBD_TYPE_MOTION */
int32_t rel_x; /* relative X motion */
int32_t rel_y; /* relative Y motion */
int32_t rel_z; /* relative Z motion (wheel) */
uint8_t type;
int32_t rel_x;
int32_t rel_y;
int32_t rel_z;
};
/*
* Key event (includes pointer buttons)
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_KEY | pressed | reserved | 4
* +----------------+----------------+----------------+----------------+
* | keycode | 8
* +----------------+----------------+----------------+----------------+
* | reserved | 12
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
* pressed - uint8_t, 1 if pressed; 0 otherwise
* keycode - uint32_t, KEY_* from linux/input.h
*/
struct xenkbd_key
{
uint8_t type; /* XENKBD_TYPE_KEY */
uint8_t pressed; /* 1 if pressed; 0 otherwise */
uint32_t keycode; /* KEY_* from linux/input.h */
uint8_t type;
uint8_t pressed;
uint32_t keycode;
};
/*
* Pointer absolute position event
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_POS | reserved | 4
* +----------------+----------------+----------------+----------------+
* | abs_x | 8
* +----------------+----------------+----------------+----------------+
* | abs_y | 12
* +----------------+----------------+----------------+----------------+
* | rel_z | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
* abs_x - int32_t, absolute X position (in FB pixels)
* abs_y - int32_t, absolute Y position (in FB pixels)
* rel_z - int32_t, relative Z motion (wheel)
*/
struct xenkbd_position
{
uint8_t type; /* XENKBD_TYPE_POS */
int32_t abs_x; /* absolute X position (in FB pixels) */
int32_t abs_y; /* absolute Y position (in FB pixels) */
int32_t rel_z; /* relative Z motion (wheel) */
uint8_t type;
int32_t abs_x;
int32_t abs_y;
int32_t rel_z;
};
/*
* Multi-touch event and its sub-types
*
* All multi-touch event packets have common header:
*
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MTOUCH | event_type | contact_id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
*
 * event_type - uint8_t, multi-touch event sub-type, XENKBD_MT_EV_???
 * contact_id - uint8_t, ID of the contact
*
* Touch interactions can consist of one or more contacts.
* For each contact, a series of events is generated, starting
* with a down event, followed by zero or more motion events,
* and ending with an up event. Events relating to the same
* contact point can be identified by the ID of the sequence: contact ID.
* Contact ID may be reused after XENKBD_MT_EV_UP event and
* is in the [0; XENKBD_FIELD_NUM_CONTACTS - 1] range.
*
* For further information please refer to documentation on Wayland [1],
* Linux [2] and Windows [3] multi-touch support.
*
* [1] https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml
* [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt
* [3] https://msdn.microsoft.com/en-us/library/jj151564(v=vs.85).aspx
*
*
* Multi-touch down event - sent when a new touch is made: touch is assigned
* a unique contact ID, sent with this and consequent events related
* to this touch.
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MTOUCH | _MT_EV_DOWN | contact_id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | abs_x | 12
* +----------------+----------------+----------------+----------------+
* | abs_y | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
* abs_x - int32_t, absolute X position, in pixels
* abs_y - int32_t, absolute Y position, in pixels
*
* Multi-touch contact release event
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MTOUCH | _MT_EV_UP | contact_id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
* Multi-touch motion event
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MTOUCH | _MT_EV_MOTION | contact_id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | abs_x | 12
* +----------------+----------------+----------------+----------------+
* | abs_y | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
* abs_x - int32_t, absolute X position, in pixels,
* abs_y - int32_t, absolute Y position, in pixels,
*
* Multi-touch input synchronization event - shows end of a set of events
* which logically belong together.
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MTOUCH | _MT_EV_SYN | contact_id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
 * Multi-touch shape event - touch point has changed its shape.
* Shape is approximated by an ellipse through the major and minor axis
* lengths: major is the longer diameter of the ellipse and minor is the
* shorter one. Center of the ellipse is reported via
* XENKBD_MT_EV_DOWN/XENKBD_MT_EV_MOTION events.
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MTOUCH | _MT_EV_SHAPE | contact_id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | major | 12
* +----------------+----------------+----------------+----------------+
* | minor | 16
* +----------------+----------------+----------------+----------------+
* | reserved | 20
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
 * major - uint32_t, length of the major axis, pixels
 * minor - uint32_t, length of the minor axis, pixels
*
* Multi-touch orientation event - touch point's shape has changed
* its orientation: calculated as a clockwise angle between the major axis
* of the ellipse and positive Y axis in degrees, [-180; +180].
* 0 1 2 3 octet
* +----------------+----------------+----------------+----------------+
* | _TYPE_MTOUCH | _MT_EV_ORIENT | contact_id | reserved | 4
* +----------------+----------------+----------------+----------------+
* | reserved | 8
* +----------------+----------------+----------------+----------------+
* | orientation | reserved | 12
* +----------------+----------------+----------------+----------------+
* | reserved | 16
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
* | reserved | 40
* +----------------+----------------+----------------+----------------+
*
* orientation - int16_t, clockwise angle of the major axis
*/
/*
 * Multi-touch event (XENKBD_TYPE_MTOUCH). Which union member is valid is
 * selected by event_type; per-sub-type layouts are documented above.
 */
struct xenkbd_mtouch {
    uint8_t type;            /* XENKBD_TYPE_MTOUCH */
    uint8_t event_type;      /* XENKBD_MT_EV_??? */
    uint8_t contact_id;      /* ID of the contact, see the description above */
    uint8_t reserved[5];     /* reserved for the future use */
    union {
        struct {             /* valid for _MT_EV_DOWN and _MT_EV_MOTION */
            int32_t abs_x;   /* absolute X position, pixels */
            int32_t abs_y;   /* absolute Y position, pixels */
        } pos;
        struct {             /* valid for _MT_EV_SHAPE */
            uint32_t major;  /* length of the major axis, pixels */
            uint32_t minor;  /* length of the minor axis, pixels */
        } shape;
        int16_t orientation; /* clockwise angle of the major axis */
    } u;
};
#define XENKBD_IN_EVENT_SIZE 40
@ -76,15 +505,26 @@ union xenkbd_in_event
struct xenkbd_motion motion;
struct xenkbd_key key;
struct xenkbd_position pos;
struct xenkbd_mtouch mtouch;
char pad[XENKBD_IN_EVENT_SIZE];
};
/* Out events (frontend -> backend) */
/*
*****************************************************************************
* Frontend to backend events
*****************************************************************************
*
* Out events may be sent only when requested by backend, and receipt
* of an unknown out event is an error.
* No out events currently defined.
* All event packets have the same length (40 octets)
* All event packets have common header:
* 0 octet
* +-----------------+
* | type |
* +-----------------+
* type - uint8_t, event code
*/
#define XENKBD_OUT_EVENT_SIZE 40
@ -95,7 +535,11 @@ union xenkbd_out_event
char pad[XENKBD_OUT_EVENT_SIZE];
};
/* shared page */
/*
*****************************************************************************
* Shared page
*****************************************************************************
*/
#define XENKBD_IN_RING_SIZE 2048
#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
@ -119,7 +563,7 @@ struct xenkbd_page
uint32_t out_cons, out_prod;
};
#endif
#endif /* __XEN_PUBLIC_IO_KBDIF_H__ */
/*
* Local variables:

View File

@ -98,3 +98,4 @@ struct vchan_interface {
*/
uint32_t grants[0];
};

View File

@ -1,8 +1,8 @@
/******************************************************************************
* netif.h
*
*
* Unified network-device I/O interface for Xen guest OSes.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -136,35 +136,649 @@
*/
/*
* "feature-multicast-control" advertises the capability to filter ethernet
* multicast packets in the backend. To enable use of this capability the
* frontend must set "request-multicast-control" before moving into the
* connected state.
* "feature-multicast-control" and "feature-dynamic-multicast-control"
* advertise the capability to filter ethernet multicast packets in the
* backend. If the frontend wishes to take advantage of this feature then
* it may set "request-multicast-control". If the backend only advertises
* "feature-multicast-control" then "request-multicast-control" must be set
* before the frontend moves into the connected state. The backend will
* sample the value on this state transition and any subsequent change in
* value will have no effect. However, if the backend also advertises
* "feature-dynamic-multicast-control" then "request-multicast-control"
* may be set by the frontend at any time. In this case, the backend will
* watch the value and re-sample on watch events.
*
* If "request-multicast-control" is set then the backend transmit side should
* no longer flood multicast packets to the frontend, it should instead drop any
* multicast packet that does not match in a filter list. The list is
* amended by the frontend by sending dummy transmit requests containing
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as specified below.
* Once enabled by the frontend, the feature cannot be disabled except by
* closing and re-connecting to the backend.
* If the sampled value of "request-multicast-control" is set then the
* backend transmit side should no longer flood multicast packets to the
* frontend, it should instead drop any multicast packet that does not
* match in a filter list.
* The list is amended by the frontend by sending dummy transmit requests
* containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as
* specified below.
* Note that the filter list may be amended even if the sampled value of
* "request-multicast-control" is not set, however the filter should only
* be applied if it is set.
*/
/*
* This is the 'wire' format for packets:
* Request 1: netif_tx_request_t -- NETTXF_* (any flags)
* [Request 2: netif_extra_info_t] (only if request 1 has NETTXF_extra_info)
* [Request 3: netif_extra_info_t] (only if request 2 has XEN_NETIF_EXTRA_MORE)
* Request 4: netif_tx_request_t -- NETTXF_more_data
* Request 5: netif_tx_request_t -- NETTXF_more_data
* ...
* Request N: netif_tx_request_t -- 0
* Control ring
* ============
*
* Some features, such as hashing (detailed below), require a
* significant amount of out-of-band data to be passed from frontend to
* backend. Use of xenstore is not suitable for large quantities of data
* because of quota limitations and so a dedicated 'control ring' is used.
* The ability of the backend to use a control ring is advertised by
* setting:
*
* /local/domain/X/backend/vif/<domid>/<vif>/feature-ctrl-ring = "1"
*
* The frontend provides a control ring to the backend by setting:
*
* /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref>
* /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port>
*
* where <gref> is the grant reference of the shared page used to
* implement the control ring and <port> is an event channel to be used
* as a mailbox interrupt. These keys must be set before the frontend
* moves into the connected state.
*
* The control ring uses a fixed request/response message size and is
* balanced (i.e. one request to one response), so operationally it is much
* the same as a transmit or receive ring.
* Note that there is no requirement that responses are issued in the same
* order as requests.
*/
/*
* Link state
* ==========
*
* The backend can advertise its current link (carrier) state to the
* frontend using the /local/domain/X/backend/vif/<domid>/<vif>/carrier
* node. If this node is not present, then the frontend should assume that
* the link is up (for compatibility with backends that do not implement
* this feature). If this node is present, then a value of "0" should be
* interpreted by the frontend as the link being down (no carrier) and a
* value of "1" should be interpreted as the link being up (carrier
* present).
*/
/*
* Hash types
* ==========
*
* For the purposes of the definitions below, 'Packet[]' is an array of
* octets containing an IP packet without options, 'Array[X..Y]' means a
* sub-array of 'Array' containing bytes X thru Y inclusive, and '+' is
* used to indicate concatenation of arrays.
*/
/*
* A hash calculated over an IP version 4 header as follows:
*
* Buffer[0..8] = Packet[12..15] (source address) +
* Packet[16..19] (destination address)
*
* Result = Hash(Buffer, 8)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0
#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4)
/*
* A hash calculated over an IP version 4 header and TCP header as
* follows:
*
* Buffer[0..12] = Packet[12..15] (source address) +
* Packet[16..19] (destination address) +
* Packet[20..21] (source port) +
* Packet[22..23] (destination port)
*
* Result = Hash(Buffer, 12)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1
#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
/*
* A hash calculated over an IP version 6 header as follows:
*
* Buffer[0..32] = Packet[8..23] (source address ) +
* Packet[24..39] (destination address)
*
* Result = Hash(Buffer, 32)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2
#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6)
/*
* A hash calculated over an IP version 6 header and TCP header as
* follows:
*
* Buffer[0..36] = Packet[8..23] (source address) +
* Packet[24..39] (destination address) +
* Packet[40..41] (source port) +
* Packet[42..43] (destination port)
*
* Result = Hash(Buffer, 36)
*/
#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3
#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \
(1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
/*
* Hash algorithms
* ===============
*/
#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0
/*
* Toeplitz hash:
*/
#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
/*
* This algorithm uses a 'key' as well as the data buffer itself.
* (Buffer[] and Key[] are treated as shift-registers where the MSB of
* Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
* is the 'right-most').
*
* Value = 0
* For number of bits in Buffer[]
* If (left-most bit of Buffer[] is 1)
* Value ^= left-most 32 bits of Key[]
* Key[] << 1
* Buffer[] << 1
*
* The code below is provided for convenience where an operating system
* does not already provide an implementation.
*/
#ifdef XEN_NETIF_DEFINE_TOEPLITZ
/*
 * Reference Toeplitz hash implementation (see the algorithm
 * description above). 'key' and 'buf' are treated as big-endian bit
 * strings; key octets beyond 'keylen' read as zero. The valid hash
 * is the upper 32 bits of the 64-bit accumulator.
 */
static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
                                        unsigned int keylen,
                                        const uint8_t *buf,
                                        unsigned int buflen)
{
    uint64_t window = 0; /* next 64 key bits, MSB-aligned */
    uint64_t accum = 0;
    unsigned int i, next = 0;

    /* Load the first 8 key octets into the window. */
    for (i = 0; i < 8; i++, next++)
        window = (window << 8) | ((next < keylen) ? key[next] : 0);

    for (i = 0; i < buflen; i++) {
        uint8_t octet = buf[i];
        unsigned int bit;

        for (bit = 0; bit < 8; bit++) {
            if (octet & 0x80)
                accum ^= window;
            window <<= 1;
            octet <<= 1;
        }

        /*
         * Eight key bits have been shifted out of the window;
         * refill with the next key octet.
         */
        window |= (next < keylen) ? key[next] : 0;
        next++;
    }

    /* The valid part of the hash is in the upper 32 bits. */
    return accum >> 32;
}
#endif /* XEN_NETIF_DEFINE_TOEPLITZ */
/*
* Control requests (struct xen_netif_ctrl_request)
* ================================================
*
* All requests have the following format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | type | data[0] |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | data[1] | data[2] |
* +-----+-----+-----+-----+-----------------------+
*
* id: the request identifier, echoed in response.
* type: the type of request (see below)
* data[]: any data associated with the request (determined by type)
*/
/*
 * Control ring request. 'type' selects the operation (one of the
 * XEN_NETIF_CTRL_TYPE_* values below) and determines the meaning of
 * data[] — see the per-message descriptions further down. Layout is
 * ABI (matches the wire-format diagram above).
 */
struct xen_netif_ctrl_request {
    uint16_t id;    /* request identifier, echoed in the response */
    uint16_t type;  /* XEN_NETIF_CTRL_TYPE_* */
#define XEN_NETIF_CTRL_TYPE_INVALID               0
#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS        1
#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS        2
#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY          3
#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4
#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5
#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING      6
#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM    7
#define XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE 8
#define XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING      9
#define XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING     10
    uint32_t data[3]; /* request-specific data; meaning depends on 'type' */
};
/*
* Control responses (struct xen_netif_ctrl_response)
* ==================================================
*
* All responses have the following format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | type | status |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | data |
* +-----+-----+-----+-----+
*
* id: the corresponding request identifier
* type: the type of the corresponding request
* status: the status of request processing
* data: any data associated with the response (determined by type and
* status)
*/
/*
 * Control ring response. 'id' and 'type' echo the corresponding
 * request; 'status' reports the outcome and 'data' carries any
 * type- and status-dependent payload (see the message descriptions
 * below). Layout is ABI.
 */
struct xen_netif_ctrl_response {
    uint16_t id;      /* identifier of the corresponding request */
    uint16_t type;    /* type of the corresponding request */
    uint32_t status;  /* XEN_NETIF_CTRL_STATUS_* */
#define XEN_NETIF_CTRL_STATUS_SUCCESS           0
#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED     1
#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2
#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW   3
    uint32_t data;    /* response data, determined by type and status */
};
/*
* Static Grants (struct xen_netif_gref)
* =====================================
*
* A frontend may provide a fixed set of grant references to be mapped on
* the backend. The message of type XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
 * prior to its usage in the command ring allows for creation of these mappings.
* The backend will maintain a fixed amount of these mappings.
*
* XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE lets a frontend query how many
* of these mappings can be kept.
*
* Each entry in the XEN_NETIF_CTRL_TYPE_{ADD,DEL}_GREF_MAPPING input table has
* the following format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | grant ref | flags | status |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* grant ref: grant reference (IN)
* flags: flags describing the control operation (IN)
* status: XEN_NETIF_CTRL_STATUS_* (OUT)
*
 * 'status' is an output parameter which does not need to be set to zero
* prior to its usage in the corresponding control messages.
*/
/*
 * Entry of the XEN_NETIF_CTRL_TYPE_{ADD,DEL}_GREF_MAPPING input
 * table (layout described above). 'ref' and 'flags' are inputs from
 * the frontend; 'status' is written by the backend.
 */
struct xen_netif_gref {
    grant_ref_t ref;   /* IN: grant reference to (un)map */
    uint16_t flags;    /* IN: XEN_NETIF_CTRLF_GREF_* */
#define _XEN_NETIF_CTRLF_GREF_readonly    0
#define XEN_NETIF_CTRLF_GREF_readonly    (1U<<_XEN_NETIF_CTRLF_GREF_readonly)
    uint16_t status;   /* OUT: XEN_NETIF_CTRL_STATUS_* */
};
/*
* Control messages
* ================
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
* --------------------------------------
*
* This is sent by the frontend to set the desired hash algorithm.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
* data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not
* supported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
*
* NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables
* hashing and the backend is free to choose how it steers packets
* to queues (which is the default behaviour).
*
* XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
* ----------------------------------
*
* This is sent by the frontend to query the types of hash supported by
* the backend.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
* data[0] = 0
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = supported hash types (if operation was successful)
*
* NOTE: A valid hash algorithm must be selected before this operation can
* succeed.
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
* ----------------------------------
*
* This is sent by the frontend to set the types of hash that the backend
* should calculate. (See above for hash type definitions).
* Note that the 'maximal' type of hash should always be chosen. For
* example, if the frontend sets both IPV4 and IPV4_TCP hash types then
* the latter hash type should be calculated for any TCP packet and the
* former only calculated for non-TCP packets.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
* data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag
* value is invalid or
* unsupported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: A valid hash algorithm must be selected before this operation can
* succeed.
* Also, setting data[0] to zero disables hashing and the backend
* is free to choose how it steers packets to queues.
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
* --------------------------------
*
* This is sent by the frontend to set the key of the hash if the algorithm
* requires it. (See hash algorithms above).
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
* data[0] = grant reference of page containing the key (assumed to
* start at beginning of grant)
* data[1] = size of key in octets
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid
* XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger
* than the backend
* supports
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: Any key octets not specified are assumed to be zero (the key
* is assumed to be empty by default) and specifying a new key
* invalidates any previous key, hence specifying a key size of
* zero will clear the key (which ensures that the calculated hash
* will always be zero).
* The maximum size of key is algorithm and backend specific, but
* is also limited by the single grant reference.
* The grant reference may be read-only and must remain valid until
* the response has been processed.
*
* XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
* -----------------------------------------
*
* This is sent by the frontend to query the maximum size of mapping
* table supported by the backend. The size is specified in terms of
* table entries.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
* data[0] = 0
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = maximum number of entries allowed in the mapping table
* (if operation was successful) or zero if a mapping table is
* not supported (i.e. hash mapping is done only by modular
* arithmetic).
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
* -------------------------------------
*
* This is sent by the frontend to set the actual size of the mapping
* table to be used by the backend. The size is specified in terms of
* table entries.
* Any previous table is invalidated by this message and any new table
* is assumed to be zero filled.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
* data[0] = number of entries in mapping table
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: Setting data[0] to 0 means that hash mapping should be done
* using modular arithmetic.
*
* XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
* ------------------------------------
*
* This is sent by the frontend to set the content of the table mapping
* hash value to queue number. The backend should calculate the hash from
* the packet header, use it as an index into the table (modulo the size
* of the table) and then steer the packet to the queue number found at
* that index.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
* data[0] = grant reference of page containing the mapping (sub-)table
* (assumed to start at beginning of grant)
* data[1] = size of (sub-)table in entries
* data[2] = offset, in entries, of sub-table within overall table
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content
* is invalid
* XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger
* than the backend
* supports
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = 0
*
* NOTE: The overall table has the following format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | mapping[0] | mapping[1] |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | . |
* | . |
* | . |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | mapping[N-2] | mapping[N-1] |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
 * message and each mapping must specify a queue between 0 and
* "multi-queue-num-queues" (see above).
* The backend may support a mapping table larger than can be
* mapped by a single grant reference. Thus sub-tables within a
* larger table can be individually set by sending multiple messages
* with differing offset values. Specifying a new sub-table does not
* invalidate any table data outside that range.
* The grant reference may be read-only and must remain valid until
* the response has been processed.
*
* XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE
* -----------------------------------------
*
* This is sent by the frontend to fetch the number of grefs that can be kept
* mapped in the backend.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_GET_GREF_MAPPING_SIZE
* data[0] = queue index (assumed 0 for single queue)
* data[1] = 0
* data[2] = 0
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The queue index is
* out of range
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = maximum number of entries allowed in the gref mapping table
* (if operation was successful) or zero if it is not supported.
*
* XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
* ------------------------------------
*
* This is sent by the frontend for backend to map a list of grant
* references.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
* data[0] = queue index
* data[1] = grant reference of page containing the mapping list
* (r/w and assumed to start at beginning of page)
* data[2] = size of list in entries
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Operation failed
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
*
* NOTE: Each entry in the input table has the format outlined
* in struct xen_netif_gref.
* Contrary to XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING, the struct
* xen_netif_gref 'status' field is not used and therefore the response
* 'status' determines the success of this operation. In case of
* failure none of grants mappings get added in the backend.
*
* XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING
* ------------------------------------
*
* This is sent by the frontend for backend to unmap a list of grant
* references.
*
* Request:
*
* type = XEN_NETIF_CTRL_TYPE_DEL_GREF_MAPPING
* data[0] = queue index
* data[1] = grant reference of page containing the mapping list
* (r/w and assumed to start at beginning of page)
* data[2] = size of list in entries
*
* Response:
*
* status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
* supported
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Operation failed
* XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
* data = number of entries that were unmapped
*
* NOTE: Each entry in the input table has the format outlined in struct
* xen_netif_gref.
* The struct xen_netif_gref 'status' field determines if the entry
* was successfully removed.
* The entries used are only the ones representing grant references that
* were previously the subject of a XEN_NETIF_CTRL_TYPE_ADD_GREF_MAPPING
* operation. Any other entries will have their status set to
* XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER upon completion.
*/
/*
 * Instantiate the shared/front/back ring types for the control ring
 * (macro defined in ring.h).
 */
DEFINE_RING_TYPES(xen_netif_ctrl,
                  struct xen_netif_ctrl_request,
                  struct xen_netif_ctrl_response);
/*
* Guest transmit
* ==============
*
* This is the 'wire' format for transmit (frontend -> backend) packets:
*
* Fragment 1: netif_tx_request_t - flags = NETTXF_*
* size = total packet size
* [Extra 1: netif_extra_info_t] - (only if fragment 1 flags include
* NETTXF_extra_info)
* ...
* [Extra N: netif_extra_info_t] - (only if extra N-1 flags include
* XEN_NETIF_EXTRA_MORE)
* ...
* Fragment N: netif_tx_request_t - (only if fragment N-1 flags include
* NETTXF_more_data - flags on preceding
* extras are not relevant here)
* flags = 0
* size = fragment size
*
* NOTE:
*
 * This format is slightly different from that used for receive
* (backend -> frontend) packets. Specifically, in a multi-fragment
* packet the actual size of fragment 1 can only be determined by
* subtracting the sizes of fragments 2..N from the total packet size.
*
* Ring slot size is 12 octets, however not all request/response
* structs use the full size.
*
@ -200,6 +814,29 @@
* Guest receive
* =============
*
* This is the 'wire' format for receive (backend -> frontend) packets:
*
* Fragment 1: netif_rx_request_t - flags = NETRXF_*
* size = fragment size
* [Extra 1: netif_extra_info_t] - (only if fragment 1 flags include
* NETRXF_extra_info)
* ...
* [Extra N: netif_extra_info_t] - (only if extra N-1 flags include
* XEN_NETIF_EXTRA_MORE)
* ...
* Fragment N: netif_rx_request_t - (only if fragment N-1 flags include
* NETRXF_more_data - flags on preceding
* extras are not relevant here)
* flags = 0
* size = fragment size
*
* NOTE:
*
 * This format is slightly different from that used for transmit
* (frontend -> backend) packets. Specifically, in a multi-fragment
* packet the size of the packet can only be determined by summing the
* sizes of fragments 1..N.
*
* Ring slot size is 8 octets.
*
* rx request (netif_rx_request_t)
@ -226,15 +863,29 @@
* flags: NETRXF_*
* status: -ve: NETIF_RSP_*; +ve: Rx'ed pkt size.
*
* NOTE: Historically, to support GSO on the frontend receive side, Linux
* netfront does not make use of the rx response id (because, as
* described below, extra info structures overlay the id field).
* Instead it assumes that responses always appear in the same ring
* slot as their corresponding request. Thus, to maintain
* compatibility, backends must make sure this is the case.
*
* Extra Info
* ==========
*
* Can be present if initial request has NET{T,R}XF_extra_info, or
* previous extra request has XEN_NETIF_EXTRA_MORE.
* Can be present if initial request or response has NET{T,R}XF_extra_info,
* or previous extra request has XEN_NETIF_EXTRA_MORE.
*
* The struct therefore needs to fit into either a tx or rx slot and
* is therefore limited to 8 octets.
*
* NOTE: Because extra info data overlays the usual request/response
* structures, there is no id information in the opposite direction.
* So, if an extra info overlays an rx response the frontend can
* assume that it is in the same ring slot as the request that was
* consumed to make the slot available, and the backend must ensure
* this assumption is true.
*
* extra info (netif_extra_info_t)
* -------------------------------
*
@ -242,7 +893,7 @@
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags| type specfic data |
* |type |flags| type specific data |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | padding for tx |
* +-----+-----+-----+-----+
@ -250,7 +901,8 @@
* type: XEN_NETIF_EXTRA_TYPE_*
* flags: XEN_NETIF_EXTRA_FLAG_*
* padding for tx: present only in the tx case due to 8 octet limit
* from rx case. Not shown in type specific entries below.
* from rx case. Not shown in type specific entries
* below.
*
* XEN_NETIF_EXTRA_TYPE_GSO:
*
@ -261,9 +913,14 @@
*
* type: Must be XEN_NETIF_EXTRA_TYPE_GSO
* flags: XEN_NETIF_EXTRA_FLAG_*
* size: Maximum payload size of each segment.
* type: XEN_NETIF_GSO_TYPE_*
* features: EN_NETIF_GSO_FEAT_*
* size: Maximum payload size of each segment. For example,
* for TCP this is just the path MSS.
* type: XEN_NETIF_GSO_TYPE_*: This determines the protocol of
* the packet and any extra features required to segment the
* packet properly.
 * features: XEN_NETIF_GSO_FEAT_*: This specifies any extra GSO
* features required to process this packet, such as ECN
* support for TCPv4.
*
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
*
@ -275,6 +932,25 @@
* type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}
* flags: XEN_NETIF_EXTRA_FLAG_*
* addr: address to add/remove
*
* XEN_NETIF_EXTRA_TYPE_HASH:
*
 * A backend that supports Toeplitz hashing is assumed to accept
* this type of extra info in transmit packets.
* A frontend that enables hashing is assumed to accept
* this type of extra info in receive packets.
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags|htype| alg |LSB ---- value ---- MSB|
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* type: Must be XEN_NETIF_EXTRA_TYPE_HASH
* flags: XEN_NETIF_EXTRA_FLAG_*
* htype: Hash type (one of _XEN_NETIF_CTRL_HASH_TYPE_* - see above)
* alg: The algorithm used to calculate the hash (one of
 *        XEN_NETIF_CTRL_HASH_ALGORITHM_* - see above)
* value: Hash value
*/
/* Protocol checksum field is blank in the packet (hardware offload)? */
@ -295,11 +971,11 @@
#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
struct netif_tx_request {
grant_ref_t gref; /* Reference to buffer page */
uint16_t offset; /* Offset within buffer page */
uint16_t flags; /* NETTXF_* */
uint16_t id; /* Echoed in response message. */
uint16_t size; /* Packet size in bytes. */
grant_ref_t gref;
uint16_t offset;
uint16_t flags;
uint16_t id;
uint16_t size;
};
typedef struct netif_tx_request netif_tx_request_t;
@ -308,7 +984,8 @@ typedef struct netif_tx_request netif_tx_request_t;
#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MAX (4)
#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
#define XEN_NETIF_EXTRA_TYPE_MAX (5)
/* netif_extra_info_t flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@ -324,43 +1001,23 @@ typedef struct netif_tx_request netif_tx_request_t;
* netif_rx_response_t for compatibility.
*/
struct netif_extra_info {
uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
uint8_t type;
uint8_t flags;
union {
/*
* XEN_NETIF_EXTRA_TYPE_GSO:
*/
struct {
/*
* Maximum payload size of each segment. For example, for TCP this
* is just the path MSS.
*/
uint16_t size;
/*
* GSO type. This determines the protocol of the packet and any
* extra features required to segment the packet properly.
*/
uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
/* Future expansion. */
uint8_t type;
uint8_t pad;
/*
* GSO features. This specifies any extra GSO features required
* to process this packet, such as ECN support for TCPv4.
*/
uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
uint16_t features;
} gso;
/*
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
*/
struct {
uint8_t addr[6]; /* Address to add/remove. */
uint8_t addr[6];
} mcast;
struct {
uint8_t type;
uint8_t algorithm;
uint8_t value[4];
} hash;
uint16_t pad[3];
} u;
};
@ -368,14 +1025,14 @@ typedef struct netif_extra_info netif_extra_info_t;
struct netif_tx_response {
uint16_t id;
int16_t status; /* NETIF_RSP_* */
int16_t status;
};
typedef struct netif_tx_response netif_tx_response_t;
struct netif_rx_request {
uint16_t id; /* Echoed in response message. */
uint16_t pad;
grant_ref_t gref; /* Reference to incoming granted frame */
grant_ref_t gref;
};
typedef struct netif_rx_request netif_rx_request_t;
@ -395,11 +1052,15 @@ typedef struct netif_rx_request netif_rx_request_t;
#define _NETRXF_extra_info (3)
#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
/* Packet has GSO prefix. Deprecated but included for compatibility */
#define _NETRXF_gso_prefix (4)
#define NETRXF_gso_prefix (1U<<_NETRXF_gso_prefix)
struct netif_rx_response {
uint16_t id;
uint16_t offset; /* Offset in page of start of received packet */
uint16_t flags; /* NETRXF_* */
int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
uint16_t offset;
uint16_t flags;
int16_t status;
};
typedef struct netif_rx_response netif_rx_response_t;

View File

@ -1,6 +1,6 @@
/******************************************************************************
* protocols.h
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the

View File

@ -0,0 +1,160 @@
/*
* pvcalls.h -- Xen PV Calls Protocol
*
* Refer to docs/misc/pvcalls.markdown for the specification
*
* The header is provided as a C reference for the specification. In
* case of conflict, the specification is authoritative.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
*/
#ifndef __XEN_PUBLIC_IO_PVCALLS_H__
#define __XEN_PUBLIC_IO_PVCALLS_H__
#include "../grant_table.h"
#include "ring.h"
/*
* See docs/misc/pvcalls.markdown in xen.git for the full specification:
* https://xenbits.xen.org/docs/unstable/misc/pvcalls.html
*/
/*
 * Shared indexes page for a PV Calls data ring. One triple of
 * consumer/producer/error indices per direction; the pad arrays keep
 * the two index groups apart (64 bytes each, assuming 4-byte
 * RING_IDX — see ring.h). 'ring_order' gives the order of the data
 * ring whose grant references follow in the flexible array 'ref[]'.
 * See the pvcalls specification for the exact semantics.
 */
struct pvcalls_data_intf {
    RING_IDX in_cons, in_prod, in_error;
    uint8_t pad1[52];
    RING_IDX out_cons, out_prod, out_error;
    uint8_t pad2[52];
    RING_IDX ring_order;
    grant_ref_t ref[];
};
/* Generate the variable-order ("flex") ring helpers for the pvcalls
 * data rings (macro defined in ring.h). */
DEFINE_XEN_FLEX_RING(pvcalls);
#define PVCALLS_SOCKET 0
#define PVCALLS_CONNECT 1
#define PVCALLS_RELEASE 2
#define PVCALLS_BIND 3
#define PVCALLS_LISTEN 4
#define PVCALLS_ACCEPT 5
#define PVCALLS_POLL 6
/*
 * PV Calls command ring request.  "cmd" selects which member of the
 * union is valid, using the PVCALLS_* command numbers defined above.
 * Field semantics are defined by the PV Calls specification, which is
 * authoritative in case of conflict with these comments.
 */
struct xen_pvcalls_request {
    uint32_t req_id; /* private to guest, echoed in response */
    uint32_t cmd;    /* command to execute */
    union {
        struct xen_pvcalls_socket { /* cmd == PVCALLS_SOCKET */
            uint64_t id;        /* request-scoped identifier; see spec */
            uint32_t domain;
            uint32_t type;
            uint32_t protocol;
            uint8_t pad[4];
        } socket;
        struct xen_pvcalls_connect { /* cmd == PVCALLS_CONNECT */
            uint64_t id;
            uint8_t addr[28];   /* socket address; format per the spec */
            uint32_t len;       /* length of addr */
            uint32_t flags;
            grant_ref_t ref;    /* grant ref for the data ring; see spec */
            uint32_t evtchn;    /* event channel for the data ring; see spec */
            uint8_t pad[4];
        } connect;
        struct xen_pvcalls_release { /* cmd == PVCALLS_RELEASE */
            uint64_t id;
            uint8_t reuse;
            uint8_t pad[7];
        } release;
        struct xen_pvcalls_bind { /* cmd == PVCALLS_BIND */
            uint64_t id;
            uint8_t addr[28];   /* socket address; format per the spec */
            uint32_t len;
        } bind;
        struct xen_pvcalls_listen { /* cmd == PVCALLS_LISTEN */
            uint64_t id;
            uint32_t backlog;
            uint8_t pad[4];
        } listen;
        struct xen_pvcalls_accept { /* cmd == PVCALLS_ACCEPT */
            uint64_t id;
            uint64_t id_new;    /* id for the accepted socket; see spec */
            grant_ref_t ref;
            uint32_t evtchn;
        } accept;
        struct xen_pvcalls_poll { /* cmd == PVCALLS_POLL */
            uint64_t id;
        } poll;
        /* dummy member to force sizeof(struct xen_pvcalls_request)
         * to match across archs */
        struct xen_pvcalls_dummy {
            uint8_t dummy[56];
        } dummy;
    } u;
};
/*
 * PV Calls command ring response.  req_id and cmd echo the matching
 * request; ret carries the command status (presumably a negative error
 * code on failure — the PV Calls specification is authoritative).
 */
struct xen_pvcalls_response {
    uint32_t req_id;    /* echoed from the request */
    uint32_t cmd;       /* echoed from the request */
    int32_t ret;        /* command status; see spec */
    uint32_t pad;
    union {
        struct _xen_pvcalls_socket {
            uint64_t id;
        } socket;
        struct _xen_pvcalls_connect {
            uint64_t id;
        } connect;
        struct _xen_pvcalls_release {
            uint64_t id;
        } release;
        struct _xen_pvcalls_bind {
            uint64_t id;
        } bind;
        struct _xen_pvcalls_listen {
            uint64_t id;
        } listen;
        struct _xen_pvcalls_accept {
            uint64_t id;
        } accept;
        struct _xen_pvcalls_poll {
            uint64_t id;
        } poll;
        /* dummy member to force a fixed union size across archs */
        struct _xen_pvcalls_dummy {
            uint8_t dummy[8];
        } dummy;
    } u;
};
DEFINE_RING_TYPES(xen_pvcalls, struct xen_pvcalls_request,
struct xen_pvcalls_response);
#endif
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -1,6 +1,6 @@
/******************************************************************************
* ring.h
*
*
* Shared producer-consumer ring macros.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@ -27,6 +27,21 @@
#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__
/*
* When #include'ing this header, you need to provide the following
* declaration upfront:
* - standard integers types (uint8_t, uint16_t, etc)
* They are provided by stdint.h of the standard headers.
*
* In addition, if you intend to use the FLEX macros, you also need to
* provide the following, before invoking the FLEX macros:
* - size_t
* - memcpy
* - grant_ref_t
* These declarations are provided by string.h of the standard headers,
* and grant_table.h from the Xen public headers.
*/
#include "../xen-compat.h"
#if __XEN_INTERFACE_VERSION__ < 0x00030208
@ -47,7 +62,7 @@ typedef unsigned int RING_IDX;
/*
* Calculate size of a shared ring, given the total available space for the
* ring and indexes (_sz), and the name tag of the request/response structure.
* A ring contains as many entries as will fit, rounded down to the nearest
* A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
#define __CONST_RING_SIZE(_s, _sz) \
@ -61,7 +76,7 @@ typedef unsigned int RING_IDX;
/*
* Macros to make the correct C datatypes for a new kind of ring.
*
*
* To make a new ring datatype, you need to have two message structures,
* let's say request_t, and response_t already defined.
*
@ -71,7 +86,7 @@ typedef unsigned int RING_IDX;
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
*
*
* mytag_sring_t - The shared ring.
* mytag_front_ring_t - The 'front' half of the ring.
* mytag_back_ring_t - The 'back' half of the ring.
@ -139,15 +154,15 @@ typedef struct __name##_back_ring __name##_back_ring_t
/*
* Macros for manipulating rings.
*
* FRONT_RING_whatever works on the "front end" of a ring: here
*
* FRONT_RING_whatever works on the "front end" of a ring: here
* requests are pushed on to the ring and responses taken off it.
*
* BACK_RING_whatever works on the "back end" of a ring: here
*
* BACK_RING_whatever works on the "back end" of a ring: here
* requests are taken off the ring and responses put on.
*
* N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
* This is OK in 1-for-1 request-response situations where the
*
* N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
* This is OK in 1-for-1 request-response situations where the
* requestor (front end) never has more than RING_SIZE()-1
* outstanding requests.
*/
@ -160,20 +175,24 @@ typedef struct __name##_back_ring __name##_back_ring_t
(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
#define FRONT_RING_INIT(_r, _s, __size) do { \
(_r)->req_prod_pvt = 0; \
(_r)->rsp_cons = 0; \
#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
(_r)->req_prod_pvt = (_i); \
(_r)->rsp_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) do { \
(_r)->rsp_prod_pvt = 0; \
(_r)->req_cons = 0; \
#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
(_r)->rsp_prod_pvt = (_i); \
(_r)->req_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
/* How big is this ring? */
#define RING_SIZE(_r) \
((_r)->nr_ents)
@ -212,6 +231,20 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
/*
* Get a local copy of a request.
*
* Use this in preference to RING_GET_REQUEST() so all processing is
* done on a local copy that cannot be modified by the other end.
*
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
* to be ineffective where _req is a struct which consists of only bitfields.
*/
#define RING_COPY_REQUEST(_r, _idx, _req) do { \
/* Use volatile to force the copy into _req. */ \
*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
} while (0)
#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
@ -235,26 +268,26 @@ typedef struct __name##_back_ring __name##_back_ring_t
/*
* Notification hold-off (req_event and rsp_event):
*
*
* When queueing requests or responses on a shared ring, it may not always be
* necessary to notify the remote end. For example, if requests are in flight
* in a backend, the front may be able to queue further requests without
* notifying the back (if the back checks for new requests when it queues
* responses).
*
*
* When enqueuing requests or responses:
*
*
* Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
* is a boolean return value. True indicates that the receiver requires an
* asynchronous notification.
*
*
* After dequeuing requests or responses (before sleeping the connection):
*
*
* Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
* The second argument is a boolean return value. True indicates that there
* are pending messages on the ring (i.e., the connection should not be put
* to sleep).
*
*
* These macros will set the req_event/rsp_event field to trigger a
* notification on the very next message that is enqueued. If you want to
* create batches of work (i.e., only receive a notification after several
@ -299,6 +332,149 @@ typedef struct __name##_back_ring __name##_back_ring_t
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
/*
* DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
* functions to check if there is data on the ring, and to read and
* write to them.
*
* DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
* does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
* own struct.
*
* XEN_FLEX_RING_SIZE
* Convenience macro to calculate the size of one of the two rings
* from the overall order.
*
* $NAME_mask
* Function to apply the size mask to an index, to reduce the index
 * within the range [0, size).
*
* $NAME_read_packet
* Function to read data from the ring. The amount of data to read is
* specified by the "size" argument.
*
* $NAME_write_packet
* Function to write data to the ring. The amount of data to write is
* specified by the "size" argument.
*
* $NAME_get_ring_ptr
* Convenience function that returns a pointer to read/write to the
* ring at the right location.
*
* $NAME_data_intf
* Indexes page, shared between frontend and backend. It also
* contains the array of grant refs.
*
* $NAME_queued
* Function to calculate how many bytes are currently on the ring,
* ready to be read. It can also be used to calculate how much free
* space is currently on the ring (XEN_FLEX_RING_SIZE() -
* $NAME_queued()).
*/
#ifndef XEN_PAGE_SHIFT
/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
* 4K, regardless of the architecture, and page granularity chosen by
* operating systems.
*/
#define XEN_PAGE_SHIFT 12
#endif
#define XEN_FLEX_RING_SIZE(order) \
(1UL << ((order) + XEN_PAGE_SHIFT - 1))
#define DEFINE_XEN_FLEX_RING(name) \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
{ \
return idx & (ring_size - 1); \
} \
\
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \
RING_IDX idx, \
RING_IDX ring_size) \
{ \
return buf + name##_mask(idx, ring_size); \
} \
\
static inline void name##_read_packet(void *opaque, \
const unsigned char *buf, \
size_t size, \
RING_IDX masked_prod, \
RING_IDX *masked_cons, \
RING_IDX ring_size) \
{ \
if (*masked_cons < masked_prod || \
size <= ring_size - *masked_cons) { \
memcpy(opaque, buf + *masked_cons, size); \
} else { \
memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
size - (ring_size - *masked_cons)); \
} \
*masked_cons = name##_mask(*masked_cons + size, ring_size); \
} \
\
static inline void name##_write_packet(unsigned char *buf, \
const void *opaque, \
size_t size, \
RING_IDX *masked_prod, \
RING_IDX masked_cons, \
RING_IDX ring_size) \
{ \
if (*masked_prod < masked_cons || \
size <= ring_size - *masked_prod) { \
memcpy(buf + *masked_prod, opaque, size); \
} else { \
memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
size - (ring_size - *masked_prod)); \
} \
*masked_prod = name##_mask(*masked_prod + size, ring_size); \
} \
\
static inline RING_IDX name##_queued(RING_IDX prod, \
RING_IDX cons, \
RING_IDX ring_size) \
{ \
RING_IDX size; \
\
if (prod == cons) \
return 0; \
\
prod = name##_mask(prod, ring_size); \
cons = name##_mask(cons, ring_size); \
\
if (prod == cons) \
return ring_size; \
\
if (prod > cons) \
size = prod - cons; \
else \
size = ring_size - (cons - prod); \
return size; \
} \
\
struct name##_data { \
unsigned char *in; /* half of the allocation */ \
unsigned char *out; /* half of the allocation */ \
}
#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
struct name##_data_intf { \
RING_IDX in_cons, in_prod; \
\
uint8_t pad1[56]; \
\
RING_IDX out_cons, out_prod; \
\
uint8_t pad2[56]; \
\
RING_IDX ring_order; \
grant_ref_t ref[]; \
}; \
DEFINE_XEN_FLEX_RING(name)
#endif /* __XEN_PUBLIC_IO_RING_H__ */
/*

1091
sys/xen/interface/io/sndif.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -102,6 +102,7 @@ enum tpmif_state {
* to use atomic operations.
*/
/* The shared page for vTPM request/response packets looks like:
*
* Offset Contents

View File

@ -170,6 +170,7 @@ enum usb_spec_version {
#define USBIF_MAX_SEGMENTS_PER_REQUEST (16)
#define USBIF_MAX_PORTNR 31
#define USBIF_RING_SIZE 4096
/*
* RING for transferring urbs.
@ -187,6 +188,7 @@ struct usbif_urb_request {
/* basic urb parameter */
uint32_t pipe;
uint16_t transfer_flags;
#define USBIF_SHORT_NOT_OK 0x0001
uint16_t buffer_length;
union {
uint8_t ctrl[8]; /* setup_packet (Ctrl) */
@ -225,7 +227,7 @@ struct usbif_urb_response {
typedef struct usbif_urb_response usbif_urb_response_t;
DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response);
#define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, PAGE_SIZE)
#define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, USBIF_RING_SIZE)
/*
* RING for notifying connect/disconnect events to frontend
@ -247,6 +249,6 @@ struct usbif_conn_response {
typedef struct usbif_conn_response usbif_conn_response_t;
DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, struct usbif_conn_response);
#define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, PAGE_SIZE)
#define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, USBIF_RING_SIZE)
#endif /* __XEN_PUBLIC_IO_USBIF_H__ */

View File

@ -60,7 +60,7 @@
*
* A string specifying the backend device: either a 4-tuple "h:c:t:l"
* (host, controller, target, lun, all integers), or a WWN (e.g.
* "naa.60014054ac780582").
* "naa.60014054ac780582:0").
*
* v-dev
* Values: string
@ -104,6 +104,75 @@
* response structures.
*/
/*
* Xenstore format in practice
* ===========================
*
* The backend driver uses a single_host:many_devices notation to manage domU
* devices. Everything is stored in /local/domain/<backend_domid>/backend/vscsi/.
* The xenstore layout looks like this (dom0 is assumed to be the backend_domid):
*
* <domid>/<vhost>/feature-host = "0"
* <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
* <domid>/<vhost>/frontend-id = "<domid>"
* <domid>/<vhost>/online = "1"
* <domid>/<vhost>/state = "4"
* <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1" or "naa.wwn:lun"
* <domid>/<vhost>/vscsi-devs/dev-0/state = "4"
* <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
* <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
* <domid>/<vhost>/vscsi-devs/dev-1/state = "4"
* <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
*
* The frontend driver maintains its state in
* /local/domain/<domid>/device/vscsi/.
*
* <vhost>/backend = "/local/domain/0/backend/vscsi/<domid>/<vhost>"
* <vhost>/backend-id = "0"
* <vhost>/event-channel = "20"
* <vhost>/ring-ref = "43"
* <vhost>/state = "4"
* <vhost>/vscsi-devs/dev-0/state = "4"
* <vhost>/vscsi-devs/dev-1/state = "4"
*
* In addition to the entries for backend and frontend these flags are stored
* for the toolstack:
*
* <domid>/<vhost>/vscsi-devs/dev-1/p-devname = "/dev/$device"
* <domid>/<vhost>/libxl_ctrl_index = "0"
*
*
* Backend/frontend protocol
* =========================
*
* To create a vhost along with a device:
* <domid>/<vhost>/feature-host = "0"
* <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0"
* <domid>/<vhost>/frontend-id = "<domid>"
* <domid>/<vhost>/online = "1"
* <domid>/<vhost>/state = "1"
* <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1"
* <domid>/<vhost>/vscsi-devs/dev-0/state = "1"
* <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0"
* Wait for <domid>/<vhost>/state + <domid>/<vhost>/vscsi-devs/dev-0/state become 4
*
* To add another device to a vhost:
* <domid>/<vhost>/state = "7"
* <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2"
* <domid>/<vhost>/vscsi-devs/dev-1/state = "1"
* <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0"
* Wait for <domid>/<vhost>/state + <domid>/<vhost>/vscsi-devs/dev-1/state become 4
*
* To remove a device from a vhost:
* <domid>/<vhost>/state = "7"
* <domid>/<vhost>/vscsi-devs/dev-1/state = "5"
* Wait for <domid>/<vhost>/state to become 4
* Wait for <domid>/<vhost>/vscsi-devs/dev-1/state become 6
* Remove <domid>/<vhost>/vscsi-devs/dev-1/{state,p-dev,v-dev,p-devname}
* Remove <domid>/<vhost>/vscsi-devs/dev-1/
*
*/
/* Requests from the frontend to the backend */
/*
@ -179,6 +248,7 @@
*/
#define VSCSIIF_MAX_COMMAND_SIZE 16
#define VSCSIIF_SENSE_BUFFERSIZE 96
#define VSCSIIF_PAGE_SIZE 4096
struct scsiif_request_segment {
grant_ref_t gref;
@ -187,7 +257,7 @@ struct scsiif_request_segment {
};
typedef struct scsiif_request_segment vscsiif_segment_t;
#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
#define VSCSIIF_SG_PER_PAGE (VSCSIIF_PAGE_SIZE / sizeof(struct scsiif_request_segment))
/* Size of one request is 252 bytes */
struct vscsiif_request {
@ -239,7 +309,7 @@ struct vscsiif_response {
uint8_t sense_len;
uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
int32_t rslt;
uint32_t residual_len; /* request bufflen -
uint32_t residual_len; /* request bufflen -
return the value from physical device */
uint32_t reserved[36];
};
@ -247,6 +317,7 @@ typedef struct vscsiif_response vscsiif_response_t;
DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
/*
* Local variables:

View File

@ -28,7 +28,8 @@
enum xsd_sockmsg_type
{
XS_DEBUG,
XS_CONTROL,
#define XS_DEBUG XS_CONTROL
XS_DIRECTORY,
XS_READ,
XS_GET_PERMS,
@ -48,8 +49,11 @@ enum xsd_sockmsg_type
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
XS_RESTRICT,
XS_RESET_WATCHES,
/* XS_RESTRICT has been removed */
XS_RESET_WATCHES = XS_SET_TARGET + 2,
XS_DIRECTORY_PART,
XS_TYPE_COUNT, /* Number of valid types. */
XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};

View File

@ -1,6 +1,6 @@
/******************************************************************************
* kexec.h - Public portion
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -18,7 +18,7 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*
* Xen port written by:
* - Simon 'Horms' Horman <horms@verge.net.au>
* - Magnus Damm <magnus@valinux.co.jp>
@ -27,9 +27,10 @@
#ifndef _XEN_PUBLIC_KEXEC_H
#define _XEN_PUBLIC_KEXEC_H
/* This file describes the Kexec / Kdump hypercall interface for Xen.
*
* Kexec under vanilla Linux allows a user to reboot the physical machine
* Kexec under vanilla Linux allows a user to reboot the physical machine
* into a new user-specified kernel. The Xen port extends this idea
* to allow rebooting of the machine from dom0. When kexec for dom0
* is used to reboot, both the hypervisor and the domains get replaced
@ -40,8 +41,8 @@
* types of hypercall operations:
*
* 1) Range information:
* This is used by the dom0 kernel to ask the hypervisor about various
* address information. This information is needed to allow kexec-tools
* This is used by the dom0 kernel to ask the hypervisor about various
* address information. This information is needed to allow kexec-tools
* to fill in the ELF headers for /proc/vmcore properly.
*
* 2) Load and unload of images:
@ -64,7 +65,7 @@
/*
* Prototype for this hypercall is:
* int kexec_op(int cmd, void *args)
* @cmd == KEXEC_CMD_...
* @cmd == KEXEC_CMD_...
* KEXEC operation to perform
* @args == Operation-specific extra arguments (NULL if none).
*/
@ -82,6 +83,7 @@
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH 1
/* The kexec implementation for Xen allows the user to load two
* types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
* All data needed for a kexec reboot is kept in one xen_kexec_image_t
@ -225,6 +227,20 @@ typedef struct xen_kexec_unload {
} xen_kexec_unload_t;
DEFINE_XEN_GUEST_HANDLE(xen_kexec_unload_t);
/*
* Figure out whether we have an image loaded. A return value of
* zero indicates no image loaded. A return value of one
* indicates an image is loaded. A negative return value
* indicates an error.
*
* Type must be one of KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
*/
#define KEXEC_CMD_kexec_status 6
typedef struct xen_kexec_status {
uint8_t type;
} xen_kexec_status_t;
DEFINE_XEN_GUEST_HANDLE(xen_kexec_status_t);
#else /* __XEN_INTERFACE_VERSION__ < 0x00040400 */
#define KEXEC_CMD_kexec_load KEXEC_CMD_kexec_load_v1

View File

@ -1,8 +1,8 @@
/******************************************************************************
* memory.h
*
*
* Memory reservation and information.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -41,9 +41,9 @@
#if __XEN_INTERFACE_VERSION__ >= 0x00030209
/*
* Maximum # bits addressable by the user of the allocated region (e.g., I/O
* devices often have a 32-bit limitation even in 64-bit systems). If zero
* then the user has no addressing restriction. This field is not used by
* Maximum # bits addressable by the user of the allocated region (e.g., I/O
* devices often have a 32-bit limitation even in 64-bit systems). If zero
* then the user has no addressing restriction. This field is not used by
* XENMEM_decrease_reservation.
*/
#define XENMEMF_address_bits(x) (x)
@ -61,6 +61,7 @@
#endif
struct xen_memory_reservation {
/*
* XENMEM_increase_reservation:
* OUT: MFN (*not* GMFN) bases of extents that were allocated
@ -101,6 +102,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
* Returns zero on complete success, otherwise a negative error code.
* On complete success then always @nr_exchanged == @in.nr_extents.
* On partial success @nr_exchanged indicates how much work was done.
*
* Note that only PV guests can use this operation.
*/
#define XENMEM_exchange 11
struct xen_memory_exchange {
@ -114,7 +117,7 @@ struct xen_memory_exchange {
* [IN/OUT] Details of new memory extents.
* We require that:
* 1. @in.domid == @out.domid
* 2. @in.nr_extents << @in.extent_order ==
* 2. @in.nr_extents << @in.extent_order ==
* @out.nr_extents << @out.extent_order
* 3. @in.extent_start and @out.extent_start lists must not overlap
* 4. @out.extent_start lists GPFN bases to be populated
@ -219,11 +222,16 @@ DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
* XENMEM_add_to_physmap_batch only. */
#define XENMAPSPACE_dev_mmio 5 /* device mmio region
ARM only; the region is mapped in
Stage-2 using the Normal Memory
Inner/Outer Write-Back Cacheable
memory attribute. */
/* ` } */
/*
* Sets the GPFN at which a particular page appears in the specified guest's
* pseudophysical address space.
* physical address space (translated guests only).
* arg == addr of xen_add_to_physmap_t.
*/
#define XENMEM_add_to_physmap 7
@ -257,7 +265,15 @@ struct xen_add_to_physmap_batch {
/* Number of pages to go through */
uint16_t size;
domid_t foreign_domid; /* IFF gmfn_foreign */
#if __XEN_INTERFACE_VERSION__ < 0x00040700
domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
#else
union xen_add_to_physmap_batch_extra {
domid_t foreign_domid; /* gmfn_foreign */
uint16_t res0; /* All the other spaces. Should be 0 */
} u;
#endif
/* Indexes into space being mapped. */
XEN_GUEST_HANDLE(xen_ulong_t) idxs;
@ -282,7 +298,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* physical address space (translated guests only).
* arg == addr of xen_remove_from_physmap_t.
*/
#define XENMEM_remove_from_physmap 15
@ -325,6 +341,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
/*
* Returns the real physical memory map. Passes the same structure as
* XENMEM_memory_map.
* Specifying buffer as NULL will return the number of entries required
* to store the complete memory map.
* arg == addr of xen_memory_map_t.
*/
#define XENMEM_machine_memory_map 10
@ -364,7 +382,7 @@ typedef struct xen_pod_target xen_pod_target_t;
/*
* Get the number of MFNs saved through memory sharing.
* The call never fails.
* The call never fails.
*/
#define XENMEM_get_sharing_freed_pages 18
#define XENMEM_get_sharing_shared_pages 19
@ -378,10 +396,10 @@ struct xen_mem_paging_op {
uint8_t op; /* XENMEM_paging_op_* */
domid_t domain;
/* PAGING_PREP IN: buffer to immediately fill page in */
uint64_aligned_t buffer;
/* Other OPs */
uint64_aligned_t gfn; /* IN: gfn of page being operated on */
/* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
XEN_GUEST_HANDLE_64(const_uint8) buffer;
/* IN: gfn of page being operated on */
uint64_aligned_t gfn;
};
typedef struct xen_mem_paging_op xen_mem_paging_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
@ -389,8 +407,14 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
#define XENMEM_access_op 21
#define XENMEM_access_op_set_access 0
#define XENMEM_access_op_get_access 1
#define XENMEM_access_op_enable_emulate 2
#define XENMEM_access_op_disable_emulate 3
/*
* XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
* currently unused, but since they have been in use please do not reuse them.
*
* #define XENMEM_access_op_enable_emulate 2
* #define XENMEM_access_op_disable_emulate 3
*/
#define XENMEM_access_op_set_access_multi 4
typedef enum {
XENMEM_access_n,
@ -423,7 +447,8 @@ struct xen_mem_access_op {
uint8_t access;
domid_t domid;
/*
* Number of pages for set op
* Number of pages for set op (or size of pfn_list for
* XENMEM_access_op_set_access_multi)
* Ignored on setting default access and other ops
*/
uint32_t nr;
@ -433,6 +458,16 @@ struct xen_mem_access_op {
* ~0ull is used to set and get the default access for pages
*/
uint64_aligned_t pfn;
/*
* List of pfns to set access for
* Used only with XENMEM_access_op_set_access_multi
*/
XEN_GUEST_HANDLE(const_uint64) pfn_list;
/*
* Corresponding list of access settings for pfn_list
* Used only with XENMEM_access_op_set_access_multi
*/
XEN_GUEST_HANDLE(const_uint8) access_list;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
@ -446,15 +481,18 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
#define XENMEM_sharing_op_debug_gref 5
#define XENMEM_sharing_op_add_physmap 6
#define XENMEM_sharing_op_audit 7
#define XENMEM_sharing_op_range_share 8
#define XENMEM_sharing_op_fork 9
#define XENMEM_sharing_op_fork_reset 10
#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
/* The following allows sharing of grant refs. This is useful
* for sharing utilities sitting as "filters" in IO backends
* (e.g. memshr + blktap(2)). The IO backend is only exposed
* (e.g. memshr + blktap(2)). The IO backend is only exposed
* to grant references, and this allows sharing of the grefs */
#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (1ULL << 62)
#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)
#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
(field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
@ -481,7 +519,14 @@ struct xen_mem_sharing_op {
uint64_aligned_t client_gfn; /* IN: the client gfn */
uint64_aligned_t client_handle; /* IN: handle to the client page */
domid_t client_domain; /* IN: the client domain id */
} share;
} share;
struct mem_sharing_op_range { /* OP_RANGE_SHARE */
uint64_aligned_t first_gfn; /* IN: the first gfn */
uint64_aligned_t last_gfn; /* IN: the last gfn */
uint64_aligned_t opaque; /* Must be set to 0 */
domid_t client_domain; /* IN: the client domain id */
uint16_t _pad[3]; /* Must be set to 0 */
} range;
struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
union {
uint64_aligned_t gfn; /* IN: gfn to debug */
@ -489,6 +534,15 @@ struct xen_mem_sharing_op {
uint32_t gref; /* IN: gref to debug */
} u;
} debug;
struct mem_sharing_op_fork { /* OP_FORK */
domid_t parent_domain; /* IN: parent's domain id */
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_BLOCK_INTERRUPTS (1u << 1)
uint16_t flags; /* IN: optional settings */
uint32_t pad; /* Must be set to 0 */
} fork;
} u;
};
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
@ -510,8 +564,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
*
* Note that a valid claim may be staked even after memory has been
* allocated for a domain. In this case, the claim is not incremental,
* i.e. if the domain's tot_pages is 3, and a claim is staked for 10,
* only 7 additional pages are claimed.
* i.e. if the domain's total page count is 3, and a claim is staked
* for 10, only 7 additional pages are claimed.
*
* Caller must be privileged or the hypercall fails.
*/
@ -519,7 +573,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
/*
 * XENMEM_claim_pages flags - there are no flags at this time.
* The zero value is appropiate.
* The zero value is appropriate.
*/
/*
@ -558,6 +612,65 @@ DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
/*
* Get the pages for a particular guest resource, so that they can be
* mapped directly by a tools domain.
*/
#define XENMEM_acquire_resource 28
/* Argument structure for XENMEM_acquire_resource. */
struct xen_mem_acquire_resource {
    /* IN - The domain whose resource is to be mapped */
    domid_t domid;
    /* IN - the type of resource */
    uint16_t type;

#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_grant_table 1

    /*
     * IN - a type-specific resource identifier, which must be zero
     *      unless stated otherwise.
     *
     * type == XENMEM_resource_ioreq_server -> id == ioreq server id
     * type == XENMEM_resource_grant_table -> id defined below
     */
    uint32_t id;

#define XENMEM_resource_grant_table_id_shared 0
#define XENMEM_resource_grant_table_id_status 1

    /*
     * IN/OUT - As an IN parameter number of frames of the resource
     *          to be mapped. However, if the specified value is 0 and
     *          frame_list is NULL then this field will be set to the
     *          maximum value supported by the implementation on return.
     */
    uint32_t nr_frames;
    /* Alignment padding before the following uint64_t.
     * NOTE(review): presumably must be 0 — confirm against hypervisor. */
    uint32_t pad;
    /*
     * IN - the index of the initial frame to be mapped. This parameter
     *      is ignored if nr_frames is 0.
     */
    uint64_t frame;

/* Frame indexes for type == XENMEM_resource_ioreq_server. */
#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

    /*
     * IN/OUT - If the tools domain is PV then, upon return, frame_list
     *          will be populated with the MFNs of the resource.
     *          If the tools domain is HVM then it is expected that, on
     *          entry, frame_list will be populated with a list of GFNs
     *          that will be mapped to the MFNs of the resource.
     *          If -EIO is returned then the frame_list has only been
     *          partially mapped and it is up to the caller to unmap all
     *          the GFNs.
     *          This parameter may be NULL if nr_frames is 0.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
};
typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);
/*
* XENMEM_get_vnumainfo used by guest to get
* vNUMA topology from hypervisor.
@ -607,7 +720,7 @@ struct xen_vnuma_topology_info {
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
/* Next available subop number is 28 */
/* Next available subop number is 29 */
#endif /* __XEN_PUBLIC_MEMORY_H__ */

View File

@ -1,8 +1,8 @@
/******************************************************************************
* nmi.h
*
*
* NMI callback registration and reason codes.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the

View File

@ -192,7 +192,7 @@ struct physdev_manage_pci {
/* IN */
uint8_t bus;
uint8_t devfn;
};
};
typedef struct physdev_manage_pci physdev_manage_pci_t;
DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
@ -258,7 +258,7 @@ DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t);
* the hypercall returns a free pirq */
#define PHYSDEVOP_get_free_pirq 23
struct physdev_get_free_pirq {
/* IN */
/* IN */
int type;
/* OUT */
uint32_t pirq;
@ -300,11 +300,7 @@ struct physdev_pci_device_add {
* First element ([0]) is PXM domain associated with the device (if
* XEN_PCI_DEV_PXM is set)
*/
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint32_t optarr[];
#elif defined(__GNUC__)
uint32_t optarr[0];
#endif
uint32_t optarr[XEN_FLEX_ARRAY_DIM];
};
typedef struct physdev_pci_device_add physdev_pci_device_add_t;
DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t);

View File

@ -1,8 +1,8 @@
/******************************************************************************
* platform.h
*
*
* Hardware platform operations. Intended for use by domain-0 kernel.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -240,6 +240,7 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_efi_runtime_call_t);
#define XEN_FW_EFI_MEM_INFO 3
#define XEN_FW_EFI_RT_VERSION 4
#define XEN_FW_EFI_PCI_ROM 5
#define XEN_FW_EFI_APPLE_PROPERTIES 6
#define XEN_FW_KBD_SHIFT_FLAGS 5
struct xenpf_firmware_info {
/* IN variables. */
@ -299,6 +300,11 @@ struct xenpf_firmware_info {
uint64_t address;
xen_ulong_t size;
} pci_rom;
struct {
/* OUT variables */
uint64_t address;
xen_ulong_t size;
} apple_properties;
} efi_info; /* XEN_FW_EFI_INFO */
/* Int16, Fn02: Get keyboard shift flags. */

View File

@ -84,9 +84,19 @@ DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
/*
* PMU features:
* - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
* - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
* - XENPMU_FEATURE_IPC_ONLY: Restrict PMCs to the most minimum set possible.
* Instructions, cycles, and ref cycles. Can be
* used to calculate instructions-per-cycle (IPC)
* (ignored on AMD).
* - XENPMU_FEATURE_ARCH_ONLY: Restrict PMCs to the Intel Pre-Defined
* Architectural Performance Events exposed by
* cpuid and listed in the Intel developer's manual
* (ignored on AMD).
*/
#define XENPMU_FEATURE_INTEL_BTS 1
#define XENPMU_FEATURE_INTEL_BTS (1<<0)
#define XENPMU_FEATURE_IPC_ONLY (1<<1)
#define XENPMU_FEATURE_ARCH_ONLY (1<<2)
/*
* Shared PMU data between hypervisor and PV(H) domains.

View File

@ -118,6 +118,18 @@
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
/*
* Override the current vcpu affinity by pinning it to one physical cpu or
* undo this override restoring the previous affinity.
* @arg == pointer to sched_pin_override_t structure.
*
* A negative pcpu value will undo a previous pin override and restore the
* previous cpu affinity.
* This call is allowed for the hardware domain only and requires the cpu
* to be part of the domain's cpupool.
*/
#define SCHEDOP_pin_override 7
/* ` } */
struct sched_shutdown {
@ -148,6 +160,12 @@ struct sched_watchdog {
typedef struct sched_watchdog sched_watchdog_t;
DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
struct sched_pin_override {
int32_t pcpu;
};
typedef struct sched_pin_override sched_pin_override_t;
DEFINE_XEN_GUEST_HANDLE(sched_pin_override_t);
/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
* software to determine the appropriate action. For the most part, Xen does
@ -159,7 +177,16 @@ DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
#define SHUTDOWN_MAX 4 /* Maximum valid shutdown reason. */
/*
* Domain asked to perform 'soft reset' for it. The expected behavior is to
* reset internal Xen state for the domain returning it to the point where it
* was created but leaving the domain's memory contents and vCPU contexts
* intact. This will allow the domain to start over and set up all Xen specific
* interfaces again.
*/
#define SHUTDOWN_soft_reset 5
#define SHUTDOWN_MAX 5 /* Maximum valid shutdown reason. */
/* ` } */
#endif /* __XEN_PUBLIC_SCHED_H__ */

View File

@ -1,8 +1,8 @@
/******************************************************************************
* sysctl.h
*
*
* System management operations. For use by node control stack.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -34,9 +34,8 @@
#include "xen.h"
#include "domctl.h"
#include "physdev.h"
#include "tmem.h"
#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000C
#define XEN_SYSCTL_INTERFACE_VERSION 0x00000013
/*
* Read console content from Xen buffer ring.
@ -52,14 +51,12 @@ struct xen_sysctl_readconsole {
* IN: Start index for consuming from ring buffer (if @incremental);
* OUT: End index after consuming from ring buffer.
*/
uint32_t index;
uint32_t index;
/* IN: Virtual address to write console data. */
XEN_GUEST_HANDLE_64(char) buffer;
/* IN: Size of buffer; OUT: Bytes written to buffer. */
uint32_t count;
};
typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
/* Get trace buffers machine base address */
/* XEN_SYSCTL_tbuf_op */
@ -79,19 +76,34 @@ struct xen_sysctl_tbuf_op {
uint64_aligned_t buffer_mfn;
uint32_t size; /* Also an IN variable! */
};
typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
/*
* Get physical information about the host machine
*/
/* XEN_SYSCTL_physinfo */
/* (x86) The platform supports HVM guests. */
/* The platform supports HVM guests. */
#define _XEN_SYSCTL_PHYSCAP_hvm 0
#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
/* (x86) The platform supports HVM-guest direct access to I/O devices. */
#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
#define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
/* The platform supports PV guests. */
#define _XEN_SYSCTL_PHYSCAP_pv 1
#define XEN_SYSCTL_PHYSCAP_pv (1u<<_XEN_SYSCTL_PHYSCAP_pv)
/* The platform supports direct access to I/O devices with IOMMU. */
#define _XEN_SYSCTL_PHYSCAP_directio 2
#define XEN_SYSCTL_PHYSCAP_directio (1u<<_XEN_SYSCTL_PHYSCAP_directio)
/* The platform supports Hardware Assisted Paging. */
#define _XEN_SYSCTL_PHYSCAP_hap 3
#define XEN_SYSCTL_PHYSCAP_hap (1u<<_XEN_SYSCTL_PHYSCAP_hap)
/* The platform supports software paging. */
#define _XEN_SYSCTL_PHYSCAP_shadow 4
#define XEN_SYSCTL_PHYSCAP_shadow (1u<<_XEN_SYSCTL_PHYSCAP_shadow)
/* The platform supports sharing of HAP page tables with the IOMMU. */
#define _XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share 5
#define XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share \
(1u << _XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share)
/* Max XEN_SYSCTL_PHYSCAP_* constant. Used for ABI checking. */
#define XEN_SYSCTL_PHYSCAP_MAX XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share
struct xen_sysctl_physinfo {
uint32_t threads_per_core;
uint32_t cores_per_socket;
@ -100,17 +112,14 @@ struct xen_sysctl_physinfo {
uint32_t nr_nodes; /* # nodes currently online */
uint32_t max_node_id; /* Largest possible node ID on this host */
uint32_t cpu_khz;
uint32_t capabilities;/* XEN_SYSCTL_PHYSCAP_??? */
uint64_aligned_t total_pages;
uint64_aligned_t free_pages;
uint64_aligned_t scrub_pages;
uint64_aligned_t outstanding_pages;
uint64_aligned_t max_mfn; /* Largest possible MFN on this host */
uint32_t hw_cap[8];
/* XEN_SYSCTL_PHYSCAP_??? */
uint32_t capabilities;
};
typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
/*
* Get the ID of the current scheduler.
@ -120,8 +129,6 @@ struct xen_sysctl_sched_id {
/* OUT variable */
uint32_t sched_id;
};
typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
/* Interface for controlling Xen software performance counters. */
/* XEN_SYSCTL_perfc_op */
@ -148,8 +155,6 @@ struct xen_sysctl_perfc_op {
/* counter values (or NULL) */
XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
};
typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
/* XEN_SYSCTL_getdomaininfolist */
struct xen_sysctl_getdomaininfolist {
@ -160,18 +165,14 @@ struct xen_sysctl_getdomaininfolist {
/* OUT variables. */
uint32_t num_domains;
};
typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
/* Inject debug keys into Xen. */
/* XEN_SYSCTL_debug_keys */
struct xen_sysctl_debug_keys {
/* IN variables. */
XEN_GUEST_HANDLE_64(char) keys;
XEN_GUEST_HANDLE_64(const_char) keys;
uint32_t nr_keys;
};
typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
/* Get physical CPU information. */
/* XEN_SYSCTL_getcpuinfo */
@ -179,16 +180,14 @@ struct xen_sysctl_cpuinfo {
uint64_aligned_t idletime;
};
typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
struct xen_sysctl_getcpuinfo {
/* IN variables. */
uint32_t max_cpus;
XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
/* OUT variables. */
uint32_t nr_cpus;
};
typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
};
/* XEN_SYSCTL_availheap */
struct xen_sysctl_availheap {
@ -199,8 +198,6 @@ struct xen_sysctl_availheap {
/* OUT variables. */
uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
};
typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
/* XEN_SYSCTL_get_pmstat */
struct pm_px_val {
@ -219,8 +216,6 @@ struct pm_px_stat {
XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
};
typedef struct pm_px_stat pm_px_stat_t;
DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
struct pm_cx_stat {
uint32_t nr; /* entry nr in triggers & residencies, including C0 */
@ -259,29 +254,43 @@ struct xen_sysctl_get_pmstat {
/* other struct for tx, etc */
} u;
};
typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
/* XEN_SYSCTL_cpu_hotplug */
struct xen_sysctl_cpu_hotplug {
/* IN variables */
uint32_t cpu; /* Physical cpu. */
/* Single CPU enable/disable. */
#define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0
#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
/*
* SMT enable/disable.
*
* These two ops loop over all present CPUs, and either online or offline
* every non-primary sibling thread (those with a thread id which is not
* 0). This behaviour is chosen to simplify the implementation.
*
* They are intended as a shorthand for identifying and feeding the cpu
* numbers individually to HOTPLUG_{ON,OFF}LINE.
*
* These are not expected to be used in conjunction with debugging options
* such as `maxcpus=` or when other manual configuration of offline cpus
* is in use.
*/
#define XEN_SYSCTL_CPU_HOTPLUG_SMT_ENABLE 2
#define XEN_SYSCTL_CPU_HOTPLUG_SMT_DISABLE 3
uint32_t op; /* hotplug opcode */
};
typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
/*
* Get/set xen power management, include
* Get/set xen power management, include
* 1. cpufreq governors and related parameters
*/
/* XEN_SYSCTL_pm_op */
struct xen_userspace {
uint32_t scaling_setspeed;
};
typedef struct xen_userspace xen_userspace_t;
struct xen_ondemand {
uint32_t sampling_rate_max;
@ -290,10 +299,9 @@ struct xen_ondemand {
uint32_t sampling_rate;
uint32_t up_threshold;
};
typedef struct xen_ondemand xen_ondemand_t;
/*
* cpufreq para name of this structure named
/*
* cpufreq para name of this structure named
* same as sysfs file name of native linux
*/
#define CPUFREQ_NAME_LEN 16
@ -356,7 +364,11 @@ struct xen_sysctl_pm_op {
/* set/reset scheduler power saving option */
#define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21
/* cpuidle max_cstate access command */
/*
* cpuidle max C-state and max C-sub-state access command:
* Set cpuid to 0 for max C-state.
* Set cpuid to 1 for max C-sub-state.
*/
#define XEN_SYSCTL_pm_op_get_max_cstate 0x22
#define XEN_SYSCTL_pm_op_set_max_cstate 0x23
@ -376,10 +388,9 @@ struct xen_sysctl_pm_op {
struct xen_set_cpufreq_para set_para;
uint64_aligned_t get_avgfreq;
uint32_t set_sched_opt_smt;
#define XEN_SYSCTL_CX_UNLIMITED 0xffffffff
uint32_t get_max_cstate;
uint32_t set_max_cstate;
uint32_t get_vcpu_migration_delay;
uint32_t set_vcpu_migration_delay;
} u;
};
@ -461,8 +472,6 @@ struct xen_sysctl_lockprof_op {
/* profile information (or NULL) */
XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
};
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
/* XEN_SYSCTL_cputopoinfo */
#define XEN_INVALID_CORE_ID (~0U)
@ -493,8 +502,6 @@ struct xen_sysctl_cputopoinfo {
uint32_t num_cpus;
XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
};
typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
/* XEN_SYSCTL_numainfo */
#define XEN_INVALID_MEM_SZ (~0U)
@ -535,8 +542,6 @@ struct xen_sysctl_numainfo {
*/
XEN_GUEST_HANDLE_64(uint32) distance;
};
typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
/* XEN_SYSCTL_cpupool_op */
#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
@ -556,8 +561,42 @@ struct xen_sysctl_cpupool_op {
uint32_t n_dom; /* OUT: I */
struct xenctl_bitmap cpumap; /* OUT: IF */
};
typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
/*
* Error return values of cpupool operations:
*
* -EADDRINUSE:
* XEN_SYSCTL_CPUPOOL_OP_RMCPU: A vcpu is temporarily pinned to the cpu
* which is to be removed from a cpupool.
* -EADDRNOTAVAIL:
* XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: A previous
* request to remove a cpu from a cpupool was terminated with -EAGAIN
* and has not been retried using the same parameters.
* -EAGAIN:
* XEN_SYSCTL_CPUPOOL_OP_RMCPU: The cpu can't be removed from the cpupool
* as it is active in the hypervisor. A retry will succeed soon.
* -EBUSY:
* XEN_SYSCTL_CPUPOOL_OP_DESTROY, XEN_SYSCTL_CPUPOOL_OP_RMCPU: A cpupool
* can't be destroyed or the last cpu can't be removed as there is still
* a running domain in that cpupool.
* -EEXIST:
* XEN_SYSCTL_CPUPOOL_OP_CREATE: A cpupool_id was specified and is already
* existing.
* -EINVAL:
* XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: An illegal
* cpu was specified (cpu does not exist).
* XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN: An illegal domain was specified
* (domain id illegal or not suitable for operation).
* -ENODEV:
* XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: The specified
* cpu is either not free (add) or not member of the specified cpupool
* (remove).
* -ENOENT:
* all: The cpupool with the specified cpupool_id doesn't exist.
*
* Some common error return values like -ENOMEM and -EFAULT are possible for
* all the operations.
*/
#define ARINC653_MAX_DOMAINS_PER_SCHEDULE 64
/*
@ -587,18 +626,30 @@ struct xen_sysctl_arinc653_schedule {
typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t);
/*
* Valid range for context switch rate limit (in microseconds).
* Applicable to Credit and Credit2 schedulers.
*/
#define XEN_SYSCTL_SCHED_RATELIMIT_MAX 500000
#define XEN_SYSCTL_SCHED_RATELIMIT_MIN 100
struct xen_sysctl_credit_schedule {
/* Length of timeslice in milliseconds */
#define XEN_SYSCTL_CSCHED_TSLICE_MAX 1000
#define XEN_SYSCTL_CSCHED_TSLICE_MIN 1
unsigned tslice_ms;
/* Rate limit (minimum timeslice) in microseconds */
#define XEN_SYSCTL_SCHED_RATELIMIT_MAX 500000
#define XEN_SYSCTL_SCHED_RATELIMIT_MIN 100
unsigned ratelimit_us;
/*
* How long we consider a vCPU to be cache-hot on the
* CPU where it has run (max 100ms, in microseconds)
*/
#define XEN_SYSCTL_CSCHED_MGR_DLY_MAX_US (100 * 1000)
unsigned vcpu_migr_delay_us;
};
struct xen_sysctl_credit2_schedule {
unsigned ratelimit_us;
};
typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t);
/* XEN_SYSCTL_scheduler_op */
/* Set or get info? */
@ -613,45 +664,47 @@ struct xen_sysctl_scheduler_op {
XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule;
} sched_arinc653;
struct xen_sysctl_credit_schedule sched_credit;
struct xen_sysctl_credit2_schedule sched_credit2;
} u;
};
typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
/* XEN_SYSCTL_coverage_op */
/*
* Get total size of information, to help allocate
* the buffer. The pointer points to a 32 bit value.
*/
#define XEN_SYSCTL_COVERAGE_get_total_size 0
/*
* Read coverage information in a single run
* You must use a tool to split them.
* Output format of gcov data:
*
* XEN_GCOV_FORMAT_MAGIC XEN_GCOV_RECORD ... XEN_GCOV_RECORD
*
* That is, one magic number followed by 0 or more record.
*
* The magic number is stored as an uint32_t field.
*
* The record is packed and variable in length. It has the form:
*
* filename: a NULL terminated path name extracted from gcov, used to
* create the name of gcda file.
* size: a uint32_t field indicating the size of the payload, the
* unit is byte.
* payload: the actual payload, length is `size' bytes.
*
* Userspace tool will split the record to different files.
*/
#define XEN_SYSCTL_COVERAGE_read 1
#define XEN_GCOV_FORMAT_MAGIC 0x58434f56 /* XCOV */
/*
* Reset all the coverage counters to 0
* No parameters.
* Ouput format of LLVM coverage data is just a raw stream, as would be
* written by the compiler_rt run time library into a .profraw file. There
* are no special Xen tags or delimiters because none are needed.
*/
#define XEN_SYSCTL_COVERAGE_reset 2
/*
* Like XEN_SYSCTL_COVERAGE_read but reset also
* counters to 0 in a single call.
*/
#define XEN_SYSCTL_COVERAGE_read_and_reset 3
#define XEN_SYSCTL_COVERAGE_get_size 0 /* Get total size of output data */
#define XEN_SYSCTL_COVERAGE_read 1 /* Read output data */
#define XEN_SYSCTL_COVERAGE_reset 2 /* Reset all counters */
struct xen_sysctl_coverage_op {
uint32_t cmd; /* XEN_SYSCTL_COVERAGE_* */
union {
uint32_t total_size; /* OUT */
XEN_GUEST_HANDLE_64(uint8) raw_info; /* OUT */
} u;
uint32_t cmd;
uint32_t size; /* IN/OUT: size of the buffer */
XEN_GUEST_HANDLE_64(char) buffer; /* OUT */
};
typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
#define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
@ -670,14 +723,12 @@ struct xen_sysctl_psr_cmt_op {
} l3_cache;
} u;
};
typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
/* XEN_SYSCTL_pcitopoinfo */
#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
struct xen_sysctl_pcitopoinfo {
/*
* IN: Number of elements in 'pcitopo' and 'nodes' arrays.
* IN: Number of elements in 'devs' and 'nodes' arrays.
* OUT: Number of processed elements of those arrays.
*/
uint32_t num_devs;
@ -694,75 +745,322 @@ struct xen_sysctl_pcitopoinfo {
*/
XEN_GUEST_HANDLE_64(uint32) nodes;
};
typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
#define XEN_SYSCTL_PSR_CAT_get_l3_info 0
struct xen_sysctl_psr_cat_op {
uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CAT_* */
#define XEN_SYSCTL_PSR_get_l3_info 0
#define XEN_SYSCTL_PSR_get_l2_info 1
#define XEN_SYSCTL_PSR_get_mba_info 2
struct xen_sysctl_psr_alloc {
uint32_t cmd; /* IN: XEN_SYSCTL_PSR_* */
uint32_t target; /* IN */
union {
struct {
uint32_t cbm_len; /* OUT: CBM length */
uint32_t cos_max; /* OUT: Maximum COS */
} l3_info;
#define XEN_SYSCTL_PSR_CAT_L3_CDP (1u << 0)
uint32_t flags; /* OUT: CAT flags */
} cat_info;
struct {
uint32_t thrtl_max; /* OUT: Maximum throttle */
uint32_t cos_max; /* OUT: Maximum COS */
#define XEN_SYSCTL_PSR_MBA_LINEAR (1u << 0)
uint32_t flags; /* OUT: MBA flags */
} mba_info;
} u;
};
typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
#define XEN_SYSCTL_TMEM_OP_THAW 0
#define XEN_SYSCTL_TMEM_OP_FREEZE 1
#define XEN_SYSCTL_TMEM_OP_FLUSH 2
#define XEN_SYSCTL_TMEM_OP_DESTROY 3
#define XEN_SYSCTL_TMEM_OP_LIST 4
#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT 5
#define XEN_SYSCTL_TMEM_OP_SET_CAP 6
#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS 7
#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION 11
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS 12
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP 14
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS 15
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS 16
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES 17
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID 18
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20
#define XEN_SYSCTL_TMEM_OP_SAVE_END 21
#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN 30
#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE 32
#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE 33
/*
* XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_[PAGE|INV] override the 'buf' in
* xen_sysctl_tmem_op with this structure - sometimes with an extra
* page tackled on.
* XEN_SYSCTL_get_cpu_levelling_caps (x86 specific)
*
* Return hardware capabilities concerning masking or faulting of the cpuid
* instruction for PV guests.
*/
struct tmem_handle {
uint32_t pool_id;
uint32_t index;
xen_tmem_oid_t oid;
struct xen_sysctl_cpu_levelling_caps {
#define XEN_SYSCTL_CPU_LEVELCAP_faulting (1ul << 0) /* CPUID faulting */
#define XEN_SYSCTL_CPU_LEVELCAP_ecx (1ul << 1) /* 0x00000001.ecx */
#define XEN_SYSCTL_CPU_LEVELCAP_edx (1ul << 2) /* 0x00000001.edx */
#define XEN_SYSCTL_CPU_LEVELCAP_extd_ecx (1ul << 3) /* 0x80000001.ecx */
#define XEN_SYSCTL_CPU_LEVELCAP_extd_edx (1ul << 4) /* 0x80000001.edx */
#define XEN_SYSCTL_CPU_LEVELCAP_xsave_eax (1ul << 5) /* 0x0000000D:1.eax */
#define XEN_SYSCTL_CPU_LEVELCAP_thermal_ecx (1ul << 6) /* 0x00000006.ecx */
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_eax (1ul << 7) /* 0x00000007:0.eax */
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx (1ul << 8) /* 0x00000007:0.ebx */
uint32_t caps;
};
struct xen_sysctl_tmem_op {
uint32_t cmd; /* IN: XEN_SYSCTL_TMEM_OP_* . */
int32_t pool_id; /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
uint32_t cli_id; /* IN: client id, 0 for XEN_SYSCTL_TMEM_QUERY_FREEABLE_MB
for all others can be the domain id or
XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
uint32_t arg1; /* IN: If not applicable to command use 0. */
uint32_t arg2; /* IN: If not applicable to command use 0. */
uint32_t pad; /* Padding so structure is the same under 32 and 64. */
xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
/*
* XEN_SYSCTL_get_cpu_featureset (x86 specific)
*
* Return information about featuresets available on this host.
* - Raw: The real cpuid values.
* - Host: The values Xen is using, (after command line overrides, etc).
* - PV: Maximum set of features which can be given to a PV guest.
* - HVM: Maximum set of features which can be given to a HVM guest.
* May fail with -EOPNOTSUPP if querying for PV or HVM data when support is
* compiled out of Xen.
*/
struct xen_sysctl_cpu_featureset {
#define XEN_SYSCTL_cpu_featureset_raw 0
#define XEN_SYSCTL_cpu_featureset_host 1
#define XEN_SYSCTL_cpu_featureset_pv 2
#define XEN_SYSCTL_cpu_featureset_hvm 3
uint32_t index; /* IN: Which featureset to query? */
uint32_t nr_features; /* IN/OUT: Number of entries in/written to
* 'features', or the maximum number of features if
* the guest handle is NULL. NB. All featuresets
* come from the same numberspace, so have the same
* maximum length. */
XEN_GUEST_HANDLE_64(uint32) features; /* OUT: */
};
typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
/*
* XEN_SYSCTL_LIVEPATCH_op
*
* Refer to the docs/unstable/misc/livepatch.markdown
* for the design details of this hypercall.
*
* There are four sub-ops:
* XEN_SYSCTL_LIVEPATCH_UPLOAD (0)
* XEN_SYSCTL_LIVEPATCH_GET (1)
* XEN_SYSCTL_LIVEPATCH_LIST (2)
* XEN_SYSCTL_LIVEPATCH_ACTION (3)
*
* The normal sequence of sub-ops is to:
* 1) XEN_SYSCTL_LIVEPATCH_UPLOAD to upload the payload. If errors STOP.
* 2) XEN_SYSCTL_LIVEPATCH_GET to check the `->rc`. If -XEN_EAGAIN spin.
* If zero go to next step.
* 3) XEN_SYSCTL_LIVEPATCH_ACTION with LIVEPATCH_ACTION_APPLY to apply the patch.
* 4) XEN_SYSCTL_LIVEPATCH_GET to check the `->rc`. If in -XEN_EAGAIN spin.
* If zero exit with success.
*/
#define LIVEPATCH_PAYLOAD_VERSION 2
/*
* .livepatch.funcs structure layout defined in the `Payload format`
* section in the Live Patch design document.
*
* We guard this with __XEN__ as toolstacks SHOULD not use it.
*/
#ifdef __XEN__
#define LIVEPATCH_OPAQUE_SIZE 31
struct livepatch_expectation {
uint8_t enabled : 1;
uint8_t len : 5; /* Length of data up to LIVEPATCH_OPAQUE_SIZE
(5 bits is enough for now) */
uint8_t rsv : 2; /* Reserved. Zero value */
uint8_t data[LIVEPATCH_OPAQUE_SIZE]; /* Same size as opaque[] buffer of
struct livepatch_func. This is the
max number of bytes to be patched */
};
typedef struct livepatch_expectation livepatch_expectation_t;
typedef enum livepatch_func_state {
LIVEPATCH_FUNC_NOT_APPLIED,
LIVEPATCH_FUNC_APPLIED
} livepatch_func_state_t;
struct livepatch_func {
const char *name; /* Name of function to be patched. */
void *new_addr;
void *old_addr;
uint32_t new_size;
uint32_t old_size;
uint8_t version; /* MUST be LIVEPATCH_PAYLOAD_VERSION. */
uint8_t opaque[LIVEPATCH_OPAQUE_SIZE];
uint8_t applied;
uint8_t _pad[7];
livepatch_expectation_t expect;
};
typedef struct livepatch_func livepatch_func_t;
#endif
/*
* Structure describing an ELF payload. Uniquely identifies the
* payload. Should be human readable.
* Recommended length is upto XEN_LIVEPATCH_NAME_SIZE.
* Includes the NUL terminator.
*/
#define XEN_LIVEPATCH_NAME_SIZE 128
struct xen_livepatch_name {
XEN_GUEST_HANDLE_64(char) name; /* IN: pointer to name. */
uint16_t size; /* IN: size of name. May be upto
XEN_LIVEPATCH_NAME_SIZE. */
uint16_t pad[3]; /* IN: MUST be zero. */
};
/*
* Upload a payload to the hypervisor. The payload is verified
* against basic checks and if there are any issues the proper return code
* will be returned. The payload is not applied at this time - that is
* controlled by XEN_SYSCTL_LIVEPATCH_ACTION.
*
* The return value is zero if the payload was succesfully uploaded.
* Otherwise an EXX return value is provided. Duplicate `name` are not
* supported.
*
* The payload at this point is verified against basic checks.
*
* The `payload` is the ELF payload as mentioned in the `Payload format`
* section in the Live Patch design document.
*/
#define XEN_SYSCTL_LIVEPATCH_UPLOAD 0
struct xen_sysctl_livepatch_upload {
struct xen_livepatch_name name; /* IN, name of the patch. */
uint64_t size; /* IN, size of the ELF file. */
XEN_GUEST_HANDLE_64(uint8) payload; /* IN, the ELF file. */
};
/*
* Retrieve an status of an specific payload.
*
* Upon completion the `struct xen_livepatch_status` is updated.
*
* The return value is zero on success and XEN_EXX on failure. This operation
* is synchronous and does not require preemption.
*/
#define XEN_SYSCTL_LIVEPATCH_GET 1
struct xen_livepatch_status {
#define LIVEPATCH_STATE_CHECKED 1
#define LIVEPATCH_STATE_APPLIED 2
uint32_t state; /* OUT: LIVEPATCH_STATE_*. */
int32_t rc; /* OUT: 0 if no error, otherwise -XEN_EXX. */
};
typedef struct xen_livepatch_status xen_livepatch_status_t;
DEFINE_XEN_GUEST_HANDLE(xen_livepatch_status_t);
struct xen_sysctl_livepatch_get {
struct xen_livepatch_name name; /* IN, name of the payload. */
struct xen_livepatch_status status; /* IN/OUT, state of it. */
};
/*
* Retrieve an array of abbreviated status, names and metadata of payloads that
* are loaded in the hypervisor.
*
* If the hypercall returns an positive number, it is the number (up to `nr`)
* of the payloads returned, along with `nr` updated with the number of remaining
* payloads, `version` updated (it may be the same across hypercalls. If it varies
* the data is stale and further calls could fail), `name_total_size` and
* `metadata_total_size` containing total sizes of transferred data for both the
* arrays.
* The `status`, `name`, `len`, `metadata` and `metadata_len` are updated at their
* designed index value (`idx`) with the returned value of data.
*
* If the hypercall returns E2BIG the `nr` is too big and should be
* lowered. The upper limit of `nr` is left to the implemention.
*
* Note that due to the asynchronous nature of hypercalls the domain might have
* added or removed the number of payloads making this information stale. It is
* the responsibility of the toolstack to use the `version` field to check
* between each invocation. if the version differs it should discard the stale
* data and start from scratch. It is OK for the toolstack to use the new
* `version` field.
*/
#define XEN_SYSCTL_LIVEPATCH_LIST 2
struct xen_sysctl_livepatch_list {
uint32_t version; /* OUT: Hypervisor stamps value.
If varies between calls, we are
* getting stale data. */
uint32_t idx; /* IN: Index into hypervisor list. */
uint32_t nr; /* IN: How many status, name, and len
should fill out. Can be zero to get
amount of payloads and version.
OUT: How many payloads left. */
uint32_t pad; /* IN: Must be zero. */
uint32_t name_total_size; /* OUT: Total size of all transfer names */
uint32_t metadata_total_size; /* OUT: Total size of all transfer metadata */
XEN_GUEST_HANDLE_64(xen_livepatch_status_t) status; /* OUT. Must have enough
space allocate for nr of them. */
XEN_GUEST_HANDLE_64(char) name; /* OUT: Array of names. Each member
may have an arbitrary length up to
XEN_LIVEPATCH_NAME_SIZE bytes. Must have
nr of them. */
XEN_GUEST_HANDLE_64(uint32) len; /* OUT: Array of lengths of name's.
Must have nr of them. */
XEN_GUEST_HANDLE_64(char) metadata; /* OUT: Array of metadata strings. Each
member may have an arbitrary length.
Must have nr of them. */
XEN_GUEST_HANDLE_64(uint32) metadata_len; /* OUT: Array of lengths of metadata's.
Must have nr of them. */
};
/*
* Perform an operation on the payload structure referenced by the `name` field.
* The operation request is asynchronous and the status should be retrieved
* by using either XEN_SYSCTL_LIVEPATCH_GET or XEN_SYSCTL_LIVEPATCH_LIST hypercall.
*/
#define XEN_SYSCTL_LIVEPATCH_ACTION 3
struct xen_sysctl_livepatch_action {
struct xen_livepatch_name name; /* IN, name of the patch. */
#define LIVEPATCH_ACTION_UNLOAD 1
#define LIVEPATCH_ACTION_REVERT 2
#define LIVEPATCH_ACTION_APPLY 3
#define LIVEPATCH_ACTION_REPLACE 4
uint32_t cmd; /* IN: LIVEPATCH_ACTION_*. */
uint32_t timeout; /* IN: If zero then uses */
/* hypervisor default. */
/* Or upper bound of time (ns) */
/* for operation to take. */
/*
* Override default inter-module buildid dependency chain enforcement.
* Check only if module is built for given hypervisor by comparing buildid.
*/
#define LIVEPATCH_ACTION_APPLY_NODEPS (1 << 0)
uint32_t flags; /* IN: action flags. */
/* Provide additional parameters */
/* for an action. */
uint32_t pad; /* IN: Always zero. */
};
struct xen_sysctl_livepatch_op {
uint32_t cmd; /* IN: XEN_SYSCTL_LIVEPATCH_*. */
uint32_t pad; /* IN: Always zero. */
union {
struct xen_sysctl_livepatch_upload upload;
struct xen_sysctl_livepatch_list list;
struct xen_sysctl_livepatch_get get;
struct xen_sysctl_livepatch_action action;
} u;
};
#if defined(__i386__) || defined(__x86_64__)
/*
* XEN_SYSCTL_get_cpu_policy (x86 specific)
*
* Return information about CPUID and MSR policies available on this host.
* - Raw: The real H/W values.
* - Host: The values Xen is using, (after command line overrides, etc).
* - Max_*: Maximum set of features a PV or HVM guest can use. Includes
* experimental features outside of security support.
* - Default_*: Default set of features a PV or HVM guest can use. This is
* the security supported set.
* May fail with -EOPNOTSUPP if querying for PV or HVM data when support is
* compiled out of Xen.
*/
struct xen_sysctl_cpu_policy {
#define XEN_SYSCTL_cpu_policy_raw 0
#define XEN_SYSCTL_cpu_policy_host 1
#define XEN_SYSCTL_cpu_policy_pv_max 2
#define XEN_SYSCTL_cpu_policy_hvm_max 3
#define XEN_SYSCTL_cpu_policy_pv_default 4
#define XEN_SYSCTL_cpu_policy_hvm_default 5
uint32_t index; /* IN: Which policy to query? */
uint32_t nr_leaves; /* IN/OUT: Number of leaves in/written to
* 'cpuid_policy', or the maximum number of leaves
* if the guest handle is NULL. */
uint32_t nr_msrs; /* IN/OUT: Number of MSRs in/written to
* 'msr_policy', or the maximum number of MSRs if
* the guest handle is NULL. */
uint32_t _rsvd; /* Must be zero. */
XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) cpuid_policy; /* OUT */
XEN_GUEST_HANDLE_64(xen_msr_entry_t) msr_policy; /* OUT */
};
typedef struct xen_sysctl_cpu_policy xen_sysctl_cpu_policy_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_policy_t);
#endif
struct xen_sysctl {
uint32_t cmd;
@ -787,8 +1085,13 @@ struct xen_sysctl {
#define XEN_SYSCTL_coverage_op 20
#define XEN_SYSCTL_psr_cmt_op 21
#define XEN_SYSCTL_pcitopoinfo 22
#define XEN_SYSCTL_psr_cat_op 23
#define XEN_SYSCTL_tmem_op 24
#define XEN_SYSCTL_psr_alloc 23
/* #define XEN_SYSCTL_tmem_op 24 */
#define XEN_SYSCTL_get_cpu_levelling_caps 25
#define XEN_SYSCTL_get_cpu_featureset 26
#define XEN_SYSCTL_livepatch_op 27
/* #define XEN_SYSCTL_set_parameter 28 */
#define XEN_SYSCTL_get_cpu_policy 29
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
@ -812,8 +1115,13 @@ struct xen_sysctl {
struct xen_sysctl_scheduler_op scheduler_op;
struct xen_sysctl_coverage_op coverage_op;
struct xen_sysctl_psr_cmt_op psr_cmt_op;
struct xen_sysctl_psr_cat_op psr_cat_op;
struct xen_sysctl_tmem_op tmem_op;
struct xen_sysctl_psr_alloc psr_alloc;
struct xen_sysctl_cpu_levelling_caps cpu_levelling_caps;
struct xen_sysctl_cpu_featureset cpu_featureset;
struct xen_sysctl_livepatch_op livepatch;
#if defined(__i386__) || defined(__x86_64__)
struct xen_sysctl_cpu_policy cpu_policy;
#endif
uint8_t pad[128];
} u;
};

View File

@ -1,8 +1,8 @@
/******************************************************************************
* tmem.h
*
*
* Guest OS interface to Xen Transcendent Memory.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -29,15 +29,11 @@
#include "xen.h"
#if __XEN_INTERFACE_VERSION__ < 0x00041300
/* version of ABI */
#define TMEM_SPEC_VERSION 1
/* Commands to HYPERVISOR_tmem_op() */
#ifdef __XEN__
#define TMEM_CONTROL 0 /* Now called XEN_SYSCTL_tmem_op */
#else
#undef TMEM_CONTROL
#endif
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
#define TMEM_PUT_PAGE 4
@ -51,9 +47,9 @@
#define TMEM_XCHG 10
#endif
/* Privileged commands to HYPERVISOR_tmem_op() */
#define TMEM_AUTH 101
#define TMEM_RESTORE_NEW 102
/* Privileged commands now called via XEN_SYSCTL_tmem_op. */
#define TMEM_AUTH 101 /* as XEN_SYSCTL_TMEM_OP_SET_AUTH. */
#define TMEM_RESTORE_NEW 102 /* as XEN_SYSCTL_TMEM_OP_SET_POOL. */
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
@ -92,7 +88,7 @@ struct tmem_op {
uint64_t uuid[2];
uint32_t flags;
uint32_t arg1;
} creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
} creat; /* for cmd == TMEM_NEW_POOL. */
struct {
#if __XEN_INTERFACE_VERSION__ < 0x00040600
uint64_t oid[3];
@ -111,6 +107,8 @@ typedef struct tmem_op tmem_op_t;
DEFINE_XEN_GUEST_HANDLE(tmem_op_t);
#endif
#endif /* __XEN_INTERFACE_VERSION__ < 0x00041300 */
#endif /* __XEN_PUBLIC_TMEM_H__ */
/*

View File

@ -1,6 +1,6 @@
/******************************************************************************
* include/public/trace.h
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -78,6 +78,7 @@
/* #define XEN_SCHEDULER_SEDF 2 (Removed) */
#define TRC_SCHED_ARINC653 3
#define TRC_SCHED_RTDS 4
#define TRC_SCHED_SNULL 5
/* Per-scheduler tracing */
#define TRC_SCHED_CLASS_EVT(_c, _e) \
@ -85,6 +86,9 @@
((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
(_e & TRC_SCHED_EVT_MASK) )
/* Trace classes for DOM0 operations */
#define TRC_DOM0_DOMOPS 0x00041000 /* Domains manipulations */
/* Trace classes for Hardware */
#define TRC_HW_PM 0x00801000 /* Power management traces */
#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
@ -112,6 +116,10 @@
#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16)
#define TRC_SCHED_SWITCH_INFCONT (TRC_SCHED_VERBOSE + 17)
#define TRC_DOM0_DOM_ADD (TRC_DOM0_DOMOPS + 1)
#define TRC_DOM0_DOM_REM (TRC_DOM0_DOMOPS + 2)
#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
@ -227,6 +235,8 @@
#define TRC_HVM_TRAP (TRC_HVM_HANDLER + 0x23)
#define TRC_HVM_TRAP_DEBUG (TRC_HVM_HANDLER + 0x24)
#define TRC_HVM_VLAPIC (TRC_HVM_HANDLER + 0x25)
#define TRC_HVM_XCR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x26)
#define TRC_HVM_XCR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x27)
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)

View File

@ -1,8 +1,8 @@
/******************************************************************************
* vcpu.h
*
*
* VCPU initialisation, query, and hotplug.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -38,11 +38,13 @@
*/
/*
* Initialise a VCPU. Each VCPU can be initialised only once. A
* Initialise a VCPU. Each VCPU can be initialised only once. A
* newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
*
* @extra_arg == pointer to vcpu_guest_context structure containing initial
* state for the VCPU.
*
* @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
* structure containing the initial state for the VCPU. For x86
* HVM based guests this is a pointer to a vcpu_hvm_context
* structure.
*/
#define VCPUOP_initialise 0
@ -81,6 +83,12 @@ struct vcpu_runstate_info {
int state;
/* When was current state entered (system time, ns)? */
uint64_t state_entry_time;
/*
* Update indicator set in state_entry_time:
* When activated via VMASST_TYPE_runstate_update_flag, set during
* updates in guest memory mapped copy of vcpu_runstate_info.
*/
#define XEN_RUNSTATE_UPDATE (xen_mk_ullong(1) << 63)
/*
* Time spent in each RUNSTATE_* (ns). The sum of these times is
* guaranteed not to drift from system time.
@ -163,7 +171,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
#define _VCPU_SSHOTTMR_future (0)
#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
/*
/*
* Register a memory location in the guest address space for the
* vcpu_info structure. This allows the guest to place the vcpu_info
* structure in a convenient place, such as in a per-cpu data area.
@ -184,7 +192,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
#define VCPUOP_send_nmi 11
/*
/*
* Get the physical ID information for a pinned vcpu's underlying physical
* processor. The physical ID informmation is architecture-specific.
* On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
@ -199,7 +207,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
/*
/*
* Register a memory location to get a secondary copy of the vcpu time
* parameters. The master copy still exists as part of the vcpu shared
* memory area, and this secondary copy is updated whenever the master copy

View File

@ -1,8 +1,8 @@
/******************************************************************************
* version.h
*
*
* Xen version, type, and compile information.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -30,7 +30,8 @@
#include "xen.h"
/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
/* NB. All ops return zero on success, except XENVER_{version,pagesize}
* XENVER_{version,pagesize,build_id} */
/* arg == NULL; returns major:minor (16:16). */
#define XENVER_version 0
@ -77,12 +78,28 @@ typedef struct xen_feature_info xen_feature_info_t;
/* arg == NULL; returns host memory page size. */
#define XENVER_pagesize 7
/* arg == xen_domain_handle_t. */
/* arg == xen_domain_handle_t.
*
* The toolstack fills it out for guest consumption. It is intended to hold
* the UUID of the guest.
*/
#define XENVER_guest_handle 8
#define XENVER_commandline 9
typedef char xen_commandline_t[1024];
/*
* Return value is the number of bytes written, or XEN_Exx on error.
* Calling with empty parameter returns the size of build_id.
*/
#define XENVER_build_id 10
struct xen_build_id {
uint32_t len; /* IN: size of buf[]. */
unsigned char buf[XEN_FLEX_ARRAY_DIM];
/* OUT: Variable length buffer with build_id. */
};
typedef struct xen_build_id xen_build_id_t;
#endif /* __XEN_PUBLIC_VERSION_H__ */
/*

View File

@ -29,7 +29,7 @@
#include "xen.h"
#define VM_EVENT_INTERFACE_VERSION 0x00000001
#define VM_EVENT_INTERFACE_VERSION 0x00000006
#if defined(__XEN__) || defined(__XEN_TOOLS__)
@ -74,21 +74,51 @@
* VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
*/
#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
/*
* Deny completion of the operation that triggered the event.
* Currently only useful for MSR, CR0, CR3 and CR4 write events.
*/
/*
* Deny completion of the operation that triggered the event.
* Currently only useful for MSR and control-register write events.
* Requires the vCPU to be paused already (synchronous events only).
*/
#define VM_EVENT_FLAG_DENY (1 << 6)
/*
* This flag can be set in a request or a response
*
* On a request, indicates that the event occurred in the alternate p2m specified by
* the altp2m_idx request field.
* On a request, indicates that the event occurred in the alternate p2m
* specified by the altp2m_idx request field.
*
* On a response, indicates that the VCPU should resume in the alternate p2m specified
* by the altp2m_idx response field if possible.
* On a response, indicates that the VCPU should resume in the alternate p2m
* specified by the altp2m_idx response field if possible.
*/
#define VM_EVENT_FLAG_ALTERNATE_P2M (1 << 7)
/*
* Set the vCPU registers to the values in the vm_event response.
* At the moment x86-only, applies to EAX-EDX, ESP, EBP, ESI, EDI, R8-R15,
* EFLAGS, and EIP.
* Requires the vCPU to be paused already (synchronous events only).
*/
#define VM_EVENT_FLAG_SET_REGISTERS (1 << 8)
/*
* Instruction cache is being sent back to the hypervisor in the event response
* to be used by the emulator. This flag is only useful when combined with
* VM_EVENT_FLAG_EMULATE and does not take presedence if combined with
* VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA, (i.e.
* if any of those flags are set, only those will be honored).
*/
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
/*
* Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
* interrupt pending after resuming the VCPU.
*/
#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
/*
* Execute fast singlestepping on vm_event response.
* Requires the vCPU to be paused already (synchronous events only).
*
* On a response requires setting the p2midx field of fast_singlestep to which
* Xen will switch the vCPU to on the occurance of the first singlestep, after
* which singlestep gets automatically disabled.
*/
#define VM_EVENT_FLAG_FAST_SINGLESTEP (1 << 11)
/*
* Reasons for the vm event request
@ -112,6 +142,23 @@
#define VM_EVENT_REASON_SINGLESTEP 7
/* An event has been requested via HVMOP_guest_request_vm_event. */
#define VM_EVENT_REASON_GUEST_REQUEST 8
/* A debug exception was caught */
#define VM_EVENT_REASON_DEBUG_EXCEPTION 9
/* CPUID executed */
#define VM_EVENT_REASON_CPUID 10
/*
* Privileged call executed (e.g. SMC).
* Note: event may be generated even if SMC condition check fails on some CPUs.
* As this behavior is CPU-specific, users are advised to not rely on it.
* These kinds of events will be filtered out in future versions.
*/
#define VM_EVENT_REASON_PRIVILEGED_CALL 11
/* An interrupt has been delivered. */
#define VM_EVENT_REASON_INTERRUPT 12
/* A descriptor table register was accessed. */
#define VM_EVENT_REASON_DESCRIPTOR_ACCESS 13
/* Current instruction is not implemented by the emulator */
#define VM_EVENT_REASON_EMUL_UNIMPLEMENTED 14
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
@ -119,9 +166,15 @@
#define VM_EVENT_X86_CR4 2
#define VM_EVENT_X86_XCR0 3
/* The limit field is right-shifted by 12 bits if .ar.g is set. */
struct vm_event_x86_selector_reg {
uint32_t limit : 20;
uint32_t ar : 12;
};
/*
* Using a custom struct (not hvm_hw_cpu) so as to not fill
* the vm_event ring buffer too quickly.
* Using custom vCPU structs (i.e. not hvm_hw_cpu) for both x86 and ARM
* so as to not fill the vm_event ring buffer too quickly.
*/
struct vm_event_regs_x86 {
uint64_t rax;
@ -141,6 +194,7 @@ struct vm_event_regs_x86 {
uint64_t r14;
uint64_t r15;
uint64_t rflags;
uint64_t dr6;
uint64_t dr7;
uint64_t rip;
uint64_t cr0;
@ -153,9 +207,40 @@ struct vm_event_regs_x86 {
uint64_t msr_efer;
uint64_t msr_star;
uint64_t msr_lstar;
uint64_t gdtr_base;
uint32_t cs_base;
uint32_t ss_base;
uint32_t ds_base;
uint32_t es_base;
uint64_t fs_base;
uint64_t gs_base;
uint32_t cs_arbytes;
struct vm_event_x86_selector_reg cs;
struct vm_event_x86_selector_reg ss;
struct vm_event_x86_selector_reg ds;
struct vm_event_x86_selector_reg es;
struct vm_event_x86_selector_reg fs;
struct vm_event_x86_selector_reg gs;
uint64_t shadow_gs;
uint16_t gdtr_limit;
uint16_t cs_sel;
uint16_t ss_sel;
uint16_t ds_sel;
uint16_t es_sel;
uint16_t fs_sel;
uint16_t gs_sel;
uint16_t _pad;
};
/*
* Only the register 'pc' can be set on a vm_event response using the
* VM_EVENT_FLAG_SET_REGISTERS flag.
*/
struct vm_event_regs_arm {
uint64_t ttbr0;
uint64_t ttbr1;
uint64_t ttbcr;
uint64_t pc;
uint32_t cpsr;
uint32_t _pad;
};
@ -170,16 +255,16 @@ struct vm_event_regs_x86 {
* FAULT_WITH_GLA: If the violation was triggered by accessing gla
* FAULT_IN_GPT: If the violation was triggered during translating gla
*/
#define MEM_ACCESS_R (1 << 0)
#define MEM_ACCESS_W (1 << 1)
#define MEM_ACCESS_X (1 << 2)
#define MEM_ACCESS_RWX (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_RW (MEM_ACCESS_R | MEM_ACCESS_W)
#define MEM_ACCESS_RX (MEM_ACCESS_R | MEM_ACCESS_X)
#define MEM_ACCESS_WX (MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_GLA_VALID (1 << 3)
#define MEM_ACCESS_FAULT_WITH_GLA (1 << 4)
#define MEM_ACCESS_FAULT_IN_GPT (1 << 5)
#define MEM_ACCESS_R (1 << 0)
#define MEM_ACCESS_W (1 << 1)
#define MEM_ACCESS_X (1 << 2)
#define MEM_ACCESS_RWX (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_RW (MEM_ACCESS_R | MEM_ACCESS_W)
#define MEM_ACCESS_RX (MEM_ACCESS_R | MEM_ACCESS_X)
#define MEM_ACCESS_WX (MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_GLA_VALID (1 << 3)
#define MEM_ACCESS_FAULT_WITH_GLA (1 << 4)
#define MEM_ACCESS_FAULT_IN_GPT (1 << 5)
struct vm_event_mem_access {
uint64_t gfn;
@ -196,13 +281,59 @@ struct vm_event_write_ctrlreg {
uint64_t old_value;
};
struct vm_event_singlestep {
uint64_t gfn;
};
struct vm_event_fast_singlestep {
uint16_t p2midx;
};
struct vm_event_debug {
uint64_t gfn;
uint64_t pending_dbg; /* Behaves like the VT-x PENDING_DBG field. */
uint32_t insn_length;
uint8_t type; /* HVMOP_TRAP_* */
uint8_t _pad[3];
};
struct vm_event_mov_to_msr {
uint64_t msr;
uint64_t value;
uint64_t new_value;
uint64_t old_value;
};
#define VM_EVENT_DESC_IDTR 1
#define VM_EVENT_DESC_GDTR 2
#define VM_EVENT_DESC_LDTR 3
#define VM_EVENT_DESC_TR 4
struct vm_event_desc_access {
union {
struct {
uint32_t instr_info; /* VMX: VMCS Instruction-Information */
uint32_t _pad1;
uint64_t exit_qualification; /* VMX: VMCS Exit Qualification */
} vmx;
} arch;
uint8_t descriptor; /* VM_EVENT_DESC_* */
uint8_t is_write;
uint8_t _pad[6];
};
struct vm_event_cpuid {
uint32_t insn_length;
uint32_t leaf;
uint32_t subleaf;
uint32_t _pad;
};
struct vm_event_interrupt_x86 {
uint32_t vector;
uint32_t type;
uint32_t error_code;
uint32_t _pad;
uint64_t cr2;
};
#define MEM_PAGING_DROP_PAGE (1 << 0)
@ -226,6 +357,10 @@ struct vm_event_emul_read_data {
uint8_t data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
};
struct vm_event_emul_insn_data {
uint8_t data[16]; /* Has to be completely filled */
};
typedef struct vm_event_st {
uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
uint32_t flags; /* VM_EVENT_FLAG_* */
@ -240,16 +375,27 @@ typedef struct vm_event_st {
struct vm_event_mem_access mem_access;
struct vm_event_write_ctrlreg write_ctrlreg;
struct vm_event_mov_to_msr mov_to_msr;
struct vm_event_desc_access desc_access;
struct vm_event_singlestep singlestep;
struct vm_event_fast_singlestep fast_singlestep;
struct vm_event_debug software_breakpoint;
struct vm_event_debug singlestep;
struct vm_event_debug debug_exception;
struct vm_event_cpuid cpuid;
union {
struct vm_event_interrupt_x86 x86;
} interrupt;
} u;
union {
union {
struct vm_event_regs_x86 x86;
struct vm_event_regs_arm arm;
} regs;
struct vm_event_emul_read_data emul_read_data;
union {
struct vm_event_emul_read_data read;
struct vm_event_emul_insn_data insn;
} emul;
} data;
} vm_event_request_t, vm_event_response_t;

View File

@ -1,8 +1,8 @@
/******************************************************************************
* xen-compat.h
*
*
* Guest OS interface to Xen. Compatibility layer.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -27,21 +27,20 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040600
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040e00
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
#elif !defined(__XEN_INTERFACE_VERSION__)
/*
* The interface version is not set if and only if xen/xen-os.h is not
* included.
*/
#error "Please include xen/xen-os.h"
/* Guests which do not specify a version get the legacy interface. */
#define __XEN_INTERFACE_VERSION__ 0x00000000
#endif
#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
#error "These header files do not support the requested interface version."
#endif
#define COMPAT_FLEX_ARRAY_DIM XEN_FLEX_ARRAY_DIM
#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */

View File

@ -1,8 +1,8 @@
/******************************************************************************
* xen.h
*
*
* Guest OS interface to Xen.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -52,6 +52,33 @@ DEFINE_XEN_GUEST_HANDLE(void);
DEFINE_XEN_GUEST_HANDLE(uint64_t);
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
/* Define a variable length array (depends on compiler). */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define XEN_FLEX_ARRAY_DIM
#elif defined(__GNUC__)
#define XEN_FLEX_ARRAY_DIM 0
#else
#define XEN_FLEX_ARRAY_DIM 1 /* variable size */
#endif
/* Turn a plain number into a C unsigned (long (long)) constant. */
#define __xen_mk_uint(x) x ## U
#define __xen_mk_ulong(x) x ## UL
#ifndef __xen_mk_ullong
# define __xen_mk_ullong(x) x ## ULL
#endif
#define xen_mk_uint(x) __xen_mk_uint(x)
#define xen_mk_ulong(x) __xen_mk_ulong(x)
#define xen_mk_ullong(x) __xen_mk_ullong(x)
#else
/* In assembly code we cannot use C numeric constant suffixes. */
#define xen_mk_uint(x) x
#define xen_mk_ulong(x) x
#define xen_mk_ullong(x) x
#endif
/*
@ -100,8 +127,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
#define __HYPERVISOR_argo_op 39
#define __HYPERVISOR_xenpmu_op 40
#define __HYPERVISOR_dm_op 41
#define __HYPERVISOR_hypfs_op 42
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
@ -138,11 +167,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
#endif
/*
/*
* VIRTUAL INTERRUPTS
*
*
* Virtual interrupts that a guest OS may receive from Xen.
*
*
* In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
* global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
* The latter can be allocated only once per guest: they must initially be
@ -158,8 +187,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
#define VIRQ_ARGO 11 /* G. Argo interdomain message notification */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
#define VIRQ_XENPMU 13 /* V. PMC interrupt */
@ -192,7 +221,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
* (x) encodes the PFD as follows:
* x == 0 => PFD == DOMID_SELF
* x != 0 => PFD == x - 1
*
*
* Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
* -------------
* ptr[1:0] == MMU_NORMAL_PT_UPDATE:
@ -238,17 +267,21 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
* To deallocate the pages, the operations are the reverse of the steps
* mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
* pagetable MUST not be in use (meaning that the cr3 is not set to it).
*
*
* ptr[1:0] == MMU_MACHPHYS_UPDATE:
* Updates an entry in the machine->pseudo-physical mapping table.
* ptr[:2] -- Machine address within the frame whose mapping to modify.
* The frame must belong to the FD, if one is specified.
* val -- Value to write into the mapping entry.
*
*
* ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
* As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
* with those in @val.
*
* ptr[1:0] == MMU_PT_UPDATE_NO_TRANSLATE:
* As MMU_NORMAL_PT_UPDATE above, but @val is not translated though FD
* page tables.
*
* @val is usually the machine frame number along with some attributes.
* The attributes by default follow the architecture defined bits. Meaning that
* if this is a X86_64 machine and four page table layout is used, the layout
@ -315,9 +348,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
*
* PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA. */
/* val never translated. */
/*
* MMU EXTENDED OPERATIONS
@ -451,17 +486,38 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
/* ` enum uvm_flags { */
#define UVMF_NONE (0UL<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
#define UVMF_FLUSHTYPE_MASK (3UL<<0)
#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
#define UVMF_NONE (xen_mk_ulong(0)<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (xen_mk_ulong(1)<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (xen_mk_ulong(2)<<0) /* Flush only one entry. */
#define UVMF_FLUSHTYPE_MASK (xen_mk_ulong(3)<<0)
#define UVMF_MULTI (xen_mk_ulong(0)<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (xen_mk_ulong(0)<<2) /* Flush local TLB. */
#define UVMF_ALL (xen_mk_ulong(1)<<2) /* Flush all TLBs. */
/* ` } */
/*
* Commands to HYPERVISOR_console_io().
* ` int
* ` HYPERVISOR_console_io(unsigned int cmd,
* ` unsigned int count,
* ` char buffer[]);
*
* @cmd: Command (see below)
* @count: Size of the buffer to read/write
* @buffer: Pointer in the guest memory
*
* List of commands:
*
* * CONSOLEIO_write: Write the buffer to Xen console.
* For the hardware domain, all the characters in the buffer will
* be written. Characters will be printed directly to the console.
* For all the other domains, only the printable characters will be
* written. Characters may be buffered until a newline (i.e '\n') is
* found.
* @return 0 on success, otherwise return an error code.
* * CONSOLEIO_read: Attempts to read up to @count characters from Xen
* console. The maximum buffer size (i.e. @count) supported is 2GB.
* @return the number of characters read on success, otherwise return
* an error code.
*/
#define CONSOLEIO_write 0
#define CONSOLEIO_read 1
@ -488,6 +544,21 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
/*
* x86 guests: Sane behaviour for virtual iopl
* - virtual iopl updated from do_iret() hypercalls.
* - virtual iopl reported in bounce frames.
* - guest kernels assumed to be level 0 for the purpose of iopl checks.
*/
#define VMASST_TYPE_architectural_iopl 4
/*
* All guests: activate update indicator in vcpu_runstate_info
* Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped
* vcpu_runstate_info during updates of the runstate information.
*/
#define VMASST_TYPE_runstate_update_flag 5
/*
* x86/64 guests: strictly hide M2P from user mode.
* This allows the guest to control respective hypervisor behavior:
@ -504,15 +575,11 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
#define MAX_VMASST_TYPE 3
#endif
#ifndef __ASSEMBLY__
typedef uint16_t domid_t;
/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)
#define DOMID_FIRST_RESERVED xen_mk_uint(0x7FF0)
/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF (0x7FF0U)
#define DOMID_SELF xen_mk_uint(0x7FF0)
/*
* DOMID_IO is used to restrict page-table updates to mapping I/O memory.
@ -520,28 +587,40 @@ typedef uint16_t domid_t;
* is useful to ensure that no mappings to the OS's own heap are accidentally
* installed. (e.g., in Linux this could cause havoc as reference counts
* aren't adjusted on the I/O-mapping code path).
* This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
* be specified by any calling domain.
* This only makes sense as HYPERVISOR_mmu_update()'s and
* HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument. For
* HYPERVISOR_mmu_update() context it can be specified by any calling domain,
* otherwise it's only permitted if the caller is privileged.
*/
#define DOMID_IO (0x7FF1U)
#define DOMID_IO xen_mk_uint(0x7FF1)
/*
* DOMID_XEN is used to allow privileged domains to map restricted parts of
* Xen's heap space (e.g., the machine_to_phys table).
* This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
* the caller is privileged.
* This only makes sense as
* - HYPERVISOR_mmu_update()'s, HYPERVISOR_mmuext_op()'s, or
* HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument,
* - with XENMAPSPACE_gmfn_foreign,
* and is only permitted if the caller is privileged.
*/
#define DOMID_XEN (0x7FF2U)
#define DOMID_XEN xen_mk_uint(0x7FF2)
/*
* DOMID_COW is used as the owner of sharable pages */
#define DOMID_COW (0x7FF3U)
#define DOMID_COW xen_mk_uint(0x7FF3)
/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID (0x7FF4U)
#define DOMID_INVALID xen_mk_uint(0x7FF4)
/* Idle domain. */
#define DOMID_IDLE (0x7FFFU)
#define DOMID_IDLE xen_mk_uint(0x7FFF)
/* Mask for valid domain id values */
#define DOMID_MASK xen_mk_uint(0x7FFF)
#ifndef __ASSEMBLY__
typedef uint16_t domid_t;
/*
* Send an array of these to HYPERVISOR_mmu_update().
@ -601,14 +680,22 @@ struct vcpu_time_info {
*/
uint32_t tsc_to_system_mul;
int8_t tsc_shift;
#if __XEN_INTERFACE_VERSION__ > 0x040600
uint8_t flags;
uint8_t pad1[2];
#else
int8_t pad1[3];
#endif
}; /* 32 bytes */
typedef struct vcpu_time_info vcpu_time_info_t;
#define XEN_PVCLOCK_TSC_STABLE_BIT (1 << 0)
#define XEN_PVCLOCK_GUEST_STOPPED (1 << 1)
struct vcpu_info {
/*
* 'evtchn_upcall_pending' is written non-zero by Xen to indicate
* a pending notification for a particular VCPU. It is then cleared
* a pending notification for a particular VCPU. It is then cleared
* by the guest OS /before/ checking for pending work, thus avoiding
* a set-and-check race. Note that the mask is only accessed by Xen
* on the CPU that is currently hosting the VCPU. This means that the
@ -671,7 +758,7 @@ struct shared_info {
* 3. Virtual interrupts ('events'). A domain can bind an event-channel
* port to a virtual interrupt source, such as the virtual-timer
* device or the emergency console.
*
*
* Event channels are addressed by a "port index". Each channel is
* associated with two bits of information:
* 1. PENDING -- notifies the domain that there is a pending notification
@ -682,7 +769,7 @@ struct shared_info {
* becomes pending while the channel is masked then the 'edge' is lost
* (i.e., when the channel is unmasked, the guest must manually handle
* pending notifications as no upcall will be scheduled by Xen).
*
*
* To expedite scanning of pending notifications, any 0->1 pending
* transition on an unmasked channel causes a corresponding bit in a
* per-vcpu selector word to be set. Each bit in the selector covers a
@ -692,12 +779,17 @@ struct shared_info {
xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
* their gettimeofday() syscall on this wallclock-base value.
* Wallclock time: updated by control software or RTC emulation.
* Guests should base their gettimeofday() syscall on this
* wallclock-base value.
* The values of wc_sec and wc_nsec are offsets from the Unix epoch
* adjusted by the domain's 'time offset' (in seconds) as set either
* by XEN_DOMCTL_settimeoffset, or adjusted via a guest write to the
* emulated RTC.
*/
uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
uint32_t wc_sec;
uint32_t wc_nsec;
#if !defined(__i386__)
uint32_t wc_sec_hi;
# define xen_wc_sec_hi wc_sec_hi
@ -723,7 +815,7 @@ typedef struct shared_info shared_info_t;
* (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
* (unless relocated due to XEN_ELFNOTE_INIT_P2M)
* d. start_info_t structure [register ESI (x86)]
* d. start_info_t structure [register rSI (x86)]
* in case of dom0 this page contains the console info, too
* e. unless dom0: xenstore ring page
* f. unless dom0: console ring page
@ -869,6 +961,11 @@ typedef struct dom0_vga_console_info {
uint32_t gbl_caps;
/* Mode attributes (offset 0x0, VESA command 0x4f01). */
uint16_t mode_attrs;
uint16_t pad;
#endif
#if __XEN_INTERFACE_VERSION__ >= 0x00040d00
/* high 32 bits of lfb_base */
uint32_t ext_lfb_base;
#endif
} vesa_lfb;
} u;
@ -878,19 +975,41 @@ typedef struct dom0_vga_console_info {
/*
 * Opaque 128-bit per-domain identifier.
 * NOTE(review): presumably a toolstack-assigned UUID — confirm against the
 * domain-creation path before relying on its contents.
 */
typedef uint8_t xen_domain_handle_t[16];
/* Turn a plain number into a C unsigned long constant. */
/* Two-level expansion so macro arguments are expanded before 'UL' is pasted. */
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)
/* Pre-instantiated guest-handle types for the basic fixed-width integers. */
__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
#else /* __ASSEMBLY__ */
/*
 * Raw byte representation of a UUID.  All components are stored big-endian
 * (see the XEN_DEFINE_UUID comment: compatible with Linux/libuuid, not with
 * Microsoft's mixed-endian encoding).
 */
typedef struct {
uint8_t a[16];
} xen_uuid_t;
/* In assembly code we cannot use C numeric constant suffixes. */
#define mk_unsigned_long(x) x
/*
* XEN_DEFINE_UUID(0x00112233, 0x4455, 0x6677, 0x8899,
* 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)
* will construct UUID 00112233-4455-6677-8899-aabbccddeeff presented as
* {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
* 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
*
* NB: This is compatible with Linux kernel and with libuuid, but it is not
* compatible with Microsoft, as they use mixed-endian encoding (some
* components are little-endian, some are big-endian).
*/
/*
 * Expand to a brace-enclosed initializer suitable for xen_uuid_t: the
 * 32-bit 'a' and the 16-bit 'b', 'c', 'd' components are serialised
 * big-endian into the byte array, followed by the six trailing bytes
 * e1..e6 verbatim.  Double braces initialise the struct's inner array.
 */
#define XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6) \
{{((a) >> 24) & 0xFF, ((a) >> 16) & 0xFF, \
((a) >> 8) & 0xFF, ((a) >> 0) & 0xFF, \
((b) >> 8) & 0xFF, ((b) >> 0) & 0xFF, \
((c) >> 8) & 0xFF, ((c) >> 0) & 0xFF, \
((d) >> 8) & 0xFF, ((d) >> 0) & 0xFF, \
e1, e2, e3, e4, e5, e6}}
/*
 * With C99 (or GNU C, which supports compound literals as an extension)
 * wrap the initializer in a (xen_uuid_t) compound literal so the result
 * can be used as an expression; otherwise it is only usable where a plain
 * initializer is valid.
 */
#if defined(__STDC_VERSION__) ? __STDC_VERSION__ >= 199901L : defined(__GNUC__)
#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
((xen_uuid_t)XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6))
#else
#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6)
#endif /* __STDC_VERSION__ / __GNUC__ */
#endif /* !__ASSEMBLY__ */

View File

@ -1,9 +1,9 @@
/******************************************************************************
* xenoprof.h
*
*
* Interface for enabling system wide profiling based on hardware performance
* counters
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -68,7 +68,7 @@ struct event_log {
};
/* PC value that indicates a special code */
#define XENOPROF_ESCAPE_CODE (~0ULL)
#define XENOPROF_ESCAPE_CODE (~xen_mk_ullong(0))
/* Transient events for the xenoprof->oprofile cpu buf */
#define XENOPROF_TRACE_BEGIN 1

View File

@ -70,6 +70,7 @@ struct xen_flask_transition {
uint32_t newsid;
};
#if __XEN_INTERFACE_VERSION__ < 0x00040800
struct xen_flask_userlist {
/* IN: starting SID for list */
uint32_t start_sid;
@ -83,6 +84,7 @@ struct xen_flask_userlist {
XEN_GUEST_HANDLE(uint32) sids;
} u;
};
#endif
struct xen_flask_boolean {
/* IN/OUT: numeric identifier for boolean [GET/SET]
@ -167,7 +169,7 @@ struct xen_flask_op {
#define FLASK_ACCESS 6
#define FLASK_CREATE 7
#define FLASK_RELABEL 8
#define FLASK_USER 9
#define FLASK_USER 9 /* No longer implemented */
#define FLASK_POLICYVERS 10
#define FLASK_GETBOOL 11
#define FLASK_SETBOOL 12
@ -193,7 +195,9 @@ struct xen_flask_op {
struct xen_flask_access access;
/* FLASK_CREATE, FLASK_RELABEL, FLASK_MEMBER */
struct xen_flask_transition transition;
#if __XEN_INTERFACE_VERSION__ < 0x00040800
struct xen_flask_userlist userlist;
#endif
/* FLASK_GETBOOL, FLASK_SETBOOL */
struct xen_flask_boolean boolean;
struct xen_flask_setavc_threshold setavc_threshold;