partial update to interface headers to 3.2

MFC after:	1 month
Kip Macy 2008-09-25 07:01:31 +00:00
parent 82c2cf3b05
commit a972cc523a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=183340
13 changed files with 411 additions and 60 deletions

View File

@ -597,6 +597,7 @@ static int xenpic_vector(struct intsrc *isrc);
static int xenpic_source_pending(struct intsrc *isrc);
static void xenpic_suspend(struct pic* pic);
static void xenpic_resume(struct pic* pic);
static void xenpic_assign_cpu(struct intsrc *, u_int apic_id);
struct pic xenpic_dynirq_template = {
@ -620,10 +621,12 @@ struct pic xenpic_pirq_template = {
.pic_vector = xenpic_vector,
.pic_source_pending = xenpic_source_pending,
.pic_suspend = xenpic_suspend,
.pic_resume = xenpic_resume
.pic_resume = xenpic_resume,
.pic_assign_cpu = xenpic_assign_cpu
};
void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
@ -734,7 +737,13 @@ static void
xenpic_resume(struct pic* pic)
{
TODO;
}
}
static void
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
TODO;
}
void
notify_remote_via_irq(int irq)

View File

@ -15,6 +15,7 @@ __FBSDID("$FreeBSD$");
#include "opt_global.h"
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
@ -30,15 +31,12 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <machine/xen/hypervisor.h>
#include <machine/xen/synch_bitops.h>
#include <xen/gnttab.h>
#define cmpxchg(a, b, c) atomic_cmpset_int((volatile u_int *)(a),(b),(c))
#if 1
#define ASSERT(_p) \
if ( !(_p) ) { printk("Assertion '%s': line %d, file %s\n", \
@ -463,7 +461,11 @@ static int
gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
unsigned long *frames;
#ifdef __LP64__
uint64_t *frames;
#else
uint32_t *frames;
#endif
unsigned int nr_gframes = end_idx + 1;
int i, rc;
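
For reference, a minimal sketch of how a frame list of this width is handed to the hypervisor, mirroring what the surrounding gnttab_map() does; the helper name and error handling are illustrative, and the usual <xen/gnttab.h> / hypervisor headers are assumed to be included:

static int
example_setup_grant_frames(unsigned int nr_gframes)
{
        struct gnttab_setup_table setup;
#ifdef __LP64__
        uint64_t *frames;
#else
        uint32_t *frames;
#endif
        int rc;

        frames = malloc(nr_gframes * sizeof(*frames), M_DEVBUF, M_NOWAIT);
        if (frames == NULL)
                return (ENOMEM);

        setup.dom = DOMID_SELF;
        setup.nr_frames = nr_gframes;
        set_xen_guest_handle(setup.frame_list, frames);

        /* One element in the operation array. */
        rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
        if (rc != 0 || setup.status != GNTST_okay) {
                free(frames, M_DEVBUF);
                return (EINVAL);
        }
        /* frames[0 .. nr_gframes-1] now holds the machine frames backing the table. */
        free(frames, M_DEVBUF);
        return (0);
}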

View File

@ -50,14 +50,14 @@ struct gnttab_free_callback {
};
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
int readonly);
int flags);
/*
* End access through the given grant reference, iff the grant entry is no
* longer in use. Return 1 if the grant entry was freed, 0 if it is still in
* use.
*/
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
int gnttab_end_foreign_access_ref(grant_ref_t ref);
/*
* Eventually end access through the given grant reference, and once that
@ -65,8 +65,7 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
* immediately iff the grant entry is not in use, otherwise it will happen
* some time later. page may be 0, in which case no freeing will occur.
*/
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
void *page);
void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
@ -96,7 +95,7 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int readonly);
unsigned long frame, int flags);
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);
@ -135,4 +134,19 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, vm_paddr_t addr,
unmap->dev_bus_addr = 0;
}
static inline void
gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr,
maddr_t new_addr, grant_handle_t handle)
{
if (xen_feature(XENFEAT_auto_translated_physmap)) {
unmap->host_addr = __pa(addr);
unmap->new_addr = __pa(new_addr);
} else {
unmap->host_addr = addr;
unmap->new_addr = new_addr;
}
unmap->handle = handle;
}
#endif /* __ASM_GNTTAB_H__ */
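
As a usage sketch, the new gnttab_set_replace_op() inline pairs with the GNTTABOP_unmap_and_replace operation added to grant_table.h later in this commit; the wrapper function below is hypothetical:

static int
example_unmap_and_replace(maddr_t addr, maddr_t new_addr, grant_handle_t handle)
{
        struct gnttab_unmap_and_replace op;
        int rc;

        /* Fills host_addr/new_addr/handle, honouring auto-translated guests. */
        gnttab_set_replace_op(&op, addr, new_addr, handle);

        rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &op, 1);
        if (rc != 0 || op.status != GNTST_okay)
                return (EINVAL);
        return (0);
}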

View File

@ -30,18 +30,6 @@
#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
/* Structural guest handles introduced in 0x00030201. */
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct name { type *p; } __guest_handle_ ## name
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
/*
* Hypercall interface:
* Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
@ -89,6 +77,7 @@
#define MACH2PHYS_VIRT_END_PAE \
mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
/* Non-PAE bounds are obsolete. */
#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
@ -99,16 +88,9 @@
#define MACH2PHYS_VIRT_END_NONPAE \
mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
#ifdef CONFIG_X86_PAE
#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
#else
#warning "not using PAE!!!"
#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_NONPAE
#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_NONPAE
#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_NONPAE
#endif
#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
@ -123,8 +105,8 @@
/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#undef __DEFINE_XEN_GUEST_HANDLE
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
#undef ___DEFINE_XEN_GUEST_HANDLE
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } \
__guest_handle_ ## name; \
typedef struct { union { type *p; uint64_aligned_t q; }; } \
@ -135,7 +117,8 @@
(hnd).p = val; \
} while ( 0 )
#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#define XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
#endif
#ifndef __ASSEMBLY__
@ -163,7 +146,7 @@ struct cpu_user_regs {
uint16_t gs, _pad5;
};
typedef struct cpu_user_regs cpu_user_regs_t;
__DEFINE_XEN_GUEST_HANDLE(foobarbaz, cpu_user_regs_t);
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
/*
* Page-directory addresses above 4GB do not fit into architectural %cr3.

View File

@ -97,8 +97,6 @@
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
#endif
#ifndef __ASSEMBLY__
/*
* int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
* @which == SEGBASE_* ; @base == 64-bit base address
@ -133,13 +131,16 @@
#define _VGCF_in_syscall 8
#define VGCF_in_syscall (1<<_VGCF_in_syscall)
#define VGCF_IN_SYSCALL VGCF_in_syscall
#ifndef __ASSEMBLY__
struct iret_context {
/* Top of stack (%rsp at point of hypercall). */
uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
/* Bottom of iret stack frame. */
};
#ifdef __GNUC__
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
#define __DECL_REG(name) union { \
uint64_t r ## name, e ## name; \

View File

@ -27,6 +27,26 @@
#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
#define __XEN_PUBLIC_ARCH_X86_XEN_H__
/* Structural guest handles introduced in 0x00030201. */
#if __XEN_INTERFACE_VERSION__ >= 0x00030201
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
#else
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef type * __guest_handle_ ## name
#endif
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#if defined(__i386__)
#include <xen/interface/arch-x86/xen-x86_32.h>
#elif defined(__x86_64__)
@ -34,18 +54,7 @@
#endif
#ifndef __ASSEMBLY__
/* Guest handles for primitive C types. */
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
#if 0
DEFINE_XEN_GUEST_HANDLE(char);
DEFINE_XEN_GUEST_HANDLE(int);
DEFINE_XEN_GUEST_HANDLE(long);
DEFINE_XEN_GUEST_HANDLE(void);
#endif
typedef unsigned long xen_pfn_t;
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define PRI_xen_pfn "lx"
#endif
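
To illustrate the relocated handle machinery, a minimal sketch (struct example_op and example_fill() are made up): DEFINE_XEN_GUEST_HANDLE(xen_pfn_t) makes XEN_GUEST_HANDLE(xen_pfn_t) usable as a member type, and set_xen_guest_handle() stores a plain pointer into it.

/* Under interface 0x00030201+ a handle is a one-member struct wrapping a pointer. */
struct example_op {
        XEN_GUEST_HANDLE(xen_pfn_t) pfn_list;   /* expands to __guest_handle_xen_pfn_t */
        unsigned int nr_pfns;
};

static void
example_fill(struct example_op *op, xen_pfn_t *pfns, unsigned int n)
{
        set_xen_guest_handle(op->pfn_list, pfns);       /* op->pfn_list.p = pfns */
        op->nr_pfns = n;
}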

View File

@ -36,16 +36,40 @@
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
/* ia64, x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */
#define CALLBACKTYPE_failsafe 1
#define CALLBACKTYPE_syscall 2 /* x86_64 only */
/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
#define CALLBACKTYPE_syscall 2
/*
* sysenter is only available on x86_32 with the
* supervisor_mode_kernel option enabled.
* x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
* feature is enabled. Do not use this callback type in new code.
*/
#define CALLBACKTYPE_sysenter 3
#define CALLBACKTYPE_sysenter_deprecated 3
/* x86: Callback for NMI delivery. */
#define CALLBACKTYPE_nmi 4
/*
* x86: sysenter is only available as follows:
* - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
* - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
* ('32-on-32-on-64', '32-on-64-on-64')
* [nb. also 64-bit guest applications on Intel CPUs
* ('64-on-64-on-64'), but syscall is preferred]
*/
#define CALLBACKTYPE_sysenter 5
/*
* x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
* ('32-on-32-on-64', '32-on-64-on-64')
*/
#define CALLBACKTYPE_syscall32 7
/*
* Disable event delivery during callback? This flag is ignored for event and
* NMI callbacks: event delivery is unconditionally disabled.
@ -79,6 +103,11 @@ struct callback_unregister {
typedef struct callback_unregister callback_unregister_t;
DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
#if __XEN_INTERFACE_VERSION__ < 0x00030207
#undef CALLBACKTYPE_sysenter
#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
#endif
#endif /* __XEN_PUBLIC_CALLBACK_H__ */
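
A hedged sketch of how these type codes are consumed. struct callback_register, CALLBACKOP_register, xen_callback_t and HYPERVISOR_callback_op() come from the rest of callback.h and the hypercall glue, not from this hunk, and the function name is illustrative:

static void
example_register_event_callback(xen_callback_t handler)
{
        struct callback_register cb;

        cb.type = CALLBACKTYPE_event;
        cb.flags = 0;           /* flags are ignored for event callbacks; delivery is masked anyway */
        cb.address = handler;   /* flat address on x86_64, cs:eip pair on x86_32 */

        (void)HYPERVISOR_callback_op(CALLBACKOP_register, &cb);
}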
/*

View File

@ -53,6 +53,9 @@ struct xen_domctl_createdomain {
/* Is this an HVM guest (as opposed to a PV guest)? */
#define _XEN_DOMCTL_CDF_hvm_guest 0
#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
/* Use hardware-assisted paging if available? */
#define _XEN_DOMCTL_CDF_hap 1
#define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap)
uint32_t flags;
};
typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
@ -373,6 +376,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
#define _XEN_DOMAINSETUP_sioemu_guest 2
#define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest)
typedef struct xen_domctl_arch_setup {
uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
#ifdef __ia64__
@ -380,6 +385,7 @@ typedef struct xen_domctl_arch_setup {
uint64_aligned_t maxmem; /* Highest memory address for MDT. */
uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
int8_t vhpt_size_log2; /* Log2 of VHPT size. */
#endif
} xen_domctl_arch_setup_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
@ -432,7 +438,184 @@ struct xen_domctl_sendtrigger {
typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
#define XEN_DOMCTL_assign_device 37
#define XEN_DOMCTL_test_assign_device 45
#define XEN_DOMCTL_deassign_device 47
struct xen_domctl_assign_device {
uint32_t machine_bdf; /* machine PCI ID of assigned device */
};
typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
/* Retrieve sibling devices information of machine_bdf */
#define XEN_DOMCTL_get_device_group 50
struct xen_domctl_get_device_group {
uint32_t machine_bdf; /* IN */
uint32_t max_sdevs; /* IN */
uint32_t num_sdevs; /* OUT */
XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */
};
typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
/* Pass-through interrupts: bind real irq -> hvm devfn. */
#define XEN_DOMCTL_bind_pt_irq 38
#define XEN_DOMCTL_unbind_pt_irq 48
typedef enum pt_irq_type_e {
PT_IRQ_TYPE_PCI,
PT_IRQ_TYPE_ISA,
PT_IRQ_TYPE_MSI,
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
pt_irq_type_t irq_type;
uint32_t hvm_domid;
union {
struct {
uint8_t isa_irq;
} isa;
struct {
uint8_t bus;
uint8_t device;
uint8_t intx;
} pci;
struct {
uint8_t gvec;
uint32_t gflags;
} msi;
} u;
};
typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
/* Bind machine I/O address range -> HVM address range. */
#define XEN_DOMCTL_memory_mapping 39
#define DPCI_ADD_MAPPING 1
#define DPCI_REMOVE_MAPPING 0
struct xen_domctl_memory_mapping {
uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
uint64_aligned_t first_mfn; /* first page (machine page) in range */
uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
uint32_t add_mapping; /* add or remove mapping */
uint32_t padding; /* padding for 64-bit aligned structure */
};
typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
/* Bind machine I/O port range -> HVM I/O port range. */
#define XEN_DOMCTL_ioport_mapping 40
struct xen_domctl_ioport_mapping {
uint32_t first_gport; /* first guest IO port */
uint32_t first_mport; /* first machine IO port */
uint32_t nr_ports; /* size of port range */
uint32_t add_mapping; /* add or remove mapping */
};
typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
/*
* Pin caching type of RAM space for x86 HVM domU.
*/
#define XEN_DOMCTL_pin_mem_cacheattr 41
/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
#define XEN_DOMCTL_MEM_CACHEATTR_UC 0
#define XEN_DOMCTL_MEM_CACHEATTR_WC 1
#define XEN_DOMCTL_MEM_CACHEATTR_WT 4
#define XEN_DOMCTL_MEM_CACHEATTR_WP 5
#define XEN_DOMCTL_MEM_CACHEATTR_WB 6
#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
struct xen_domctl_pin_mem_cacheattr {
uint64_aligned_t start, end;
unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
};
typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
#define XEN_DOMCTL_set_ext_vcpucontext 42
#define XEN_DOMCTL_get_ext_vcpucontext 43
struct xen_domctl_ext_vcpucontext {
/* IN: VCPU that this call applies to. */
uint32_t vcpu;
/*
* SET: Size of struct (IN)
* GET: Size of struct (OUT)
*/
uint32_t size;
#if defined(__i386__) || defined(__x86_64__)
/* SYSCALL from 32-bit mode and SYSENTER callback information. */
/* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */
uint64_aligned_t syscall32_callback_eip;
uint64_aligned_t sysenter_callback_eip;
uint16_t syscall32_callback_cs;
uint16_t sysenter_callback_cs;
uint8_t syscall32_disables_events;
uint8_t sysenter_disables_events;
#endif
};
typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
/*
* Set optimization features for a domain
*/
#define XEN_DOMCTL_set_opt_feature 44
struct xen_domctl_set_opt_feature {
#if defined(__ia64__)
struct xen_ia64_opt_feature optf;
#else
/* Make struct non-empty: do not depend on this field name! */
uint64_t dummy;
#endif
};
typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
/*
* Set the target domain for a domain
*/
#define XEN_DOMCTL_set_target 46
struct xen_domctl_set_target {
domid_t target;
};
typedef struct xen_domctl_set_target xen_domctl_set_target_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
#if defined(__i386__) || defined(__x86_64__)
# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
# define XEN_DOMCTL_set_cpuid 49
struct xen_domctl_cpuid {
unsigned int input[2];
unsigned int eax;
unsigned int ebx;
unsigned int ecx;
unsigned int edx;
};
typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
#endif
#define XEN_DOMCTL_subscribe 29
struct xen_domctl_subscribe {
uint32_t port; /* IN */
};
typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
/*
* Define the maximum machine address size which should be allocated
* to a guest.
*/
#define XEN_DOMCTL_set_machine_address_size 51
#define XEN_DOMCTL_get_machine_address_size 52
struct xen_domctl {
uint32_t cmd;
uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
@ -462,6 +645,19 @@ struct xen_domctl {
struct xen_domctl_hvmcontext hvmcontext;
struct xen_domctl_address_size address_size;
struct xen_domctl_sendtrigger sendtrigger;
struct xen_domctl_get_device_group get_device_group;
struct xen_domctl_assign_device assign_device;
struct xen_domctl_bind_pt_irq bind_pt_irq;
struct xen_domctl_memory_mapping memory_mapping;
struct xen_domctl_ioport_mapping ioport_mapping;
struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
struct xen_domctl_ext_vcpucontext ext_vcpucontext;
struct xen_domctl_set_opt_feature set_opt_feature;
struct xen_domctl_set_target set_target;
struct xen_domctl_subscribe subscribe;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
#endif
uint8_t pad[128];
} u;
};
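
A hedged sketch of driving one of the new sub-ops. The domain field and the means of issuing the hypercall (shown here as a HYPERVISOR_domctl() wrapper; the toolstack normally goes through privcmd) are assumptions, as is the helper name:

static int
example_map_guest_mmio(domid_t domid, uint64_t gfn, uint64_t mfn, uint64_t npages)
{
        struct xen_domctl domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_memory_mapping;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = domid;
        domctl.u.memory_mapping.first_gfn = gfn;
        domctl.u.memory_mapping.first_mfn = mfn;
        domctl.u.memory_mapping.nr_mfns = npages;
        domctl.u.memory_mapping.add_mapping = DPCI_ADD_MAPPING;

        /* Privileged (dom0/toolstack) operation. */
        return (HYPERVISOR_domctl(&domctl));
}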

View File

@ -56,6 +56,9 @@
*/
#define XENFEAT_pae_pgdir_above_4gb 4
/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
#define XENFEAT_mmu_pt_update_preserve_ad 5
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
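
The usual consumption pattern is a one-time feature probe; a minimal sketch, where xen_feature() is the existing feature-map accessor and the MMU_* command codes come from xen.h rather than this hunk:

static int
example_pt_update_cmd(void)
{
        /*
         * MMU_PT_UPDATE_PRESERVE_AD keeps the hardware Accessed/Dirty bits
         * intact across the PTE update; fall back to the plain variant if
         * the host does not advertise the feature.
         */
        if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad))
                return (MMU_PT_UPDATE_PRESERVE_AD);
        return (MMU_NORMAL_PT_UPDATE);
}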

View File

@ -119,6 +119,7 @@ typedef struct grant_entry grant_entry_t;
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
* GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
* GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
* GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
*/
#define _GTF_readonly (2)
#define GTF_readonly (1U<<_GTF_readonly)
@ -126,6 +127,12 @@ typedef struct grant_entry grant_entry_t;
#define GTF_reading (1U<<_GTF_reading)
#define _GTF_writing (4)
#define GTF_writing (1U<<_GTF_writing)
#define _GTF_PWT (5)
#define GTF_PWT (1U<<_GTF_PWT)
#define _GTF_PCD (6)
#define GTF_PCD (1U<<_GTF_PCD)
#define _GTF_PAT (7)
#define GTF_PAT (1U<<_GTF_PAT)
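
These bits tie into the gnttab.h prototype change above from 'int readonly' to 'int flags'. A hedged sketch, assuming the flags argument accepts GTF_* bits directly (which is the apparent intent of the rename); the helper is hypothetical:

static void
example_grant_ro_uncached(grant_ref_t ref, domid_t domid, unsigned long frame)
{
        /* Read-only grant that the peer must map with caching disabled. */
        gnttab_grant_foreign_access_ref(ref, domid, frame, GTF_readonly | GTF_PCD);
}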
/*
* Subflags for GTF_accept_transfer:
@ -228,7 +235,11 @@ struct gnttab_setup_table {
uint32_t nr_frames;
/* OUT parameters. */
int16_t status; /* GNTST_* */
XEN_GUEST_HANDLE(ulong) frame_list;
#ifdef __LP64__
XEN_GUEST_HANDLE(uint64_t) frame_list;
#else
XEN_GUEST_HANDLE(uint32_t) frame_list;
#endif
};
typedef struct gnttab_setup_table gnttab_setup_table_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
@ -328,6 +339,29 @@ struct gnttab_query_size {
typedef struct gnttab_query_size gnttab_query_size_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
/*
* GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
* tracked by <handle> but atomically replace the page table entry with one
* pointing to the machine address under <new_addr>. <new_addr> will be
* redirected to the null entry.
* NOTES:
* 1. The call may fail in an undefined manner if either mapping is not
* tracked by <handle>.
* 2. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
#define GNTTABOP_unmap_and_replace 7
struct gnttab_unmap_and_replace {
/* IN parameters. */
uint64_t host_addr;
uint64_t new_addr;
grant_handle_t handle;
/* OUT parameters. */
int16_t status; /* GNTST_* */
};
typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
/*
* Bitfield values for update_pin_status.flags.

View File

@ -47,7 +47,7 @@ struct xen_memory_reservation {
* OUT: GMFN bases of extents that were allocated
* (NB. This command also updates the mach_to_phys translation table)
*/
XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
xen_pfn_t *extent_start;
/* Number of extents, and size/alignment of each (2^extent_order pages). */
xen_ulong_t nr_extents;
@ -152,7 +152,7 @@ struct xen_machphys_mfn_list {
* any large discontiguities in the machine address space, 2MB gaps in
* the machphys table will be represented by an MFN base of zero.
*/
XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
xen_pfn_t *extent_start;
/*
* Number of extents written to the above array. This will be smaller
@ -214,13 +214,13 @@ struct xen_translate_gpfn_list {
xen_ulong_t nr_gpfns;
/* List of GPFNs to translate. */
XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
xen_pfn_t *gpfn_list;
/*
* Output list to contain MFN translations. May be the same as the input
* list (in which case each input GPFN is overwritten with the output MFN).
*/
XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
xen_pfn_t *mfn_list;
};
typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
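
For reference, these reservation structures feed HYPERVISOR_memory_op(). A minimal sketch of a single-page XENMEM_increase_reservation call; the remaining xen_memory_reservation fields, the command code and the wrapper are from the full header and hypercall glue, not this hunk:

static int
example_increase_reservation_one_page(xen_pfn_t *mfnp)
{
        struct xen_memory_reservation reservation;
        long rc;

        memset(&reservation, 0, sizeof(reservation));
        reservation.extent_start = mfnp;        /* plain-pointer form used by this tree */
        reservation.nr_extents = 1;
        reservation.extent_order = 0;           /* one 4K page per extent */
        reservation.domid = DOMID_SELF;

        /* Returns the number of extents actually allocated (1 on success). */
        rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
        return (rc == 1 ? 0 : ENOMEM);
}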

View File

@ -81,7 +81,11 @@ DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
#define PHYSDEVOP_set_iobitmap 7
struct physdev_set_iobitmap {
/* IN */
XEN_GUEST_HANDLE_00030205(uint8_t) bitmap;
#if __XEN_INTERFACE_VERSION__ >= 0x00030205
XEN_GUEST_HANDLE(uint8) bitmap;
#else
uint8_t *bitmap;
#endif
uint32_t nr_ports;
};
typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
@ -117,6 +121,52 @@ struct physdev_irq {
};
typedef struct physdev_irq physdev_irq_t;
DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
#define MAP_PIRQ_TYPE_MSI 0x0
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
domid_t domid;
/* IN */
int type;
/* IN */
int index;
/* IN or OUT */
int pirq;
/* IN */
int bus;
/* IN */
int devfn;
/* IN */
int entry_nr;
/* IN */
uint64_t table_base;
};
typedef struct physdev_map_pirq physdev_map_pirq_t;
DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
#define PHYSDEVOP_unmap_pirq 14
struct physdev_unmap_pirq {
domid_t domid;
/* IN */
int pirq;
};
typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
#define PHYSDEVOP_manage_pci_add 15
#define PHYSDEVOP_manage_pci_remove 16
struct physdev_manage_pci {
/* IN */
uint8_t bus;
uint8_t devfn;
};
typedef struct physdev_manage_pci physdev_manage_pci_t;
DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
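
A hedged sketch of mapping a legacy GSI onto a pirq with the new op; HYPERVISOR_physdev_op() and DOMID_SELF come from the existing hypercall glue, and the helper name is made up:

static int
example_map_gsi(int gsi, int *pirqp)
{
        struct physdev_map_pirq map;
        int rc;

        memset(&map, 0, sizeof(map));
        map.domid = DOMID_SELF;
        map.type = MAP_PIRQ_TYPE_GSI;
        map.index = gsi;        /* for GSI mappings, index is the GSI number */
        map.pirq = -1;          /* let Xen choose; holds the allocated pirq on return */

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);
        if (rc == 0)
                *pirqp = map.pirq;
        return (rc);
}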
/*
* Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()

View File

@ -170,7 +170,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
*
* This may be called only once per vcpu.
*/
#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
struct vcpu_register_vcpu_info {
uint64_t mfn; /* mfn of page to place vcpu_info */
uint32_t offset; /* offset within page */
@ -179,6 +179,27 @@ struct vcpu_register_vcpu_info {
typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
#define VCPUOP_send_nmi 11
/*
* Get the physical ID information for a pinned vcpu's underlying physical
* processor. The physical ID information is architecture-specific.
* On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
* greater are reserved.
* This command returns -EINVAL if it is not a valid operation for this VCPU.
*/
#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
struct vcpu_get_physid {
uint64_t phys_id;
};
typedef struct vcpu_get_physid vcpu_get_physid_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
#define xen_vcpu_physid_to_x86_apicid(physid) \
((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
#define xen_vcpu_physid_to_x86_acpiid(physid) \
((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
#endif /* __XEN_PUBLIC_VCPU_H__ */
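
A minimal sketch of how the new sub-op and the helper macros fit together; HYPERVISOR_vcpu_op() is the existing wrapper and the function name is illustrative:

static uint8_t
example_vcpu_apic_id(int vcpu)
{
        struct vcpu_get_physid physid;

        /* Only valid for a vcpu pinned to a physical CPU; fails otherwise. */
        if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, vcpu, &physid) != 0)
                return (0xff);
        return (xen_vcpu_physid_to_x86_apicid(physid.phys_id));
}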
/*