Merge ^/head r288836 through r288925.

Dimitry Andric 2015-10-06 16:25:13 +00:00
commit 88750be440
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang370-import/; revision=288926
95 changed files with 4504 additions and 2810 deletions

View File

@ -40,21 +40,10 @@ SUBDIR= cat \
test \
uuidgen
.if ${MK_RCMDS} != "no"
SUBDIR+= rcp
.endif
.if ${MK_SENDMAIL} != "no"
SUBDIR+= rmail
.endif
.if ${MK_TCSH} != "no"
SUBDIR+= csh
.endif
.if ${MK_TESTS} != "no"
SUBDIR+= tests
.endif
SUBDIR.${MK_RCMDS}+= rcp
SUBDIR.${MK_SENDMAIL}+= rmail
SUBDIR.${MK_TCSH}+= csh
SUBDIR.${MK_TESTS}+= tests
.include <bsd.arch.inc.mk>

View File

@ -72,6 +72,8 @@ create_test_inputs()
atf_check -e empty -s exit:0 touch 0b00001101
atf_check -e empty -s exit:0 touch 0b00001110
atf_check -e empty -s exit:0 touch 0b00001111
atf_check -e empty -s exit:0 sync
}
KB=1024
@ -98,6 +100,8 @@ create_test_inputs2()
count=1 oseek=$(( $filesize / $MB )) conv=sparse
files="${files} ${filesize}.file"
done
atf_check -e empty -s exit:0 sync
}
atf_test_case A_flag
@ -810,14 +814,16 @@ t_flag_body()
atf_check -e empty -o empty -s exit:0 touch a.file
atf_check -e empty -o empty -s exit:0 touch b.file
sync
atf_check -e empty -s exit:0 sync
atf_check -e empty -o match:'a\.file' -s exit:0 sh -c 'ls -lt | tail -n 1'
atf_check -e empty -o match:'b\.file.*a\.file' -s exit:0 ls -Ct
atf_check -e empty -o empty -s exit:0 rm a.file
atf_check -e empty -o empty -s exit:0 sh -c 'echo "i am a" > a.file'
sync
atf_check -e empty -s exit:0 sync
atf_check -e empty -o match:'b\.file' -s exit:0 sh -c 'ls -lt | tail -n 1'
atf_check -e empty -o match:'a\.file.*b\.file' -s exit:0 ls -Ct
@ -834,17 +840,15 @@ u_flag_body()
create_test_dir
atf_check -e empty -o empty -s exit:0 touch a.file
sync
atf_check -e empty -o empty -s exit:0 touch b.file
sync
atf_check -e empty -s exit:0 sync
atf_check -e empty -o match:'b\.file' -s exit:0 sh -c 'ls -lu | tail -n 1'
atf_check -e empty -o match:'a\.file.*b\.file' -s exit:0 ls -Cu
atf_check -e empty -o empty -s exit:0 sh -c 'echo "i am a" > a.file'
sync
atf_check -e empty -o match:'i am a' -s exit:0 cat a.file
sync
atf_check -e empty -s exit:0 sync
atf_check -e empty -o match:'b\.file' -s exit:0 sh -c 'ls -lu | tail -n 1'
atf_check -e empty -o match:'a\.file.*b\.file' -s exit:0 ls -Cu
@ -924,6 +928,7 @@ atf_test_case 1_flag
atf_init_test_cases()
{
export BLOCKSIZE=512
atf_add_test_case A_flag
atf_add_test_case A_flag_implied_when_root

View File

@ -1,8 +1,8 @@
# $FreeBSD$
LIBXO= ${.CURDIR:H:H}/contrib/libxo
LIBXOSRC= ${SRCTOP}/contrib/libxo
.PATH: ${LIBXO}/libxo
.PATH: ${LIBXOSRC}/libxo
LIB= xo
SHLIB_MAJOR=0
@ -11,7 +11,7 @@ SHLIBDIR?= /lib
SRCS= libxo.c xo_encoder.c xo_syslog.c
CFLAGS+=-I${LIBXO}/libxo
CFLAGS+=-I${LIBXOSRC}/libxo
CFLAGS+=-DXO_ENCODERDIR=\"/usr/lib/libxo/encoder\"
INCS= xo.h xo_encoder.h

View File

@ -19,10 +19,10 @@ export BOARDNAME="BANANAPI"
arm_install_uboot() {
UBOOT_DIR="/usr/local/share/u-boot/u-boot-bananapi"
UBOOT_FILES="u-boot.img"
UBOOT_FILES="u-boot-sunxi-with-spl.bin"
FATMOUNT="${DESTDIR%${KERNEL}}/fat"
UFSMOUNT="${DESTDIR%${KERNEL}}/ufs"
chroot ${CHROOTDIR} dd if=${UBOOT_DIR}/u-boot-sunxi-with-spl.bin \
chroot ${CHROOTDIR} dd if=${UBOOT_DIR}/${UBOOT_FILES} \
of=/dev/${mddev} bs=1k seek=8 conv=sync
chroot ${CHROOTDIR} mkdir -p "${FATMOUNT}" "${UFSMOUNT}"
chroot ${CHROOTDIR} mount_msdosfs /dev/${mddev}s1 ${FATMOUNT}

View File

@ -18,10 +18,10 @@ NODOC=1
arm_install_uboot() {
UBOOT_DIR="/usr/local/share/u-boot/u-boot-cubieboard"
UBOOT_FILES="u-boot.img"
UBOOT_FILES="u-boot-sunxi-with-spl.bin"
FATMOUNT="${DESTDIR%${KERNEL}}/fat"
UFSMOUNT="${DESTDIR%${KERNEL}}/ufs"
chroot ${CHROOTDIR} dd if=${UBOOT_DIR}/u-boot-sunxi-with-spl.bin \
chroot ${CHROOTDIR} dd if=${UBOOT_DIR}/${UBOOT_FILES} \
of=/dev/${mddev} bs=1k seek=8 conv=sync
chroot ${CHROOTDIR} mkdir -p "${FATMOUNT}" "${UFSMOUNT}"
chroot ${CHROOTDIR} mount_msdosfs /dev/${mddev}s1 ${FATMOUNT}

View File

@ -19,10 +19,10 @@ export BOARDNAME="CUBIEBOARD2"
arm_install_uboot() {
UBOOT_DIR="/usr/local/share/u-boot/u-boot-cubieboard2"
UBOOT_FILES="u-boot.img"
UBOOT_FILES="u-boot-sunxi-with-spl.bin"
FATMOUNT="${DESTDIR%${KERNEL}}/fat"
UFSMOUNT="${DESTDIR%${KERNEL}}/ufs"
chroot ${CHROOTDIR} dd if=${UBOOT_DIR}/u-boot-sunxi-with-spl.bin \
chroot ${CHROOTDIR} dd if=${UBOOT_DIR}/${UBOOT_FILES} \
of=/dev/${mddev} bs=1k seek=8 conv=sync
chroot ${CHROOTDIR} mkdir -p "${FATMOUNT}" "${UFSMOUNT}"
chroot ${CHROOTDIR} mount_msdosfs /dev/${mddev}s1 ${FATMOUNT}

View File

@ -173,6 +173,9 @@
<para revision="272350">The <literal>MK_ARM_EABI</literal>
&man.src.conf.5; option has been removed.</para>
<para revision="285169">The <application>ntp</application> suite
has been updated to version 4.2.8p3.</para>
</sect2>
<sect2 xml:id="userland-programs">
@ -392,9 +395,63 @@
updated to be able to detect &man.zfs.8; and &man.geli.8;
filesystems.</para>
<para revision="284883">The &man.mkimg.1; utility has been
updated to include support for <literal>NTFS</literal>
filesystems in both <acronym>MBR</acronym> and
<acronym>GPT</acronym> partitioning schemes.</para>
<para revision="285253">The &man.quota.1; utility has been
updated to include support for <acronym>IPv6</acronym>.</para>
<para revision="285420">The &man.jail.8; utility has been
updated to include a new flag, <literal>-l</literal>, which
ensures a clean environment in the target jail when used.
Additionally, &man.jail.8; will run a shell within the target
jail when run with no commands specified.</para>
<para revision="285550">The &man.w.1; utility has been updated
to display the full IPv6 remote address of the host from which
a user is connected.</para>
<para revision="285685">The &man.jail.8; framework has been
updated to allow mounting &man.linprocfs.5; and
&man.linsysfs.5; within a jail.</para>
<para revision="285772" contrib="sponsor"
sponsor="&emcisilon;">The &man.patch.1; utility has been
updated to include a new option to the <literal>-V</literal>
flag, <literal>none</literal>, which disables backup file
creation when applying a patch.</para>
<para revision="286010" contrib="sponsor" sponsor="&ff;">The
&man.ar.1; utility now enables deterministic mode
(<literal>-D</literal>) by default. This behavior can be
disabled by specifying the <literal>-U</literal> flag.</para>
<para revision="286289" contrib="sponsor"
sponsor="&scaleengine;">The &man.xargs.1; utility has been
updated to allow specifying <literal>0</literal> as an
argument to the <literal>-P</literal> (parallel mode) flag,
which allows creating as many concurrent processes as
possible.</para>
<para revision="286795">The &man.patch.1; utility has been
updated to remove the automatic checkout feature.</para>
<para revision="287473" contrib="sponsor" sponsor="&gandi;">A
new utility, &man.sesutil.8;, has been added, which is used
to manage &man.ses.4; devices.</para>
<para revision="287522">The &man.pciconf.8; utility has been
updated to use the PCI ID database from the <filename
role="package">misc/pciids</filename> package, if present,
falling back to the PCI ID database in the &os; base
system.</para>
<para revision="287842" contrib="sponsor"
sponsor="&scaleengine;">The &man.ifconfig.8; utility has been
updated to always exit with an error code if an important
&man.ioctl.2; fails.</para>
</sect2>
<sect2 xml:id="userland-contrib">
@ -407,9 +464,6 @@
<para revision="260445">&man.byacc.1; has been updated to
version 20140101.</para>
<para revision="261071">&man.jemalloc.3; has been updated to
version 3.5.0.</para>
<para revision="261283"><application>libc++</application> has
been updated to version 3.4.</para>
@ -471,28 +525,13 @@
&man.hostapd.8; utilities have been updated to version
2.4.</para>
<para revision="282089">The &man.unbound.8; utility has been
updated to version 1.5.3.</para>
<para revision="282434" contrib="sponsor" sponsor="&ff;">The
&man.resolvconf.8; utility has been updated to version
3.7.0.</para>
<para revision="282613">The &man.nc.1; utility has been updated
to the OpenBSD 5.7 version.</para>
<para revision="283092">The &man.acpi.4; subsystem has been
updated to version 20150515.</para>
<para revision="284237">The &man.file.1; utility has been
updated to version 5.23.</para>
<para revision="284254"><application>bmake</application> has
been updated to version 20150606.</para>
<para revision="284397">Timezone data files have been updated to
version 2015e.</para>
<para revision="285229"><application>sendmail</application> has
been updated to 8.15.2. Starting with &os;&nbsp;11.0 and
sendmail 8.15, sendmail uses uncompressed IPv6 addresses by
@ -509,6 +548,9 @@
<literal>confUSE_COMPRESSED_IPV6_ADDRESSES</literal> or the cf
option <literal>UseCompressedIPv6Addresses</literal>.</para>
<para revision="285275">The &man.tcpdump.1; utility has been
updated to version 4.7.4.</para>
<para revision="285329"><application>OpenSSL</application> has
been updated to version 1.0.1p.</para>
@ -516,6 +558,43 @@
&man.ssh.1; utility has been updated to re-implement hostname
canonicalization before locating the host in
<filename>known_hosts</filename>.</para>
<para revision="285972">The &man.libarchive.3; library has been
updated to properly skip a sparse file entry in a &man.tar.1;
file, which would previously produce errors.</para>
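
    [Editorial note, not part of the commit: a minimal C sketch of the libarchive(3)
    read loop affected by r285972. Iterating a tar file and skipping each entry's
    data is the operation that previously produced errors for sparse entries. The
    archive name "backup.tar" and the lack of error handling are placeholders.]

    #include <stdio.h>
    #include <archive.h>
    #include <archive_entry.h>

    int
    main(void)
    {
            struct archive *a = archive_read_new();
            struct archive_entry *entry;

            archive_read_support_format_tar(a);
            if (archive_read_open_filename(a, "backup.tar", 10240) != ARCHIVE_OK)
                    return (1);
            while (archive_read_next_header(a, &entry) == ARCHIVE_OK) {
                    printf("%s\n", archive_entry_pathname(entry));
                    /* Skipping a sparse entry's data now works correctly. */
                    archive_read_data_skip(a);
            }
            archive_read_free(a);
            return (0);
    }
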
<para revision="286503">The <application>apr</application>
library used by &man.svnlite.1; has been updated to version
1.5.2.</para>
<para revision="286505">The <application>serf</application>
library used by &man.svnlite.1; has been updated to version
1.3.8.</para>
<para revision="286505">The &man.svnlite.1; utility has been
updated to version 1.8.14.</para>
<para revision="286510">The <application>sqlite3</application>
library used by &man.svnlite.1; and &man.kerberos.8; has been
updated to version 3.8.11.1.</para>
<para revision="286750">Timezone data files have been updated to
version 2015f.</para>
<para revision="287168">The &man.acpi.4; subsystem has been
updated to version 20150818.</para>
<para revision="287917">The &man.unbound.8; utility has been
updated to version 1.5.4.</para>
<para revision="288090">&man.jemalloc.3; has been updated to
version 4.0.2.</para>
<para revision="288143">The &man.file.1; utility has been
updated to version 5.25.</para>
<para revision="288303">The &man.nc.1; utility has been updated
to the OpenBSD 5.8 version.</para>
</sect2>
<sect2 xml:id="userland-installer">
@ -590,6 +669,12 @@
<filename>iovctl</filename>, has been added, which allows
automatically starting the &man.iovctl.8; utility at
boot.</para>
<para revision="287576" contrib="sponsor"
sponsor="&scaleengine;">The &man.service.8; utility has been
updated to honor entries within <filename
class="directory">/etc/rc.conf.d/</filename>.</para>
</sect2>
<sect2 xml:id="userland-periodic">
@ -695,6 +780,11 @@
<para revision="284483" contrib="sponsor"
sponsor="&scaleengine;">The Blowfish &man.crypt.3; library
has been updated to support &dollar;2y&dollar; hashes.</para>
<para revision="285277">The &man.execl.3; and &man.execlp.3;
library functions have been updated to use the
<literal>__sentinel</literal> <application>gcc</application>
attribute.</para>
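
    [Editorial note: a small illustrative C program, not from the commit, showing
    what the __sentinel attribute buys. With execl(3) declared
    __attribute__((sentinel)), forgetting the terminating NULL now produces a
    compile-time warning instead of silently reading past the argument list.]

    #include <unistd.h>

    int
    main(void)
    {
            /* Correct: the argument list is terminated with a NULL sentinel. */
            execl("/bin/ls", "ls", "-l", (char *)NULL);

            /*
             * Omitting the sentinel, e.g. execl("/bin/ls", "ls", "-l"),
             * now draws a "missing sentinel in function call" warning.
             */
            return (1);
    }
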
</sect2>
<sect2 xml:id="userland-abi">
@ -765,6 +855,11 @@
<para revision="281261" arch="powerpc">Support for
&man.dtrace.1; has been added for the
Book-E&nbsp;&trade;.</para>
<para revision="287886" contrib="sponsor"
sponsor="&multiplay;">The &man.kqueue.2; system call has been
updated to handle write events to files larger than 2
gigabytes.</para>
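
    [Editorial note: a hedged userland sketch of the kqueue(2) usage this fix
    concerns, registering an EVFILT_WRITE event on a regular file descriptor.
    The file name is a placeholder; the change matters once the file grows past
    2 GB. Error handling is abbreviated.]

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            struct kevent change, event;
            int fd, kq;

            fd = open("large.dat", O_RDWR | O_CREAT, 0644);
            kq = kqueue();
            if (fd == -1 || kq == -1)
                    return (1);

            EV_SET(&change, fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, NULL);
            if (kevent(kq, &change, 1, &event, 1, NULL) == -1) {
                    perror("kevent");
                    return (1);
            }
            printf("fd %d writable, data = %jd\n",
                (int)event.ident, (intmax_t)event.data);
            return (0);
    }
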
</sect2>
<sect2 xml:id="kernel-config">
@ -867,6 +962,32 @@
<para>Multi-queue support in the &man.em.4; driver is not
officially supported by &intel;.</para>
</note>
<para revision="285142" contrib="sponsor"
sponsor="&netgate;">The <filename>GENERIC</filename> kernel
configuration has been updated to include the
<literal>IPSEC</literal> option by default.</para>
<para revision="285387" contrib="sponsor"
sponsor="&norse;, &dell;">Initial <acronym>NUMA</acronym>
affinity and policy configuration has been added. See
&man.numactl.1; and &man.numa.getaffinity.2; for usage
details.</para>
<para revision="286231">The &man.pms.4; driver has been added
to the <filename>GENERIC</filename> kernel configuration for
supported architectures.</para>
<para revision="287306" arch="arm">The
<filename>CUBIEBOARD2</filename> kernel configuration has been
renamed to <filename>A20</filename>.</para>
<para revision="288176" contrib="sponsor" sponsor="&ff;">Kernel
debugging symbols are now installed to <filename
class="directory">/usr/lib/debug/boot/kernel/</filename>.
To retain the previous behavior, add
<literal>KERN_DEBUGDIR=""</literal> to
&man.src.conf.5;.</para>
</sect2>
<sect2 xml:id="kernel-sysctl">
@ -915,6 +1036,14 @@
been added, which when set to <literal>1</literal> during
runtime will flush all
<literal>net.inet.tcp.hostcache</literal> entries.</para>
<para revision="285524">A new &man.sysctl.8;,
<literal>hw.model</literal>, has been added, which displays
<acronym>CPU</acronym> model information.</para>
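
    [Editorial note: the sysctl can also be read programmatically with
    sysctlbyname(3); a minimal sketch, with an arbitrarily sized buffer.]

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
            char model[128];
            size_t len = sizeof(model);

            if (sysctlbyname("hw.model", model, &len, NULL, 0) == -1) {
                    perror("sysctlbyname");
                    return (1);
            }
            printf("CPU model: %s\n", model);
            return (0);
    }
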
<para revision="286591">The &man.uart.4; driver has been
updated to allow tuning pulse-per-second (PPS) signal capture during
runtime.</para>
</sect2>
</sect1>
@ -965,6 +1094,13 @@
<para revision="282783" arch="powerpc">Support for the Freescale
<acronym>PCI</acronym> Root Complex device has been
added.</para>
<para revision="285876">The &man.cyapa.4; driver has been added,
supporting the Cypress APA I2C trackpad.</para>
<para revision="285883">The &man.isl.4; driver has been added,
supporting the Intersil I2C ISL29018 digital ambient light
sensor.</para>
</sect2>
<sect2 xml:id="drivers-storage">
@ -1002,6 +1138,23 @@
<para revision="281387">The &man.hptnr.4; driver has been
updated to version 1.1.1.</para>
<para revision="285662">The &man.pms.4; driver has been added,
providing support for the PMC Sierra line of
<acronym>SAS</acronym>/<acronym>SATA</acronym> host bus
adapters.</para>
<para revision="287117" contrib="sponsor"
sponsor="&emcisilon;">The &man.ioat.4; driver has been added,
providing support for the <acronym>PSE</acronym> (Platform
Storage Extension).</para>
<para revision="287621" contrib="sponsor" sponsor="&ix;">The
<acronym>CTL</acronym> High Availability implementation has
been rewritten.</para>
<para revision="288310">The &man.ctl.4; driver has been updated
to support CD-ROM and removable devices.</para>
</sect2>
<sect2 xml:id="drivers-network">
@ -1049,8 +1202,8 @@
<para revision="272730">The &man.alc.4; driver has been updated
to support AR816x and AR817x ethernet controllers.</para>
<para revision="272906">The &man.pf.4; packet filter default hash
has been changed from <literal>Jenkins</literal> to
<para revision="272906">The &man.pf.4; packet filter default
hash has been changed from <literal>Jenkins</literal> to
<literal>Murmur3</literal>, providing a 3-percent performance
increase in packets-per-second.</para>
@ -1079,6 +1232,25 @@
<para revision="284125">The &man.cdce.4; driver has been updated
to include support for the RTL8153 chipset.</para>
<para revision="286441">The &man.iwm.4; driver has been imported
from OpenBSD, providing support for &intel; 3160/7260/7265
wireless chipsets.</para>
<para revision="286829" contrib="sponsor"
sponsor="&limelight;">The &man.em.4; driver has been updated
to allow disabling <acronym>CRC</acronym> stripping.</para>
<para revision="287222">The &man.pf.4; implementation has been
updated to remove support for the <literal>scrub fragment
crop|drop-ovl</literal> filtering rule. Systems with this
rule in &man.pf.conf.5; will have it implicitly converted to the
<literal>scrub fragment reassemble</literal> filtering rule,
without requiring intervention.</para>
<para revision="287469" contrib="sponsor"
sponsor="&intelcorp;">The &man.em.4; driver has been updated
to support the Skylake I219 chipset.</para>
</sect2>
</sect1>
@ -1131,6 +1303,10 @@
<para>The &man.uart.4; driver has been updated to support
<acronym>AMT</acronym> devices on newer systems.</para>
<para revision="285316" contrib="sponsor" sponsor="&ff;"
arch="arm64">Initial <acronym>SMP</acronym> support has been
added to the &os;/&arch.arm64; port.</para>
</sect2>
<sect2 xml:id="hardware-virtualization">
@ -1217,6 +1393,10 @@
<para revision="284746" contrib="sponsor" sponsor="&msostc;">The
&man.hv.netvsc.4; driver has been updated to support checksum
offloading and <acronym>TSO</acronym>.</para>
<para revision="286062">The &man.xen.4; driver has been updated
to include support for <literal>blkif</literal> indirect
segment I/O.</para>
</sect2>
<sect2 xml:id="hardware-arm">
@ -1269,6 +1449,15 @@
sponsor="&ff;">Initial
<acronym>ACPI</acronym> support has been added for
&os;/&arch.arm64;.</para>
<para revision="287225">Support for 1-Wire devices has been
added, providing support for 1-Wire hardware through
&man.gpio.4;. See &man.ow.4;, &man.owc.4;, and
&man.ow.temp.4; for more information.</para>
<para revision="287371" arch="arm64" contrib="sponsor"
sponsor="&abt;">Support for the HiSilicon HI6220 SoC has been
added.</para>
</sect2>
</sect1>
@ -1302,6 +1491,13 @@
&man.auto.master.5; map, <literal>-noauto</literal>, which
handles &man.fstab.5; entries set to
<literal>noauto</literal>.</para>
<para revision="286444">The <acronym>GELI</acronym> class has
been updated to support the <literal>BIO_DELETE</literal>
&man.g.bio.9; <literal>bio_cmd</literal> field, providing
<acronym>TRIM</acronym>/<acronym>UNMAP</acronym> support on
<acronym>GELI</acronym>-backed <acronym>SSD</acronym> storage
providers.</para>
</sect2>
<sect2 xml:id="storage-net">
@ -1333,6 +1529,12 @@
&man.sysctl.8; has been removed, and replaced with the
<literal>kstat.zfs.misc.arcstats.arc_meta_used</literal>
&man.sysctl.8;.</para>
<para revision="287099" contrib="sponsor"
sponsor="&clusterhq;">The &man.zfs.8; <literal>l2arc</literal>
code has been updated to take <literal>ashift</literal> into
account when gathering buffers to be written to the
<literal>l2arc</literal> device.</para>
</sect2>
<sect2 xml:id="storage-geom">
@ -1346,6 +1548,13 @@
<literal>apple-boot</literal>, <literal>apple-hfs</literal>,
and <literal>apple-ufs</literal> <acronym>MBR</acronym>
partitioning schemes have been added to &man.gpart.8;.</para>
<para revision="285594" contrib="sponsor"
sponsor="&scaleengine;">The &man.gpart.8; utility has been
updated to include a new attribute for <acronym>GPT</acronym>
partitions, <literal>lenovofix</literal>, which, when set,
works around <acronym>BIOS</acronym> compatibility
issues reported on several Lenovo&nbsp;&trade; laptops.</para>
</sect2>
</sect1>
@ -1491,6 +1700,16 @@
6864) has been added. Support for this feature can be toggled
with the <literal>net.inet.ip.rfc6864</literal>
&man.sysctl.8;, which is enabled by default.</para>
<para revision="285336" contrib="sponsor"
sponsor="&netgate;">The <acronym>IPSEC</acronym> has been
updated to include support for <acronym>AES</acronym> modes on
both software-only and hardware-backed (&man.aesni.4;)
systems.</para>
<para revision="287798" contrib="sponsor" sponsor="&dell;">The
network stack has been updated to fix handling of
<acronym>IPv6</acronym> On-Link redirects.</para>
</sect2>
</sect1>

View File

@ -8,6 +8,7 @@
-->
<!ENTITY abt "ABT Systems, Ltd.">
<!ENTITY afrl "AFRL">
<!ENTITY chelsio "Chelsio Communications">
@ -21,9 +22,12 @@
<!ENTITY darpa_afrl "DARPA, AFRL">
<!ENTITY dell "Dell, Inc.">
<!ENTITY emcisilon "EMC / Isilon Storage Division">
<!ENTITY ff "The&nbsp;&os;&nbsp;Foundation">
<!ENTITY ff.url "https://www.FreeBSDFoundation.org/">
<!ENTITY gandi "Gandi.net">
<!ENTITY google "Google">
<!ENTITY juniper "Juniper Networks, Inc.">
@ -41,6 +45,7 @@
<!ENTITY netflix "Netflix">
<!ENTITY netgate "Netgate">
<!ENTITY nginx "Nginx, Inc.">
<!ENTITY norse "Norse Corporation">
<!ENTITY sandvine "Sandvine, Inc.">
<!ENTITY scaleengine "ScaleEngine, Inc.">

View File

@ -246,7 +246,10 @@ XZ_CMD?= xz
# overridden by Makefiles, but the user may choose to set this in src.conf(5).
TESTSBASE?= /usr/tests
# Compat for the moment
# Compat for the moment -- old bsd.own.mk only included this when _WITHOUT_SRCCONF
# wasn't defined. bsd.ports.mk and friends depend on this behavior. Remove in 12.
.if !defined(_WITHOUT_SRCCONF)
.include <bsd.compiler.mk>
.endif # !_WITHOUT_SRCCONF
.endif # !target(__<bsd.own.mk>__)

View File

@ -33,7 +33,6 @@
#include "opt_ddb.h"
#include <machine/asmacros.h>
#include <machine/intr_machdep.h>
#include <machine/pmap.h>
#include "assym.s"

View File

@ -53,6 +53,7 @@
#define FIRST_MSI_INT 256
#ifdef XENHVM
#include <xen/xen-os.h>
#include <xen/interface/event_channel.h>
#define NUM_EVTCHN_INTS NR_EVENT_CHANNELS
#define FIRST_EVTCHN_INT \
(FIRST_MSI_INT + NUM_MSI_INTS)

View File

@ -1104,6 +1104,22 @@ ipf_checkv4sum(fin)
return -1;
}
if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
/* Depending on the driver, UDP may have zero checksum */
if (fin->fin_p == IPPROTO_UDP && (fin->fin_flx &
(FI_FRAG|FI_SHORT|FI_BAD)) == 0) {
udphdr_t *udp = fin->fin_dp;
if (udp->uh_sum == 0) {
/*
* we're good no matter what the hardware
* checksum flags and csum_data say (handling
* of csum_data for zero UDP checksum is not
* consistent across all drivers)
*/
fin->fin_cksum = 1;
return 0;
}
}
if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
sum = m->m_pkthdr.csum_data;
else

View File

@ -162,7 +162,7 @@ xbd_free_command(struct xbd_command *cm)
static void
xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
grant_ref_t * gref_head, int otherend_id, int readonly,
grant_ref_t * sg_ref, blkif_request_segment_t * sg)
grant_ref_t * sg_ref, struct blkif_request_segment *sg)
{
struct blkif_request_segment *last_block_sg = sg + nsegs;
vm_paddr_t buffer_ma;

View File

@ -33,7 +33,6 @@
#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

View File

@ -53,6 +53,7 @@
#define FIRST_MSI_INT 256
#ifdef XENHVM
#include <xen/xen-os.h>
#include <xen/interface/event_channel.h>
#define NUM_EVTCHN_INTS NR_EVENT_CHANNELS
#define FIRST_EVTCHN_INT \
(FIRST_MSI_INT + NUM_MSI_INTS)

View File

@ -928,25 +928,29 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
switch (req) {
case PT_TO_SCE:
p->p_stops |= S_PT_SCE;
CTR2(KTR_PTRACE,
"PT_TO_SCE: pid %d, stops = %#x", p->p_pid,
p->p_stops);
CTR4(KTR_PTRACE,
"PT_TO_SCE: pid %d, stops = %#x, PC = %#lx, sig = %d",
p->p_pid, p->p_stops,
(u_long)(uintfptr_t)addr, data);
break;
case PT_TO_SCX:
p->p_stops |= S_PT_SCX;
CTR2(KTR_PTRACE,
"PT_TO_SCX: pid %d, stops = %#x", p->p_pid,
p->p_stops);
CTR4(KTR_PTRACE,
"PT_TO_SCX: pid %d, stops = %#x, PC = %#lx, sig = %d",
p->p_pid, p->p_stops,
(u_long)(uintfptr_t)addr, data);
break;
case PT_SYSCALL:
p->p_stops |= S_PT_SCE | S_PT_SCX;
CTR2(KTR_PTRACE,
"PT_SYSCALL: pid %d, stops = %#x", p->p_pid,
p->p_stops);
CTR4(KTR_PTRACE,
"PT_SYSCALL: pid %d, stops = %#x, PC = %#lx, sig = %d",
p->p_pid, p->p_stops,
(u_long)(uintfptr_t)addr, data);
break;
case PT_CONTINUE:
CTR1(KTR_PTRACE,
"PT_CONTINUE: pid %d", p->p_pid);
CTR3(KTR_PTRACE,
"PT_CONTINUE: pid %d, PC = %#lx, sig = %d",
p->p_pid, (u_long)(uintfptr_t)addr, data);
break;
}
break;
@ -969,11 +973,12 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
proc_reparent(p, pp);
if (pp == initproc)
p->p_sigparent = SIGCHLD;
CTR2(KTR_PTRACE,
"PT_DETACH: pid %d reparented to pid %d",
p->p_pid, pp->p_pid);
CTR3(KTR_PTRACE,
"PT_DETACH: pid %d reparented to pid %d, sig %d",
p->p_pid, pp->p_pid, data);
} else
CTR1(KTR_PTRACE, "PT_DETACH: pid %d", p->p_pid);
CTR2(KTR_PTRACE, "PT_DETACH: pid %d, sig %d",
p->p_pid, data);
p->p_oppid = 0;
p->p_stops = 0;
@ -1226,10 +1231,10 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
if (wrap32)
ptrace_lwpinfo_to32(pl, pl32);
#endif
CTR5(KTR_PTRACE,
"PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d",
CTR6(KTR_PTRACE,
"PT_LWPINFO: tid %d (pid %d) event %d flags %#x child pid %d syscall %d",
td2->td_tid, p->p_pid, pl->pl_event, pl->pl_flags,
pl->pl_child_pid);
pl->pl_child_pid, pl->pl_syscall_code);
break;
case PT_GETNUMLWPS:
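
    [Editorial note: the widened CTR6 trace above now logs pl_syscall_code, the
    same field a userland tracer can read via PT_LWPINFO. A hedged sketch,
    assuming pid names an already-attached, stopped tracee.]

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <stdio.h>

    /* Print the syscall number a stopped tracee is entering, if any. */
    static void
    show_syscall(pid_t pid)
    {
            struct ptrace_lwpinfo pl;

            if (ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl)) == -1) {
                    perror("PT_LWPINFO");
                    return;
            }
            if (pl.pl_flags & PL_FLAG_SCE)
                    printf("lwp %d entering syscall %u\n",
                        (int)pl.pl_lwpid, (unsigned)pl.pl_syscall_code);
    }
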

View File

@ -1034,8 +1034,6 @@ m_pullup(struct mbuf *n, int len)
* the amount of empty space before the data in the new mbuf to be specified
* (in the event that the caller expects to prepend later).
*/
int MSFail;
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
@ -1072,7 +1070,6 @@ m_copyup(struct mbuf *n, int len, int dstoff)
return (m);
bad:
m_freem(n);
MSFail++;
return (NULL);
}

View File

@ -2536,6 +2536,16 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->snd_nxt = onxt;
goto drop;
} else if (V_tcp_do_rfc3042) {
/*
* Process first and second duplicate
* ACKs. Each indicates a segment
* leaving the network, creating room
* for more. Make sure we can send a
* packet on reception of each duplicate
* ACK by increasing snd_cwnd by one
* segment. Restore the original
* snd_cwnd after packet transmission.
*/
cc_ack_received(tp, th, CC_DUPACK);
u_long oldcwnd = tp->snd_cwnd;
tcp_seq oldsndmax = tp->snd_max;

View File

@ -187,6 +187,8 @@ ENTRY(savectx)
stmw %r12,PCB_CONTEXT(%r3) /* Save the non-volatile GP regs */
mfcr %r4 /* Save the condition register */
stw %r4,PCB_CR(%r3)
mflr %r4 /* Save the link register */
stw %r4,PCB_LR(%r3)
blr
/*

View File

@ -255,6 +255,8 @@ ENTRY(savectx)
mfcr %r4 /* Save the condition register */
std %r4,PCB_CR(%r3)
std %r2,PCB_TOC(%r3) /* Save the TOC pointer */
mflr %r4 /* Save the link register */
std %r4,PCB_LR(%r3)
blr
/*

View File

@ -313,8 +313,6 @@ swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
racct_sub_cred(cred, RACCT_SWAP, decr);
}
static void swapdev_strategy(struct buf *, struct swdevt *sw);
#define SWM_FREE 0x02 /* free, period */
#define SWM_POP 0x04 /* pop out */
@ -1308,7 +1306,7 @@ swap_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
* those whos rtvals[] entry is not set to VM_PAGER_PEND on return.
* We need to unbusy the rest on I/O completion.
*/
void
static void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
int flags, int *rtvals)
{

View File

@ -386,7 +386,7 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
VM_OBJECT_WLOCK(object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
vm_page_unwire(m, PQ_INACTIVE);
vm_page_unwire(m, PQ_NONE);
vm_page_free(m);
}
VM_OBJECT_WUNLOCK(object);

View File

@ -1,228 +0,0 @@
/*
* acm.h: Xen access control module interface defintions
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Reiner Sailer <sailer@watson.ibm.com>
* Copyright (c) 2005, International Business Machines Corporation.
*/
#ifndef _XEN_PUBLIC_ACM_H
#define _XEN_PUBLIC_ACM_H
#include "xen.h"
/* if ACM_DEBUG defined, all hooks should
* print a short trace message (comment it out
* when not in testing mode )
*/
/* #define ACM_DEBUG */
#ifdef ACM_DEBUG
# define printkd(fmt, args...) printk(fmt,## args)
#else
# define printkd(fmt, args...)
#endif
/* default ssid reference value if not supplied */
#define ACM_DEFAULT_SSID 0x0
#define ACM_DEFAULT_LOCAL_SSID 0x0
/* Internal ACM ERROR types */
#define ACM_OK 0
#define ACM_UNDEF -1
#define ACM_INIT_SSID_ERROR -2
#define ACM_INIT_SOID_ERROR -3
#define ACM_ERROR -4
/* External ACCESS DECISIONS */
#define ACM_ACCESS_PERMITTED 0
#define ACM_ACCESS_DENIED -111
#define ACM_NULL_POINTER_ERROR -200
/*
Error codes reported in when trying to test for a new policy
These error codes are reported in an array of tuples where
each error code is followed by a parameter describing the error
more closely, such as a domain id.
*/
#define ACM_EVTCHN_SHARING_VIOLATION 0x100
#define ACM_GNTTAB_SHARING_VIOLATION 0x101
#define ACM_DOMAIN_LOOKUP 0x102
#define ACM_CHWALL_CONFLICT 0x103
#define ACM_SSIDREF_IN_USE 0x104
/* primary policy in lower 4 bits */
#define ACM_NULL_POLICY 0
#define ACM_CHINESE_WALL_POLICY 1
#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
#define ACM_POLICY_UNDEFINED 15
/* combinations have secondary policy component in higher 4bit */
#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
/* policy: */
#define ACM_POLICY_NAME(X) \
((X) == (ACM_NULL_POLICY)) ? "NULL" : \
((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \
((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
"UNDEFINED"
/* the following policy versions must be increased
* whenever the interpretation of the related
* policy's data structure changes
*/
#define ACM_POLICY_VERSION 3
#define ACM_CHWALL_VERSION 1
#define ACM_STE_VERSION 1
/* defines a ssid reference used by xen */
typedef uint32_t ssidref_t;
/* hooks that are known to domains */
#define ACMHOOK_none 0
#define ACMHOOK_sharing 1
/* -------security policy relevant type definitions-------- */
/* type identifier; compares to "equal" or "not equal" */
typedef uint16_t domaintype_t;
/* CHINESE WALL POLICY DATA STRUCTURES
*
* current accumulated conflict type set:
* When a domain is started and has a type that is in
* a conflict set, the conflicting types are incremented in
* the aggregate set. When a domain is destroyed, the
* conflicting types to its type are decremented.
* If a domain has multiple types, this procedure works over
* all those types.
*
* conflict_aggregate_set[i] holds the number of
* running domains that have a conflict with type i.
*
* running_types[i] holds the number of running domains
* that include type i in their ssidref-referenced type set
*
* conflict_sets[i][j] is "0" if type j has no conflict
* with type i and is "1" otherwise.
*/
/* high-16 = version, low-16 = check magic */
#define ACM_MAGIC 0x0001debc
/* each offset in bytes from start of the struct they
* are part of */
/* V3 of the policy buffer aded a version structure */
struct acm_policy_version
{
uint32_t major;
uint32_t minor;
};
/* each buffer consists of all policy information for
* the respective policy given in the policy code
*
* acm_policy_buffer, acm_chwall_policy_buffer,
* and acm_ste_policy_buffer need to stay 32-bit aligned
* because we create binary policies also with external
* tools that assume packed representations (e.g. the java tool)
*/
struct acm_policy_buffer {
uint32_t policy_version; /* ACM_POLICY_VERSION */
uint32_t magic;
uint32_t len;
uint32_t policy_reference_offset;
uint32_t primary_policy_code;
uint32_t primary_buffer_offset;
uint32_t secondary_policy_code;
uint32_t secondary_buffer_offset;
struct acm_policy_version xml_pol_version; /* add in V3 */
};
struct acm_policy_reference_buffer {
uint32_t len;
};
struct acm_chwall_policy_buffer {
uint32_t policy_version; /* ACM_CHWALL_VERSION */
uint32_t policy_code;
uint32_t chwall_max_types;
uint32_t chwall_max_ssidrefs;
uint32_t chwall_max_conflictsets;
uint32_t chwall_ssid_offset;
uint32_t chwall_conflict_sets_offset;
uint32_t chwall_running_types_offset;
uint32_t chwall_conflict_aggregate_offset;
};
struct acm_ste_policy_buffer {
uint32_t policy_version; /* ACM_STE_VERSION */
uint32_t policy_code;
uint32_t ste_max_types;
uint32_t ste_max_ssidrefs;
uint32_t ste_ssid_offset;
};
struct acm_stats_buffer {
uint32_t magic;
uint32_t len;
uint32_t primary_policy_code;
uint32_t primary_stats_offset;
uint32_t secondary_policy_code;
uint32_t secondary_stats_offset;
};
struct acm_ste_stats_buffer {
uint32_t ec_eval_count;
uint32_t gt_eval_count;
uint32_t ec_denied_count;
uint32_t gt_denied_count;
uint32_t ec_cachehit_count;
uint32_t gt_cachehit_count;
};
struct acm_ssid_buffer {
uint32_t len;
ssidref_t ssidref;
uint32_t policy_reference_offset;
uint32_t primary_policy_code;
uint32_t primary_max_types;
uint32_t primary_types_offset;
uint32_t secondary_policy_code;
uint32_t secondary_max_types;
uint32_t secondary_types_offset;
};
#endif
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -1,159 +0,0 @@
/*
* acm_ops.h: Xen access control module hypervisor commands
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Reiner Sailer <sailer@watson.ibm.com>
* Copyright (c) 2005,2006 International Business Machines Corporation.
*/
#ifndef __XEN_PUBLIC_ACM_OPS_H__
#define __XEN_PUBLIC_ACM_OPS_H__
#include "xen.h"
#include "acm.h"
/*
* Make sure you increment the interface version whenever you modify this file!
* This makes sure that old versions of acm tools will stop working in a
* well-defined way (rather than crashing the machine, for instance).
*/
#define ACM_INTERFACE_VERSION 0xAAAA000A
/************************************************************************/
/*
* Prototype for this hypercall is:
* int acm_op(int cmd, void *args)
* @cmd == ACMOP_??? (access control module operation).
* @args == Operation-specific extra arguments (NULL if none).
*/
#define ACMOP_setpolicy 1
struct acm_setpolicy {
/* IN */
XEN_GUEST_HANDLE_64(void) pushcache;
uint32_t pushcache_size;
};
#define ACMOP_getpolicy 2
struct acm_getpolicy {
/* IN */
XEN_GUEST_HANDLE_64(void) pullcache;
uint32_t pullcache_size;
};
#define ACMOP_dumpstats 3
struct acm_dumpstats {
/* IN */
XEN_GUEST_HANDLE_64(void) pullcache;
uint32_t pullcache_size;
};
#define ACMOP_getssid 4
#define ACM_GETBY_ssidref 1
#define ACM_GETBY_domainid 2
struct acm_getssid {
/* IN */
uint32_t get_ssid_by; /* ACM_GETBY_* */
union {
domaintype_t domainid;
ssidref_t ssidref;
} id;
XEN_GUEST_HANDLE_64(void) ssidbuf;
uint32_t ssidbuf_size;
};
#define ACMOP_getdecision 5
struct acm_getdecision {
/* IN */
uint32_t get_decision_by1; /* ACM_GETBY_* */
uint32_t get_decision_by2; /* ACM_GETBY_* */
union {
domaintype_t domainid;
ssidref_t ssidref;
} id1;
union {
domaintype_t domainid;
ssidref_t ssidref;
} id2;
uint32_t hook;
/* OUT */
uint32_t acm_decision;
};
#define ACMOP_chgpolicy 6
struct acm_change_policy {
/* IN */
XEN_GUEST_HANDLE_64(void) policy_pushcache;
uint32_t policy_pushcache_size;
XEN_GUEST_HANDLE_64(void) del_array;
uint32_t delarray_size;
XEN_GUEST_HANDLE_64(void) chg_array;
uint32_t chgarray_size;
/* OUT */
/* array with error code */
XEN_GUEST_HANDLE_64(void) err_array;
uint32_t errarray_size;
};
#define ACMOP_relabeldoms 7
struct acm_relabel_doms {
/* IN */
XEN_GUEST_HANDLE_64(void) relabel_map;
uint32_t relabel_map_size;
/* OUT */
XEN_GUEST_HANDLE_64(void) err_array;
uint32_t errarray_size;
};
/* future interface to Xen */
struct xen_acmctl {
uint32_t cmd;
uint32_t interface_version;
union {
struct acm_setpolicy setpolicy;
struct acm_getpolicy getpolicy;
struct acm_dumpstats dumpstats;
struct acm_getssid getssid;
struct acm_getdecision getdecision;
struct acm_change_policy change_policy;
struct acm_relabel_doms relabel_doms;
} u;
};
typedef struct xen_acmctl xen_acmctl_t;
DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -27,93 +27,253 @@
#ifndef __XEN_PUBLIC_ARCH_ARM_H__
#define __XEN_PUBLIC_ARCH_ARM_H__
/* hypercall calling convention
* ----------------------------
/*
* `incontents 50 arm_abi Hypercall Calling Convention
*
* A hypercall is issued using the ARM HVC instruction.
*
* A hypercall can take up to 5 arguments. These are passed in
* registers, the first argument in r0, the second argument in r1, the
* third in r2, the forth in r3 and the fifth in r4.
* registers, the first argument in x0/r0 (for arm64/arm32 guests
* respectively irrespective of whether the underlying hypervisor is
* 32- or 64-bit), the second argument in x1/r1, the third in x2/r2,
* the forth in x3/r3 and the fifth in x4/r4.
*
* The hypercall number is passed in r12.
* The hypercall number is passed in r12 (arm) or x16 (arm64). In both
* cases the relevant ARM procedure calling convention specifies this
* is an inter-procedure-call scratch register (e.g. for use in linker
* stubs). This use does not conflict with use during a hypercall.
*
* The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG.
*
* The return value is in r0.
* The return value is in x0/r0.
*
* The hypercall will clobber r12 and the argument registers used by
* that hypercall (except r0 which is the return value) i.e. a 2
* argument hypercall will clobber r1 and a 4 argument hypercall will
* clobber r1, r2 and r3.
* The hypercall will clobber x16/r12 and the argument registers used
* by that hypercall (except r0 which is the return value) i.e. in
* addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a
* 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3.
*
* Parameter structs passed to hypercalls are laid out according to
* the Procedure Call Standard for the ARM Architecture (AAPCS, AKA
* EABI) and Procedure Call Standard for the ARM 64-bit Architecture
* (AAPCS64). Where there is a conflict the 64-bit standard should be
* used regardless of guest type. Structures which are passed as
* hypercall arguments are always little endian.
*
* All memory which is shared with other entities in the system
* (including the hypervisor and other guests) must reside in memory
* which is mapped as Normal Inner-cacheable. This applies to:
* - hypercall arguments passed via a pointer to guest memory.
* - memory shared via the grant table mechanism (including PV I/O
* rings etc).
* - memory shared with the hypervisor (struct shared_info, struct
* vcpu_info, the grant table, etc).
*
* Any Inner cache allocation strategy (Write-Back, Write-Through etc)
* is acceptable. There is no restriction on the Outer-cacheability.
*/
/*
* `incontents 55 arm_hcall Supported Hypercalls
*
* Xen on ARM makes extensive use of hardware facilities and therefore
* only a subset of the potential hypercalls are required.
*
* Since ARM uses second stage paging any machine/physical addresses
* passed to hypercalls are Guest Physical Addresses (Intermediate
* Physical Addresses) unless otherwise noted.
*
* The following hypercalls (and sub operations) are supported on the
* ARM platform. Other hypercalls should be considered
* unavailable/unsupported.
*
* HYPERVISOR_memory_op
* All generic sub-operations
*
* HYPERVISOR_domctl
* All generic sub-operations, with the exception of:
* * XEN_DOMCTL_irq_permission (not yet implemented)
*
* HYPERVISOR_sched_op
* All generic sub-operations, with the exception of:
* * SCHEDOP_block -- prefer wfi hardware instruction
*
* HYPERVISOR_console_io
* All generic sub-operations
*
* HYPERVISOR_xen_version
* All generic sub-operations
*
* HYPERVISOR_event_channel_op
* All generic sub-operations
*
* HYPERVISOR_physdev_op
* No sub-operations are currently supported
*
* HYPERVISOR_sysctl
* All generic sub-operations, with the exception of:
* * XEN_SYSCTL_page_offline_op
* * XEN_SYSCTL_get_pmstat
* * XEN_SYSCTL_pm_op
*
* HYPERVISOR_hvm_op
* Exactly these sub-operations are supported:
* * HVMOP_set_param
* * HVMOP_get_param
*
* HYPERVISOR_grant_table_op
* All generic sub-operations
*
* HYPERVISOR_vcpu_op
* Exactly these sub-operations are supported:
* * VCPUOP_register_vcpu_info
* * VCPUOP_register_runstate_memory_area
*
*
* Other notes on the ARM ABI:
*
* - struct start_info is not exported to ARM guests.
*
* - struct shared_info is mapped by ARM guests using the
* HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing
* XENMAPSPACE_shared_info as space parameter.
*
* - All the per-cpu struct vcpu_info are mapped by ARM guests using the
* HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including cpu0
* struct vcpu_info.
*
* - The grant table is mapped using the HYPERVISOR_memory_op sub-op
* XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space
* parameter. The memory range specified under the Xen compatible
* hypervisor node on device tree can be used as target gpfn for the
* mapping.
*
* - Xenstore is initialized by using the two hvm_params
* HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read
* with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
*
* - The paravirtualized console is initialized by using the two
* hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN. They
* can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
*
* - Event channel notifications are delivered using the percpu GIC
* interrupt specified under the Xen compatible hypervisor node on
* device tree.
*
* - The device tree Xen compatible node is fully described under Linux
* at Documentation/devicetree/bindings/arm/xen.txt.
*/
#define XEN_HYPERCALL_TAG 0XEA1
#define int64_aligned_t int64_t __attribute__((aligned(8)))
#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#ifndef __ASSEMBLY__
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef union { type *p; unsigned long q; } \
__guest_handle_ ## name; \
typedef union { type *p; uint64_aligned_t q; } \
__guest_handle_64_ ## name;
/*
* XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
* in a struct in memory. On ARM it is always 8 bytes in size and
* 8-byte aligned.
* XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as an
* hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64.
*/
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name
#define set_xen_guest_handle_raw(hnd, val) \
do { \
typeof(&(hnd)) _sxghr_tmp = &(hnd); \
_sxghr_tmp->q = 0; \
_sxghr_tmp->p = val; \
} while ( 0 )
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
struct cpu_user_regs
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
# define __DECL_REG(n64, n32) union { \
uint64_t n64; \
uint32_t n32; \
}
#else
/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */
#define __DECL_REG(n64, n32) uint64_t n64
#endif
struct vcpu_guest_core_regs
{
uint32_t r0;
uint32_t r1;
uint32_t r2;
uint32_t r3;
uint32_t r4;
uint32_t r5;
uint32_t r6;
uint32_t r7;
uint32_t r8;
uint32_t r9;
uint32_t r10;
union {
uint32_t r11;
uint32_t fp;
};
uint32_t r12;
/* Aarch64 Aarch32 */
__DECL_REG(x0, r0_usr);
__DECL_REG(x1, r1_usr);
__DECL_REG(x2, r2_usr);
__DECL_REG(x3, r3_usr);
__DECL_REG(x4, r4_usr);
__DECL_REG(x5, r5_usr);
__DECL_REG(x6, r6_usr);
__DECL_REG(x7, r7_usr);
__DECL_REG(x8, r8_usr);
__DECL_REG(x9, r9_usr);
__DECL_REG(x10, r10_usr);
__DECL_REG(x11, r11_usr);
__DECL_REG(x12, r12_usr);
uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */
__DECL_REG(x13, sp_usr);
__DECL_REG(x14, lr_usr);
__DECL_REG(x15, __unused_sp_hyp);
__DECL_REG(x16, lr_irq);
__DECL_REG(x17, sp_irq);
__DECL_REG(x18, lr_svc);
__DECL_REG(x19, sp_svc);
__DECL_REG(x20, lr_abt);
__DECL_REG(x21, sp_abt);
__DECL_REG(x22, lr_und);
__DECL_REG(x23, sp_und);
__DECL_REG(x24, r8_fiq);
__DECL_REG(x25, r9_fiq);
__DECL_REG(x26, r10_fiq);
__DECL_REG(x27, r11_fiq);
__DECL_REG(x28, r12_fiq);
__DECL_REG(x29, sp_fiq);
__DECL_REG(x30, lr_fiq);
/* Return address and mode */
__DECL_REG(pc64, pc32); /* ELR_EL2 */
uint32_t cpsr; /* SPSR_EL2 */
/* r14 - LR: is the same physical register as LR_usr */
union {
uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */
uint32_t lr_usr;
uint32_t spsr_el1; /* AArch64 */
uint32_t spsr_svc; /* AArch32 */
};
uint32_t pc; /* Return IP */
uint32_t cpsr; /* Return mode */
uint32_t pad0; /* Doubleword-align the kernel half of the frame */
/* AArch32 guests only */
uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
/* Outer guest frame only from here on... */
uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq;
uint32_t sp_usr; /* LR_usr is the same register as LR, see above */
uint32_t sp_svc, sp_abt, sp_und, sp_irq, sp_fiq;
uint32_t lr_svc, lr_abt, lr_und, lr_irq, lr_fiq;
uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq;
uint32_t pad1; /* Doubleword-align the user half of the frame */
/* AArch64 guests only */
uint64_t sp_el0;
uint64_t sp_el1, elr_el1;
};
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);
#undef __DECL_REG
typedef uint64_t xen_pfn_t;
#define PRI_xen_pfn PRIx64
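
    [Editorial sketch, not part of this header: how a two-argument hypercall
    could be issued from an arm64 guest following the convention documented at
    the top of this file (hypercall number in x16, arguments starting at x0,
    HVC immediate equal to XEN_HYPERCALL_TAG, result returned in x0). A
    GCC/Clang toolchain is assumed; real guests normally use generated
    hypercall stubs rather than hand-written inline assembly.]

    static inline long
    xen_hypercall2(unsigned long op, unsigned long a1, unsigned long a2)
    {
            register unsigned long x16 __asm__("x16") = op; /* hypercall number */
            register unsigned long x0  __asm__("x0")  = a1; /* first argument   */
            register unsigned long x1  __asm__("x1")  = a2; /* second argument  */

            __asm__ __volatile__(
                "hvc #0xEA1"    /* HVC ISS carries XEN_HYPERCALL_TAG */
                : "+r" (x0), "+r" (x1), "+r" (x16) /* x16 and args are clobbered */
                :
                : "memory");
            return ((long)x0);  /* the return value comes back in x0 */
    }
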
@ -122,30 +282,77 @@ typedef uint64_t xen_pfn_t;
/* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */
#define XEN_LEGACY_MAX_VCPUS 1
typedef uint32_t xen_ulong_t;
typedef uint64_t xen_ulong_t;
#define PRI_xen_ulong PRIx64
#if defined(__XEN__) || defined(__XEN_TOOLS__)
struct vcpu_guest_context {
struct cpu_user_regs user_regs; /* User-level CPU registers */
#define _VGCF_online 0
#define VGCF_online (1<<_VGCF_online)
uint32_t flags; /* VGCF_* */
struct vcpu_guest_core_regs user_regs; /* Core CPU registers */
uint32_t sctlr;
uint32_t ttbr0, ttbr1, ttbcr;
uint64_t ttbcr, ttbr0, ttbr1;
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
struct arch_vcpu_info { };
/*
* struct xen_arch_domainconfig's ABI is covered by
* XEN_DOMCTL_INTERFACE_VERSION.
*/
#define XEN_DOMCTL_CONFIG_GIC_NATIVE 0
#define XEN_DOMCTL_CONFIG_GIC_V2 1
#define XEN_DOMCTL_CONFIG_GIC_V3 2
struct xen_arch_domainconfig {
/* IN/OUT */
uint8_t gic_version;
/* IN */
uint32_t nr_spis;
/*
* OUT
* Based on the property clock-frequency in the DT timer node.
* The property may be present when the bootloader/firmware doesn't
* set correctly CNTFRQ which hold the timer frequency.
*
* As it's not possible to trap this register, we have to replicate
* the value in the guest DT.
*
* = 0 => property not present
* > 0 => Value of the property
*
*/
uint32_t clock_frequency;
};
#endif /* __XEN__ || __XEN_TOOLS__ */
struct arch_vcpu_info {
};
typedef struct arch_vcpu_info arch_vcpu_info_t;
struct arch_shared_info { };
struct arch_shared_info {
};
typedef struct arch_shared_info arch_shared_info_t;
typedef uint64_t xen_callback_t;
#endif /* ifndef __ASSEMBLY __ */
#endif
/* PSR bits (CPSR, SPSR)*/
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* 0-4: Mode */
#define PSR_MODE_MASK 0x1f
/* PSR bits (CPSR, SPSR) */
#define PSR_THUMB (1<<5) /* Thumb Mode enable */
#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
#define PSR_BIG_ENDIAN (1<<9) /* arm32: Big Endian Mode */
#define PSR_DBG_MASK (1<<9) /* arm64: Debug Exception mask */
#define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */
#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
/* 32 bit modes */
#define PSR_MODE_USR 0x10
#define PSR_MODE_FIQ 0x11
#define PSR_MODE_IRQ 0x12
@ -156,19 +363,102 @@ typedef uint64_t xen_callback_t;
#define PSR_MODE_UND 0x1b
#define PSR_MODE_SYS 0x1f
#define PSR_THUMB (1<<5) /* Thumb Mode enable */
#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
#define PSR_BIG_ENDIAN (1<<9) /* Big Endian Mode */
#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
/* 64 bit modes */
#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */
#define PSR_MODE_EL3h 0x0d
#define PSR_MODE_EL3t 0x0c
#define PSR_MODE_EL2h 0x09
#define PSR_MODE_EL2t 0x08
#define PSR_MODE_EL1h 0x05
#define PSR_MODE_EL1t 0x04
#define PSR_MODE_EL0t 0x00
#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)
#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)
#define SCTLR_GUEST_INIT 0x00c50078
/*
* Virtual machine platform (memory layout, interrupts)
*
* These are defined for consistency between the tools and the
* hypervisor. Guests must not rely on these hardcoded values but
* should instead use the FDT.
*/
/* Physical Address Space */
/*
* vGIC mappings: Only one set of mapping is used by the guest.
* Therefore they can overlap.
*/
/* vGIC v2 mappings */
#define GUEST_GICD_BASE 0x03001000ULL
#define GUEST_GICD_SIZE 0x00001000ULL
#define GUEST_GICC_BASE 0x03002000ULL
#define GUEST_GICC_SIZE 0x00000100ULL
/* vGIC v3 mappings */
#define GUEST_GICV3_GICD_BASE 0x03001000ULL
#define GUEST_GICV3_GICD_SIZE 0x00010000ULL
#define GUEST_GICV3_RDIST_STRIDE 0x20000ULL
#define GUEST_GICV3_RDIST_REGIONS 1
#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU127 */
#define GUEST_GICV3_GICR0_SIZE 0x01000000ULL
/*
* 16MB == 4096 pages reserved for guest to use as a region to map its
* grant table in.
*/
#define GUEST_GNTTAB_BASE 0x38000000ULL
#define GUEST_GNTTAB_SIZE 0x01000000ULL
#define GUEST_MAGIC_BASE 0x39000000ULL
#define GUEST_MAGIC_SIZE 0x01000000ULL
#define GUEST_RAM_BANKS 2
#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE 0xc0000000ULL
#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE 0xfe00000000ULL
#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */
/* Largest amount of actual RAM, not including holes */
#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)
/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */
#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }
/* Interrupts */
#define GUEST_TIMER_VIRT_PPI 27
#define GUEST_TIMER_PHYS_S_PPI 29
#define GUEST_TIMER_PHYS_NS_PPI 30
#define GUEST_EVTCHN_PPI 31
/* PSCI functions */
#define PSCI_cpu_suspend 0
#define PSCI_cpu_off 1
#define PSCI_cpu_on 2
#define PSCI_migrate 3
#endif
#ifndef __ASSEMBLY__
/* Stub definition of PMU structure */
typedef struct xen_pmu_arch { uint8_t dummy; } xen_pmu_arch_t;
#endif
#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -31,7 +31,7 @@
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -1,120 +0,0 @@
/*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (C) IBM Corp. 2005, 2006
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#include "xen.h"
#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
#define __XEN_PUBLIC_ARCH_PPC_64_H__
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { \
int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
type *p; \
} __attribute__((__aligned__(8))) __guest_handle_ ## name
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define set_xen_guest_handle(hnd, val) \
do { \
if (sizeof ((hnd).__pad)) \
(hnd).__pad[0] = 0; \
(hnd).p = val; \
} while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
#ifndef __ASSEMBLY__
typedef unsigned long long xen_pfn_t;
#define PRI_xen_pfn "llx"
#endif
/*
* Pointers and other address fields inside interface structures are padded to
* 64 bits. This means that field alignments aren't different between 32- and
* 64-bit architectures.
*/
/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
#define __MEMORY_PADDING(_X)
#define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X)
#define MEMORY_PADDING _MEMORY_PADDING(__LINE__)
/* And the trap vector is... */
#define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */
#ifndef __ASSEMBLY__
#define XENCOMM_INLINE_FLAG (1UL << 63)
typedef uint64_t xen_ulong_t;
/* User-accessible registers: nost of these need to be saved/restored
* for every nested Xen invocation. */
struct cpu_user_regs
{
uint64_t gprs[32];
uint64_t lr;
uint64_t ctr;
uint64_t srr0;
uint64_t srr1;
uint64_t pc;
uint64_t msr;
uint64_t fpscr; /* XXX Is this necessary */
uint64_t xer;
uint64_t hid4; /* debug only */
uint64_t dar; /* debug only */
uint32_t dsisr; /* debug only */
uint32_t cr;
uint32_t __pad; /* good spot for another 32bit reg */
uint32_t entry_vector;
};
typedef struct cpu_user_regs cpu_user_regs_t;
typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
/* ONLY used to communicate with dom0! See also struct exec_domain. */
struct vcpu_guest_context {
cpu_user_regs_t user_regs; /* User-level CPU registers */
uint64_t sdr1; /* Pagetable base */
/* XXX etc */
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
struct arch_shared_info {
uint64_t boot_timebase;
};
struct arch_vcpu_info {
};
/* Support for multi-processor guests. */
#define MAX_VIRT_CPUS 32
#endif
#endif

View File

@ -30,12 +30,20 @@
#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
/* Xen identification leaves start at 0x40000000. */
/*
* For compatibility with other hypervisor interfaces, the Xen cpuid leaves
* can be found at the first otherwise unused 0x100 aligned boundary starting
* from 0x40000000.
*
* e.g If viridian extensions are enabled for an HVM domain, the Xen cpuid
* leaves will start at 0x40000100
*/
#define XEN_CPUID_FIRST_LEAF 0x40000000
#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
/*
* Leaf 1 (0x40000000)
* Leaf 1 (0x40000x00)
* EAX: Largest Xen-information leaf. All leaves up to and including @EAX
* are supported by the Xen host.
* EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
@ -46,14 +54,14 @@
#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
/*
* Leaf 2 (0x40000001)
* Leaf 2 (0x40000x01)
* EAX[31:16]: Xen major version.
* EAX[15: 0]: Xen minor version.
* EBX-EDX: Reserved (currently all zeroes).
*/
/*
* Leaf 3 (0x40000002)
* Leaf 3 (0x40000x02)
* EAX: Number of hypercall transfer pages. This register is always guaranteed
* to specify one hypercall page.
* EBX: Base address of Xen-specific MSRs.
@ -65,4 +73,18 @@
#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
/*
* Leaf 5 (0x40000x04)
* HVM-specific features
* EAX: Features
* EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
*/
#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */
/* Memory mapped from other domains has valid IOMMU entries */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
#define XEN_CPUID_MAX_NUM_LEAVES 4
#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
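Since the leaf base is no longer fixed at 0x40000000, a guest has to probe each 0x100-aligned candidate until it sees the Xen signature. The following is a minimal detection sketch assuming a GCC/Clang environment (<cpuid.h> provides __cpuid()) and the XEN_CPUID_SIGNATURE_{EBX,ECX,EDX} constants from this header; the 0x40010000 upper bound is a common, but here assumed, search limit.

#include <stdint.h>
#include <cpuid.h>                          /* __cpuid() from GCC/Clang */

/* Returns the base of the Xen leaves, or 0 if no signature is found. */
static uint32_t find_xen_cpuid_base(void)
{
    uint32_t base, eax, ebx, ecx, edx;

    for (base = XEN_CPUID_FIRST_LEAF; base < 0x40010000; base += 0x100) {
        __cpuid(base, eax, ebx, ecx, edx);
        if (ebx == XEN_CPUID_SIGNATURE_EBX &&
            ecx == XEN_CPUID_SIGNATURE_ECX &&
            edx == XEN_CPUID_SIGNATURE_EDX &&
            (eax - base) >= 2)              /* leaves 1..3 must be present */
            return base;
    }
    return 0;
}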


@ -269,15 +269,18 @@ struct hvm_hw_cpu_compat {
};
static inline int _hvm_hw_fix_cpu(void *h) {
struct hvm_hw_cpu *new=h;
struct hvm_hw_cpu_compat *old=h;
union hvm_hw_cpu_union {
struct hvm_hw_cpu nat;
struct hvm_hw_cpu_compat cmp;
} *ucpu = (union hvm_hw_cpu_union *)h;
/* If we copy from the end backwards, we should
* be able to do the modification in-place */
new->error_code=old->error_code;
new->pending_event=old->pending_event;
new->tsc=old->tsc;
new->msr_tsc_aux=0;
ucpu->nat.error_code = ucpu->cmp.error_code;
ucpu->nat.pending_event = ucpu->cmp.pending_event;
ucpu->nat.tsc = ucpu->cmp.tsc;
ucpu->nat.msr_tsc_aux = 0;
return 0;
}
@ -541,7 +544,7 @@ DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
*/
struct hvm_hw_cpu_xsave {
uint64_t xfeature_mask;
uint64_t xfeature_mask; /* Ignored */
uint64_t xcr0; /* Updated by XSETBV */
uint64_t xcr0_accum; /* Updated by XSETBV */
struct {
@ -565,6 +568,8 @@ struct hvm_hw_cpu_xsave {
struct hvm_viridian_domain_context {
uint64_t hypercall_gpa;
uint64_t guest_os_id;
uint64_t time_ref_count;
uint64_t reference_tsc;
};
DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);
@ -577,13 +582,49 @@ DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
struct hvm_vmce_vcpu {
uint64_t caps;
uint64_t mci_ctl2_bank0;
uint64_t mci_ctl2_bank1;
};
DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
struct hvm_tsc_adjust {
uint64_t tsc_adjust;
};
DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
struct hvm_msr {
uint32_t count;
struct hvm_one_msr {
uint32_t index;
uint32_t _rsvd;
uint64_t val;
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
} msr[];
#elif defined(__GNUC__)
} msr[0];
#else
} msr[1 /* variable size */];
#endif
};
#define CPU_MSR_CODE 20
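Because the msr[] array is a variable-length tail, the byte size of a CPU_MSR_CODE save record depends on the count. A minimal sizing sketch (illustrative only):

#include <stddef.h>
#include <stdint.h>

/* Byte size of a struct hvm_msr save record carrying 'n' entries. */
static size_t hvm_msr_record_size(uint32_t n)
{
    return offsetof(struct hvm_msr, msr) + (size_t)n * sizeof(struct hvm_one_msr);
}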
/*
* Largest type-code in use
*/
#define HVM_SAVE_CODE_MAX 18
#define HVM_SAVE_CODE_MAX 20
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/


@ -0,0 +1,167 @@
/*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
*/
#ifndef __XEN_PUBLIC_ARCH_X86_PMU_H__
#define __XEN_PUBLIC_ARCH_X86_PMU_H__
/* x86-specific PMU definitions */
/* AMD PMU registers and structures */
struct xen_pmu_amd_ctxt {
/*
* Offsets to counter and control MSRs (relative to xen_pmu_arch.c.amd).
* For PV(H) guests these fields are RO.
*/
uint32_t counters;
uint32_t ctrls;
/* Counter MSRs */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint64_t regs[];
#elif defined(__GNUC__)
uint64_t regs[0];
#endif
};
typedef struct xen_pmu_amd_ctxt xen_pmu_amd_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_amd_ctxt_t);
/* Intel PMU registers and structures */
struct xen_pmu_cntr_pair {
uint64_t counter;
uint64_t control;
};
typedef struct xen_pmu_cntr_pair xen_pmu_cntr_pair_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_cntr_pair_t);
struct xen_pmu_intel_ctxt {
/*
* Offsets to fixed and architectural counter MSRs (relative to
* xen_pmu_arch.c.intel).
* For PV(H) guests these fields are RO.
*/
uint32_t fixed_counters;
uint32_t arch_counters;
/* PMU registers */
uint64_t global_ctrl;
uint64_t global_ovf_ctrl;
uint64_t global_status;
uint64_t fixed_ctrl;
uint64_t ds_area;
uint64_t pebs_enable;
uint64_t debugctl;
/* Fixed and architectural counter MSRs */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint64_t regs[];
#elif defined(__GNUC__)
uint64_t regs[0];
#endif
};
typedef struct xen_pmu_intel_ctxt xen_pmu_intel_ctxt_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_intel_ctxt_t);
/* Sampled domain's registers */
struct xen_pmu_regs {
uint64_t ip;
uint64_t sp;
uint64_t flags;
uint16_t cs;
uint16_t ss;
uint8_t cpl;
uint8_t pad[3];
};
typedef struct xen_pmu_regs xen_pmu_regs_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_regs_t);
/* PMU flags */
#define PMU_CACHED (1<<0) /* PMU MSRs are cached in the context */
#define PMU_SAMPLE_USER (1<<1) /* Sample is from user or kernel mode */
#define PMU_SAMPLE_REAL (1<<2) /* Sample is from realmode */
#define PMU_SAMPLE_PV (1<<3) /* Sample from a PV guest */
/*
* Architecture-specific information describing state of the processor at
* the time of PMU interrupt.
* Fields of this structure marked as RW for guest should only be written by
* the guest when PMU_CACHED bit in pmu_flags is set (which is done by the
* hypervisor during PMU interrupt). Hypervisor will read updated data in
* XENPMU_flush hypercall and clear PMU_CACHED bit.
*/
struct xen_pmu_arch {
union {
/*
* Processor's registers at the time of interrupt.
* WO for hypervisor, RO for guests.
*/
struct xen_pmu_regs regs;
/* Padding for adding new registers to xen_pmu_regs in the future */
#define XENPMU_REGS_PAD_SZ 64
uint8_t pad[XENPMU_REGS_PAD_SZ];
} r;
/* WO for hypervisor, RO for guest */
uint64_t pmu_flags;
/*
* APIC LVTPC register.
* RW for both hypervisor and guest.
* Only APIC_LVT_MASKED bit is loaded by the hypervisor into hardware
* during XENPMU_flush or XENPMU_lvtpc_set.
*/
union {
uint32_t lapic_lvtpc;
uint64_t pad;
} l;
/*
* Vendor-specific PMU registers.
* RW for both hypervisor and guest (see exceptions above).
* Guest's updates to this field are verified and then loaded by the
* hypervisor into hardware during XENPMU_flush
*/
union {
struct xen_pmu_amd_ctxt amd;
struct xen_pmu_intel_ctxt intel;
/*
* Padding for contexts (fixed parts only, does not include MSR banks
* that are specified by offsets)
*/
#define XENPMU_CTXT_PAD_SZ 128
uint8_t pad[XENPMU_CTXT_PAD_SZ];
} c;
};
typedef struct xen_pmu_arch xen_pmu_arch_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_arch_t);
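As a hedged illustration of the access rules spelled out above, a guest-side consumer would only touch the sample while the hypervisor has flagged it as cached. Everything here other than the structure and flag names is an assumption; in particular, mapping the shared xen_pmu_arch page is left to the caller.

#include <stdint.h>

/* Read the sampled instruction pointer, if a cached sample is pending. */
static uint64_t xen_pmu_sample_ip(const volatile struct xen_pmu_arch *pmu)
{
    if (!(pmu->pmu_flags & PMU_CACHED))
        return 0;                     /* no sample currently owned by the guest */
    return pmu->r.regs.ip;            /* WO for Xen, RO for the guest */
}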
#endif /* __XEN_PUBLIC_ARCH_X86_PMU_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/


@ -414,7 +414,7 @@ struct xen_mc_mceinject {
struct xen_mc_inject_v2 {
uint32_t flags;
struct xenctl_cpumap cpumap;
struct xenctl_bitmap cpumap;
};
#endif


@ -104,6 +104,7 @@
do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
(hnd).p = val; \
} while ( 0 )
#define int64_aligned_t int64_t __attribute__((aligned(8)))
#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
@ -163,7 +164,7 @@ typedef struct xen_callback xen_callback_t;
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil


@ -194,7 +194,7 @@ typedef unsigned long xen_callback_t;
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil


@ -38,12 +38,21 @@
typedef type * __guest_handle_ ## name
#endif
/*
* XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
* in a struct in memory.
* XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
* hypercall argument.
* XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but
* they might not be on other architectures.
*/
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
___DEFINE_XEN_GUEST_HANDLE(name, type); \
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
@ -61,8 +70,12 @@ typedef unsigned long xen_pfn_t;
#define PRI_xen_pfn "lx"
#endif
#define XEN_HAVE_PV_GUEST_ENTRY 1
#define XEN_HAVE_PV_UPCALL_MASK 1
/*
* SEGMENT DESCRIPTOR TABLES
* `incontents 200 segdesc Segment Descriptor Tables
*/
/*
* ` enum neg_errnoval
@ -74,17 +87,31 @@ typedef unsigned long xen_pfn_t;
* start of the GDT because some stupid OSes export hard-coded selector values
* in their ABI. These hard-coded values are always near the start of the GDT,
* so Xen places itself out of the way, at the far end of the GDT.
*
* NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op
*/
#define FIRST_RESERVED_GDT_PAGE 14
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
/*
* ` enum neg_errnoval
* ` HYPERVISOR_update_descriptor(u64 pa, u64 desc);
* `
* ` @pa The machine physical address of the descriptor to
* ` update. Must be either a descriptor page or writable.
* ` @desc The descriptor value to update, in the same format as a
* ` native descriptor table entry.
*/
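A hedged sketch of a PV guest using the hypercall documented above. HYPERVISOR_update_descriptor() is the usual guest-side wrapper name, and virt_to_machine() stands in for whatever helper the kernel uses to turn the descriptor's virtual address into a machine physical address; both are assumptions here.

#include <stdint.h>

/* Replace one descriptor table entry with 'desc' (a native-format entry). */
static int pv_update_descriptor(void *entry_va, uint64_t desc)
{
    uint64_t pa = virt_to_machine(entry_va);   /* assumed translation helper */
    return HYPERVISOR_update_descriptor(pa, desc);
}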
/* Maximum number of virtual CPUs in legacy multi-processor guests. */
#define XEN_LEGACY_MAX_VCPUS 32
#ifndef __ASSEMBLY__
typedef unsigned long xen_ulong_t;
#define PRI_xen_ulong "lx"
/*
* ` enum neg_errnoval
@ -127,6 +154,15 @@ typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
/*
* The following is all CPU context. Note that the fpu_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
*
* Also note that when calling DOMCTL_setvcpucontext and VCPU_initialise
* for HVM and PVH guests, not all information in this structure is updated:
*
* - For HVM guests, the structures read include: fpu_ctxt (if
* VGCT_I387_VALID is set), flags, user_regs, debugreg[*]
*
* - PVH guests are the same as HVM guests, but additionally use ctrlreg[3] to
* set cr3. All other fields not used should be set to 0.
*/
struct vcpu_guest_context {
/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
@ -184,14 +220,58 @@ typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
struct arch_shared_info {
unsigned long max_pfn; /* max pfn that appears in table */
/* Frame containing list of mfns containing list of mfns containing p2m. */
/*
* Number of valid entries in the p2m table(s) anchored at
* pfn_to_mfn_frame_list_list and/or p2m_vaddr.
*/
unsigned long max_pfn;
/*
* Frame containing list of mfns containing list of mfns containing p2m.
* A value of 0 indicates it has not yet been set up, ~0 indicates it has
* been set to invalid e.g. due to the p2m being too large for the 3-level
* p2m tree. In this case the linear mapper p2m list anchored at p2m_vaddr
* is to be used.
*/
xen_pfn_t pfn_to_mfn_frame_list_list;
unsigned long nmi_reason;
uint64_t pad[32];
/*
* Following three fields are valid if p2m_cr3 contains a value different
* from 0.
* p2m_cr3 is the root of the address space where p2m_vaddr is valid.
* p2m_cr3 is in the same format as a cr3 value in the vcpu register state
* and holds the folded machine frame number (via xen_pfn_to_cr3) of a
* L3 or L4 page table.
* p2m_vaddr holds the virtual address of the linear p2m list. All entries
* in the range [0...max_pfn[ are accessible via this pointer.
* p2m_generation will be incremented by the guest before and after each
* change of the mappings of the p2m list. p2m_generation starts at 0 and
* a value with the least significant bit set indicates that a mapping
* update is in progress. This allows guest external software (e.g. in Dom0)
* to verify that read mappings are consistent and whether they have changed
* since the last check.
* Modifying a p2m element in the linear p2m list is allowed via an atomic
* write only.
*/
unsigned long p2m_cr3; /* cr3 value of the p2m address space */
unsigned long p2m_vaddr; /* virtual address of the p2m list */
unsigned long p2m_generation; /* generation count of p2m mapping */
#ifdef __i386__
/* There's no room for this field in the generic structure. */
uint32_t wc_sec_hi;
#endif
};
typedef struct arch_shared_info arch_shared_info_t;
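The generation counter described above behaves like a sequence lock, so an external reader (for example a dom0 tool walking the linear p2m list) retries until it observes an even, unchanged value. This is only a sketch: the mappings of arch_shared_info and of the p2m list are assumed, and __sync_synchronize() is used as a generic read barrier.

#include <stdint.h>

static unsigned long read_p2m_entry(const volatile struct arch_shared_info *asi,
                                    const volatile unsigned long *p2m,
                                    unsigned long pfn)
{
    unsigned long gen, mfn;

    for (;;) {
        gen = asi->p2m_generation;
        __sync_synchronize();
        if (gen & 1)                      /* mapping update in progress, retry */
            continue;
        mfn = p2m[pfn];
        __sync_synchronize();
        if (gen == asi->p2m_generation)   /* nothing changed while we read */
            return mfn;
    }
}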
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/*
* struct xen_arch_domainconfig's ABI is covered by
* XEN_DOMCTL_INTERFACE_VERSION.
*/
struct xen_arch_domainconfig {
char dummy;
};
#endif
#endif /* !__ASSEMBLY__ */
/*
@ -229,7 +309,7 @@ typedef struct arch_shared_info arch_shared_info_t;
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil


@ -36,7 +36,7 @@
* @extra_args == Operation-specific extra arguments (NULL if none).
*/
/* ia64, x86: Callback for event delivery. */
/* x86: Callback for event delivery. */
#define CALLBACKTYPE_event 0
/* x86: Failsafe callback when guest state cannot be restored by Xen. */
@ -113,7 +113,7 @@ DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil


@ -112,7 +112,7 @@ DEFINE_XEN_GUEST_HANDLE(dom0_op_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil


@ -34,8 +34,10 @@
#include "xen.h"
#include "grant_table.h"
#include "hvm/save.h"
#include "memory.h"
#define XEN_DOMCTL_INTERFACE_VERSION 0x00000008
#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000b
/*
* NB. xen_domctl.domain is an IN/OUT parameter for this operation.
@ -46,7 +48,7 @@ struct xen_domctl_createdomain {
/* IN parameters */
uint32_t ssidref;
xen_domain_handle_t handle;
/* Is this an HVM guest (as opposed to a PV guest)? */
/* Is this an HVM guest (as opposed to a PVH or PV guest)? */
#define _XEN_DOMCTL_CDF_hvm_guest 0
#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
/* Use hardware-assisted paging if available? */
@ -58,7 +60,11 @@ struct xen_domctl_createdomain {
/* Disable out-of-sync shadow page tables? */
#define _XEN_DOMCTL_CDF_oos_off 3
#define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off)
/* Is this a PVH guest (as opposed to an HVM or PV guest)? */
#define _XEN_DOMCTL_CDF_pvh_guest 4
#define XEN_DOMCTL_CDF_pvh_guest (1U<<_XEN_DOMCTL_CDF_pvh_guest)
uint32_t flags;
struct xen_arch_domainconfig config;
};
typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
@ -88,17 +94,22 @@ struct xen_domctl_getdomaininfo {
/* Being debugged. */
#define _XEN_DOMINF_debugged 6
#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged)
/* domain is PVH */
#define _XEN_DOMINF_pvh_guest 7
#define XEN_DOMINF_pvh_guest (1U<<_XEN_DOMINF_pvh_guest)
/* XEN_DOMINF_shutdown guest-supplied code. */
#define XEN_DOMINF_shutdownmask 255
#define XEN_DOMINF_shutdownshift 16
uint32_t flags; /* XEN_DOMINF_* */
uint64_aligned_t tot_pages;
uint64_aligned_t max_pages;
uint64_aligned_t outstanding_pages;
uint64_aligned_t shr_pages;
uint64_aligned_t paged_pages;
uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
uint64_aligned_t cpu_time;
uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
#define XEN_INVALID_MAX_VCPU_ID (~0U) /* Domain has no vcpus? */
uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
uint32_t ssidref;
xen_domain_handle_t handle;
@ -135,30 +146,9 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
#define XEN_DOMCTL_PFINFO_XALLOC (0xeU<<28) /* allocate-only page */
#define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
#define XEN_DOMCTL_PFINFO_BROKEN (0xdU<<28) /* broken page */
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
struct xen_domctl_getpageframeinfo {
/* IN variables. */
uint64_aligned_t gmfn; /* GMFN to query */
/* OUT variables. */
/* Is the page PINNED to a type? */
uint32_t type; /* see above type defs */
};
typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
/* XEN_DOMCTL_getpageframeinfo2 */
struct xen_domctl_getpageframeinfo2 {
/* IN variables. */
uint64_aligned_t num;
/* IN/OUT variables. */
XEN_GUEST_HANDLE_64(uint32) array;
};
typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
/* XEN_DOMCTL_getpageframeinfo3 */
struct xen_domctl_getpageframeinfo3 {
/* IN variables. */
@ -278,12 +268,47 @@ typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
/* Get/set the NUMA node(s) with which the guest has affinity with. */
/* XEN_DOMCTL_setnodeaffinity */
/* XEN_DOMCTL_getnodeaffinity */
struct xen_domctl_nodeaffinity {
struct xenctl_bitmap nodemap;/* IN */
};
typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
/* Get/set which physical cpus a vcpu can execute on. */
/* XEN_DOMCTL_setvcpuaffinity */
/* XEN_DOMCTL_getvcpuaffinity */
struct xen_domctl_vcpuaffinity {
uint32_t vcpu; /* IN */
struct xenctl_cpumap cpumap; /* IN/OUT */
/* IN variables. */
uint32_t vcpu;
/* Set/get the hard affinity for vcpu */
#define _XEN_VCPUAFFINITY_HARD 0
#define XEN_VCPUAFFINITY_HARD (1U<<_XEN_VCPUAFFINITY_HARD)
/* Set/get the soft affinity for vcpu */
#define _XEN_VCPUAFFINITY_SOFT 1
#define XEN_VCPUAFFINITY_SOFT (1U<<_XEN_VCPUAFFINITY_SOFT)
uint32_t flags;
/*
* IN/OUT variables.
*
* Both are IN/OUT for XEN_DOMCTL_setvcpuaffinity, in which case they
* contain effective hard and/or soft affinity. That is, upon successful
* return, cpumap_soft contains the intersection of the soft affinity,
* hard affinity and the cpupool's online CPUs for the domain (if
* XEN_VCPUAFFINITY_SOFT was set in flags). cpumap_hard contains the
* intersection between hard affinity and the cpupool's online CPUs (if
* XEN_VCPUAFFINITY_HARD was set in flags).
*
* Both are OUT-only for XEN_DOMCTL_getvcpuaffinity, in which case they
* contain the plain hard and/or soft affinity masks that were set during
* previous successful calls to XEN_DOMCTL_setvcpuaffinity (or the
* default values), without intersecting or altering them in any way.
*/
struct xenctl_bitmap cpumap_hard;
struct xenctl_bitmap cpumap_soft;
};
typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
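To show how the flags and the two bitmaps interact, here is a hedged toolstack-side sketch that sets both hard and soft affinity for one vcpu in a single call. do_domctl() and fill_cpumap() are hypothetical wrappers for issuing the hypercall and for populating a struct xenctl_bitmap from a plain bitmap in toolstack memory.

#include <stdint.h>
#include <string.h>

static int set_vcpu_affinity(uint32_t domid, uint32_t vcpu,
                             uint8_t *hard, uint8_t *soft, uint32_t nr_cpus)
{
    struct xen_domctl op;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_DOMCTL_setvcpuaffinity;
    op.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    op.domain = domid;
    op.u.vcpuaffinity.vcpu = vcpu;
    op.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD | XEN_VCPUAFFINITY_SOFT;
    fill_cpumap(&op.u.vcpuaffinity.cpumap_hard, hard, nr_cpus);   /* assumed helper */
    fill_cpumap(&op.u.vcpuaffinity.cpumap_soft, soft, nr_cpus);   /* assumed helper */

    /* On return both cpumaps hold the effective (intersected) affinities. */
    return do_domctl(&op);                                        /* assumed wrapper */
}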
@ -299,10 +324,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
/* XEN_DOMCTL_scheduler_op */
/* Scheduler types. */
#define XEN_SCHEDULER_SEDF 4
/* #define XEN_SCHEDULER_SEDF 4 (Removed) */
#define XEN_SCHEDULER_CREDIT 5
#define XEN_SCHEDULER_CREDIT2 6
#define XEN_SCHEDULER_ARINC653 7
#define XEN_SCHEDULER_RTDS 8
/* Set or get info? */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1
@ -310,13 +337,6 @@ struct xen_domctl_scheduler_op {
uint32_t sched_id; /* XEN_SCHEDULER_* */
uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
union {
struct xen_domctl_sched_sedf {
uint64_aligned_t period;
uint64_aligned_t slice;
uint64_aligned_t latency;
uint32_t extratime;
uint32_t weight;
} sedf;
struct xen_domctl_sched_credit {
uint16_t weight;
uint16_t cap;
@ -324,6 +344,10 @@ struct xen_domctl_scheduler_op {
struct xen_domctl_sched_credit2 {
uint16_t weight;
} credit2;
struct xen_domctl_sched_rtds {
uint32_t period;
uint32_t budget;
} rtds;
} u;
};
typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
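The new RTDS parameters are set through the same domctl. A hedged sketch, reusing the hypothetical do_domctl() wrapper from the affinity example; period and budget are passed through in whatever units the scheduler expects.

#include <stdint.h>
#include <string.h>

static int set_rtds_params(uint32_t domid, uint32_t period, uint32_t budget)
{
    struct xen_domctl op;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_DOMCTL_scheduler_op;
    op.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    op.domain = domid;
    op.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
    op.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
    op.u.scheduler_op.u.rtds.period = period;
    op.u.scheduler_op.u.rtds.budget = budget;

    return do_domctl(&op);                /* assumed hypercall wrapper */
}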
@ -383,29 +407,9 @@ typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
/* XEN_DOMCTL_arch_setup */
#define _XEN_DOMAINSETUP_hvm_guest 0
#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
#define _XEN_DOMAINSETUP_sioemu_guest 2
#define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest)
typedef struct xen_domctl_arch_setup {
uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
#ifdef __ia64__
uint64_aligned_t bp; /* mpaddr of boot param area */
uint64_aligned_t maxmem; /* Highest memory address for MDT. */
uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
int8_t vhpt_size_log2; /* Log2 of VHPT size. */
#endif
} xen_domctl_arch_setup_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
/* XEN_DOMCTL_settimeoffset */
struct xen_domctl_settimeoffset {
int32_t time_offset_seconds; /* applied to domain wallclock time */
int64_aligned_t time_offset_seconds; /* applied to domain wallclock time */
};
typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
@ -429,14 +433,6 @@ typedef struct xen_domctl_address_size {
DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
/* XEN_DOMCTL_real_mode_area */
struct xen_domctl_real_mode_area {
uint32_t log; /* log2 of Real Mode Area size */
};
typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
/* XEN_DOMCTL_sendtrigger */
#define XEN_DOMCTL_SENDTRIGGER_NMI 0
#define XEN_DOMCTL_SENDTRIGGER_RESET 1
@ -451,12 +447,33 @@ typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
/* Assign a device to a guest. Sets up IOMMU structures. */
/* XEN_DOMCTL_assign_device */
/* XEN_DOMCTL_test_assign_device */
/* XEN_DOMCTL_deassign_device */
/*
* XEN_DOMCTL_deassign_device: The behavior of this DOMCTL differs
* between the different type of device:
* - PCI device (XEN_DOMCTL_DEV_PCI) will be reassigned to DOM0
* - DT device (XEN_DOMCTL_DEV_DT) will be left unassigned. DOM0
* will have to call XEN_DOMCTL_assign_device in order to use the
* device.
*/
#define XEN_DOMCTL_DEV_PCI 0
#define XEN_DOMCTL_DEV_DT 1
struct xen_domctl_assign_device {
uint32_t machine_sbdf; /* machine PCI ID of assigned device */
uint32_t dev; /* XEN_DOMCTL_DEV_* */
union {
struct {
uint32_t machine_sbdf; /* machine PCI ID of assigned device */
} pci;
struct {
uint32_t size; /* Length of the path */
XEN_GUEST_HANDLE_64(char) path; /* path to the device tree node */
} dt;
} u;
/* IN */
#define XEN_DOMCTL_DEV_RDM_RELAXED 1
uint32_t flag; /* flag of assigned device */
};
typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
@ -480,6 +497,7 @@ typedef enum pt_irq_type_e {
PT_IRQ_TYPE_ISA,
PT_IRQ_TYPE_MSI,
PT_IRQ_TYPE_MSI_TRANSLATE,
PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
@ -500,6 +518,9 @@ struct xen_domctl_bind_pt_irq {
uint32_t gflags;
uint64_aligned_t gtable;
} msi;
struct {
uint16_t spi;
} spi;
} u;
};
typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
@ -507,6 +528,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
/* Bind machine I/O address range -> HVM address range. */
/* If this returns -E2BIG lower nr_mfns value. */
/* XEN_DOMCTL_memory_mapping */
#define DPCI_ADD_MAPPING 1
#define DPCI_REMOVE_MAPPING 0
@ -544,6 +566,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
#define XEN_DOMCTL_MEM_CACHEATTR_WP 5
#define XEN_DOMCTL_MEM_CACHEATTR_WB 6
#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
#define XEN_DOMCTL_DELETE_MEM_CACHEATTR (~(uint32_t)0)
struct xen_domctl_pin_mem_cacheattr {
uint64_aligned_t start, end;
uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
@ -571,27 +594,19 @@ struct xen_domctl_ext_vcpucontext {
uint16_t sysenter_callback_cs;
uint8_t syscall32_disables_events;
uint8_t sysenter_disables_events;
uint64_aligned_t mcg_cap;
#if defined(__GNUC__)
union {
uint64_aligned_t mcg_cap;
struct hvm_vmce_vcpu vmce;
};
#else
struct hvm_vmce_vcpu vmce;
#endif
#endif
};
typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
/*
* Set optimization features for a domain
*/
/* XEN_DOMCTL_set_opt_feature */
struct xen_domctl_set_opt_feature {
#if defined(__ia64__)
struct xen_ia64_opt_feature optf;
#else
/* Make struct non-empty: do not depend on this field name! */
uint64_t dummy;
#endif
};
typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
/*
* Set the target domain for a domain
*/
@ -616,6 +631,22 @@ typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
#endif
/*
* Arranges that if the domain suspends (specifically, if it shuts
* down with code SHUTDOWN_suspend), this event channel will be
* notified.
*
* This is _instead of_ the usual notification to the global
* VIRQ_DOM_EXC. (In most systems that virq is owned by xenstored.)
*
* Only one subscription per domain is possible. Last subscriber
* wins; others are silently displaced.
*
* NB that contrary to the rather general name, it only applies to
* domain shutdown with code suspend. Shutdown for other reasons
* (including crash), and domain death, are notified to VIRQ_DOM_EXC
* regardless.
*/
/* XEN_DOMCTL_subscribe */
struct xen_domctl_subscribe {
uint32_t port; /* IN */
@ -664,18 +695,13 @@ typedef struct xen_domctl_disable_migrate {
/* XEN_DOMCTL_gettscinfo */
/* XEN_DOMCTL_settscinfo */
struct xen_guest_tsc_info {
typedef struct xen_domctl_tsc_info {
/* IN/OUT */
uint32_t tsc_mode;
uint32_t gtsc_khz;
uint32_t incarnation;
uint32_t pad;
uint64_aligned_t elapsed_nsec;
};
typedef struct xen_guest_tsc_info xen_guest_tsc_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t);
typedef struct xen_domctl_tsc_info {
XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */
xen_guest_tsc_info_t info; /* IN */
} xen_domctl_tsc_info_t;
/* XEN_DOMCTL_gdbsx_guestmemio guest mem io */
@ -705,10 +731,21 @@ struct xen_domctl_gdbsx_domstatus {
};
/*
* Memory event operations
* VM event operations
*/
/* XEN_DOMCTL_mem_event_op */
/* XEN_DOMCTL_vm_event_op */
/*
* There are currently three rings available for VM events:
* sharing, monitor and paging. This hypercall allows one to
* control these rings (enable/disable), as well as to signal
* to the hypervisor to pull responses (resume) from the given
* ring.
*/
#define XEN_VM_EVENT_ENABLE 0
#define XEN_VM_EVENT_DISABLE 1
#define XEN_VM_EVENT_RESUME 2
/*
* Domain memory paging
@ -717,41 +754,38 @@ struct xen_domctl_gdbsx_domstatus {
* pager<->hypervisor interface. Use XENMEM_paging_op*
* to perform per-page operations.
*
* The XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE domctl returns several
* The XEN_VM_EVENT_PAGING_ENABLE domctl returns several
* non-standard error codes to indicate why paging could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EMLINK - guest has iommu passthrough enabled
* EXDEV - guest has PoD enabled
* EBUSY - guest has or had paging enabled, ring buffer still active
*/
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
#define XEN_DOMCTL_VM_EVENT_OP_PAGING 1
/*
* Access permissions.
* Monitor helper.
*
* As with paging, use the domctl for teardown/setup of the
* helper<->hypervisor interface.
*
* There are HVM hypercalls to set the per-page access permissions of every
* page in a domain. When one of these permissions--independent, read,
* write, and execute--is violated, the VCPU is paused and a memory event
* is sent with what happened. (See public/mem_event.h) .
* The monitor interface can be used to register for various VM events. For
* example, there are HVM hypercalls to set the per-page access permissions
* of every page in a domain. When one of these permissions--independent,
* read, write, and execute--is violated, the VCPU is paused and a memory event
* is sent with what happened. The memory event handler can then resume the
* VCPU and redo the access with a XEN_VM_EVENT_RESUME option.
*
* The memory event handler can then resume the VCPU and redo the access
* with a XENMEM_access_op_resume hypercall.
* See public/vm_event.h for the list of available events that can be
* subscribed to via the monitor interface.
*
* The XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE domctl returns several
* The XEN_VM_EVENT_MONITOR_* domctls return
* non-standard error codes to indicate why access could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EBUSY - guest has or had access enabled, ring buffer still active
*
*/
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
#define XEN_DOMCTL_VM_EVENT_OP_MONITOR 2
/*
* Sharing ENOMEM helper.
@ -766,21 +800,18 @@ struct xen_domctl_gdbsx_domstatus {
* Note that sharing can be turned on (as per the domctl below)
* *without* this ring being setup.
*/
#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0
#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1
#define XEN_DOMCTL_VM_EVENT_OP_SHARING 3
/* Use for teardown/setup of helper<->hypervisor interface for paging,
* access and sharing.*/
struct xen_domctl_mem_event_op {
uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
struct xen_domctl_vm_event_op {
uint32_t op; /* XEN_VM_EVENT_* */
uint32_t mode; /* XEN_DOMCTL_VM_EVENT_OP_* */
uint32_t port; /* OUT: event channel for ring */
};
typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
typedef struct xen_domctl_vm_event_op xen_domctl_vm_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vm_event_op_t);
/*
* Memory sharing operations
@ -821,7 +852,7 @@ struct xen_domctl_vcpuextstate {
/* IN: VCPU that this call applies to. */
uint32_t vcpu;
/*
* SET: xfeature support mask of struct (IN)
* SET: Ignored.
* GET: xfeature support mask of struct (IN/OUT)
* xfeature mask is served as identifications of the saving format
* so that compatible CPUs can have a check on format to decide
@ -849,6 +880,189 @@ struct xen_domctl_set_access_required {
typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
struct xen_domctl_set_broken_page_p2m {
uint64_aligned_t pfn;
};
typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
/*
* XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
* number the guest may use. Use this to limit the amount of resources
* (global mapping space, xenheap) a guest may use for event channels.
*/
struct xen_domctl_set_max_evtchn {
uint32_t max_port;
};
typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
/*
* ARM: Clean and invalidate caches associated with given region of
* guest memory.
*/
struct xen_domctl_cacheflush {
/* IN: page range to flush. */
xen_pfn_t start_pfn, nr_pfns;
};
typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_vcpu_msr {
uint32_t index;
uint32_t reserved;
uint64_aligned_t value;
};
typedef struct xen_domctl_vcpu_msr xen_domctl_vcpu_msr_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msr_t);
/*
* XEN_DOMCTL_set_vcpu_msrs / XEN_DOMCTL_get_vcpu_msrs.
*
* Input:
* - A NULL 'msrs' guest handle is a request for the maximum 'msr_count'.
* - Otherwise, 'msr_count' is the number of entries in 'msrs'.
*
* Output for get:
* - If 'msr_count' is less than the number Xen needs to write, -ENOBUFS shall
* be returned and 'msr_count' updated to reflect the intended number.
* - On success, 'msr_count' shall indicate the number of MSRs written, which
* may be less than the maximum if some are not currently used by the vcpu.
*
* Output for set:
* - If Xen encounters an error with a specific MSR, -EINVAL shall be returned
* and 'msr_count' shall be set to the offending index, to aid debugging.
*/
struct xen_domctl_vcpu_msrs {
uint32_t vcpu; /* IN */
uint32_t msr_count; /* IN/OUT */
XEN_GUEST_HANDLE_64(xen_domctl_vcpu_msr_t) msrs; /* IN/OUT */
};
typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
#endif
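The IN/OUT convention above leads to a two-step pattern in the toolstack: probe with a NULL handle to learn the required count, then allocate and fetch. A hedged sketch; do_domctl() is again a hypothetical wrapper, and a real toolstack would also need hypercall-safe buffers for the MSR array, which this sketch glosses over.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int get_vcpu_msrs(uint32_t domid, uint32_t vcpu)
{
    struct xen_domctl op;
    xen_domctl_vcpu_msr_t *buf;
    int rc;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_DOMCTL_get_vcpu_msrs;
    op.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    op.domain = domid;
    op.u.vcpu_msrs.vcpu = vcpu;
    set_xen_guest_handle(op.u.vcpu_msrs.msrs, NULL);    /* ask for the max count */
    if ((rc = do_domctl(&op)) != 0)
        return rc;

    buf = calloc(op.u.vcpu_msrs.msr_count, sizeof(*buf));
    if (buf == NULL)
        return -1;
    set_xen_guest_handle(op.u.vcpu_msrs.msrs, buf);     /* now fetch the MSRs */
    rc = do_domctl(&op);
    /* ... consume buf[0 .. msr_count-1] on success ... */
    free(buf);
    return rc;
}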
/* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
struct xen_domctl_vnuma {
/* IN: number of vNUMA nodes to setup. Shall be greater than 0 */
uint32_t nr_vnodes;
/* IN: number of memory ranges to setup */
uint32_t nr_vmemranges;
/*
* IN: number of vCPUs of the domain (used as size of the vcpu_to_vnode
* array declared below). Shall be equal to the domain's max_vcpus.
*/
uint32_t nr_vcpus;
uint32_t pad; /* must be zero */
/*
* IN: array for specifying the distances of the vNUMA nodes
* between each other. Shall have nr_vnodes*nr_vnodes elements.
*/
XEN_GUEST_HANDLE_64(uint) vdistance;
/*
* IN: array for specifying to what vNUMA node each vCPU belongs.
* Shall have nr_vcpus elements.
*/
XEN_GUEST_HANDLE_64(uint) vcpu_to_vnode;
/*
* IN: array for specifying on what physical NUMA node each vNUMA
* node is placed. Shall have nr_vnodes elements.
*/
XEN_GUEST_HANDLE_64(uint) vnode_to_pnode;
/*
* IN: array for specifying the memory ranges. Shall have
* nr_vmemranges elements.
*/
XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
};
typedef struct xen_domctl_vnuma xen_domctl_vnuma_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vnuma_t);
struct xen_domctl_psr_cmt_op {
#define XEN_DOMCTL_PSR_CMT_OP_DETACH 0
#define XEN_DOMCTL_PSR_CMT_OP_ATTACH 1
#define XEN_DOMCTL_PSR_CMT_OP_QUERY_RMID 2
uint32_t cmd;
uint32_t data;
};
typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
/* XEN_DOMCTL_MONITOR_*
*
* Enable/disable monitoring various VM events.
* This domctl configures what events will be reported to helper apps
* via the ring buffer "MONITOR". The ring has to be first enabled
* with the domctl XEN_DOMCTL_VM_EVENT_OP_MONITOR.
*
* GET_CAPABILITIES can be used to determine which of these features is
* available on a given platform.
*
* NOTICE: mem_access events are also delivered via the "MONITOR" ring buffer;
* however, enabling/disabling those events is performed with the use of
* memory_op hypercalls!
*/
#define XEN_DOMCTL_MONITOR_OP_ENABLE 0
#define XEN_DOMCTL_MONITOR_OP_DISABLE 1
#define XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES 2
#define XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG 0
#define XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR 1
#define XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP 2
#define XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT 3
#define XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST 4
struct xen_domctl_monitor_op {
uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
/*
* When used with ENABLE/DISABLE this has to be set to
* the requested XEN_DOMCTL_MONITOR_EVENT_* value.
* With GET_CAPABILITIES this field returns a bitmap of
* events supported by the platform, in the format
* (1 << XEN_DOMCTL_MONITOR_EVENT_*).
*/
uint32_t event;
/*
* Further options when issuing XEN_DOMCTL_MONITOR_OP_ENABLE.
*/
union {
struct {
/* Which control register */
uint8_t index;
/* Pause vCPU until response */
uint8_t sync;
/* Send event only on a change of value */
uint8_t onchangeonly;
} mov_to_cr;
struct {
/* Enable the capture of an extended set of MSRs */
uint8_t extended_capture;
} mov_to_msr;
struct {
/* Pause vCPU until response */
uint8_t sync;
} guest_request;
} u;
};
typedef struct xen_domctl_monitor_op xen_domctl_monitor_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_monitor_op_t);
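Tying the two interfaces together: the monitor ring is first enabled through XEN_DOMCTL_vm_event_op with mode XEN_DOMCTL_VM_EVENT_OP_MONITOR, after which individual event classes are switched on with this domctl. A hedged sketch of enabling synchronous control-register write events; do_domctl() is an assumed wrapper, and the meaning of 'index' follows the vm_event interface rather than anything defined here.

#include <stdint.h>
#include <string.h>

/* Enable synchronous reporting of writes to one control register. */
static int monitor_ctrlreg_writes(uint32_t domid, uint8_t ctrlreg_index)
{
    struct xen_domctl op;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_DOMCTL_monitor_op;
    op.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    op.domain = domid;
    op.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_ENABLE;
    op.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG;
    op.u.monitor_op.u.mov_to_cr.index = ctrlreg_index;  /* encoding per vm_event.h */
    op.u.monitor_op.u.mov_to_cr.sync = 1;               /* pause vCPU until reply */
    op.u.monitor_op.u.mov_to_cr.onchangeonly = 0;

    return do_domctl(&op);                               /* assumed wrapper */
}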
struct xen_domctl_psr_cat_op {
#define XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM 0
#define XEN_DOMCTL_PSR_CAT_OP_GET_L3_CBM 1
uint32_t cmd; /* IN: XEN_DOMCTL_PSR_CAT_OP_* */
uint32_t target; /* IN */
uint64_t data; /* IN/OUT */
};
typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@ -857,8 +1071,8 @@ struct xen_domctl {
#define XEN_DOMCTL_unpausedomain 4
#define XEN_DOMCTL_getdomaininfo 5
#define XEN_DOMCTL_getmemlist 6
#define XEN_DOMCTL_getpageframeinfo 7
#define XEN_DOMCTL_getpageframeinfo2 8
/* #define XEN_DOMCTL_getpageframeinfo 7 Obsolete - use getpageframeinfo3 */
/* #define XEN_DOMCTL_getpageframeinfo2 8 Obsolete - use getpageframeinfo3 */
#define XEN_DOMCTL_setvcpuaffinity 9
#define XEN_DOMCTL_shadow_op 10
#define XEN_DOMCTL_max_mem 11
@ -873,10 +1087,10 @@ struct xen_domctl {
#define XEN_DOMCTL_iomem_permission 20
#define XEN_DOMCTL_ioport_permission 21
#define XEN_DOMCTL_hypercall_init 22
#define XEN_DOMCTL_arch_setup 23
#define XEN_DOMCTL_arch_setup 23 /* Obsolete IA64 only */
#define XEN_DOMCTL_settimeoffset 24
#define XEN_DOMCTL_getvcpuaffinity 25
#define XEN_DOMCTL_real_mode_area 26
#define XEN_DOMCTL_real_mode_area 26 /* Obsolete PPC only */
#define XEN_DOMCTL_resumedomain 27
#define XEN_DOMCTL_sendtrigger 28
#define XEN_DOMCTL_subscribe 29
@ -891,7 +1105,7 @@ struct xen_domctl {
#define XEN_DOMCTL_pin_mem_cacheattr 41
#define XEN_DOMCTL_set_ext_vcpucontext 42
#define XEN_DOMCTL_get_ext_vcpucontext 43
#define XEN_DOMCTL_set_opt_feature 44
#define XEN_DOMCTL_set_opt_feature 44 /* Obsolete IA64 only */
#define XEN_DOMCTL_test_assign_device 45
#define XEN_DOMCTL_set_target 46
#define XEN_DOMCTL_deassign_device 47
@ -903,7 +1117,7 @@ struct xen_domctl {
#define XEN_DOMCTL_suppress_spurious_page_faults 53
#define XEN_DOMCTL_debug_op 54
#define XEN_DOMCTL_gethvmcontext_partial 55
#define XEN_DOMCTL_mem_event_op 56
#define XEN_DOMCTL_vm_event_op 56
#define XEN_DOMCTL_mem_sharing_op 57
#define XEN_DOMCTL_disable_migrate 58
#define XEN_DOMCTL_gettscinfo 59
@ -914,6 +1128,17 @@ struct xen_domctl {
#define XEN_DOMCTL_set_access_required 64
#define XEN_DOMCTL_audit_p2m 65
#define XEN_DOMCTL_set_virq_handler 66
#define XEN_DOMCTL_set_broken_page_p2m 67
#define XEN_DOMCTL_setnodeaffinity 68
#define XEN_DOMCTL_getnodeaffinity 69
#define XEN_DOMCTL_set_max_evtchn 70
#define XEN_DOMCTL_cacheflush 71
#define XEN_DOMCTL_get_vcpu_msrs 72
#define XEN_DOMCTL_set_vcpu_msrs 73
#define XEN_DOMCTL_setvnumainfo 74
#define XEN_DOMCTL_psr_cmt_op 75
#define XEN_DOMCTL_monitor_op 77
#define XEN_DOMCTL_psr_cat_op 78
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@ -924,9 +1149,8 @@ struct xen_domctl {
struct xen_domctl_createdomain createdomain;
struct xen_domctl_getdomaininfo getdomaininfo;
struct xen_domctl_getmemlist getmemlist;
struct xen_domctl_getpageframeinfo getpageframeinfo;
struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
struct xen_domctl_getpageframeinfo3 getpageframeinfo3;
struct xen_domctl_nodeaffinity nodeaffinity;
struct xen_domctl_vcpuaffinity vcpuaffinity;
struct xen_domctl_shadow_op shadow_op;
struct xen_domctl_max_mem max_mem;
@ -940,11 +1164,9 @@ struct xen_domctl {
struct xen_domctl_iomem_permission iomem_permission;
struct xen_domctl_ioport_permission ioport_permission;
struct xen_domctl_hypercall_init hypercall_init;
struct xen_domctl_arch_setup arch_setup;
struct xen_domctl_settimeoffset settimeoffset;
struct xen_domctl_disable_migrate disable_migrate;
struct xen_domctl_tsc_info tsc_info;
struct xen_domctl_real_mode_area real_mode_area;
struct xen_domctl_hvmcontext hvmcontext;
struct xen_domctl_hvmcontext_partial hvmcontext_partial;
struct xen_domctl_address_size address_size;
@ -956,22 +1178,29 @@ struct xen_domctl {
struct xen_domctl_ioport_mapping ioport_mapping;
struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
struct xen_domctl_ext_vcpucontext ext_vcpucontext;
struct xen_domctl_set_opt_feature set_opt_feature;
struct xen_domctl_set_target set_target;
struct xen_domctl_subscribe subscribe;
struct xen_domctl_debug_op debug_op;
struct xen_domctl_mem_event_op mem_event_op;
struct xen_domctl_vm_event_op vm_event_op;
struct xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
struct xen_domctl_vcpuextstate vcpuextstate;
struct xen_domctl_vcpu_msrs vcpu_msrs;
#endif
struct xen_domctl_set_access_required access_required;
struct xen_domctl_audit_p2m audit_p2m;
struct xen_domctl_set_virq_handler set_virq_handler;
struct xen_domctl_set_max_evtchn set_max_evtchn;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
struct xen_domctl_cacheflush cacheflush;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
struct xen_domctl_vnuma vnuma;
struct xen_domctl_psr_cmt_op psr_cmt_op;
struct xen_domctl_monitor_op monitor_op;
struct xen_domctl_psr_cat_op psr_cat_op;
uint8_t pad[128];
} u;
};
@ -983,7 +1212,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil


@ -28,6 +28,8 @@
#define __XEN_PUBLIC_ELFNOTE_H__
/*
* `incontents 200 elfnotes ELF notes
*
* The notes should live in a PT_NOTE segment and have "Xen" in the
* name field.
*
@ -36,6 +38,9 @@
*
* LEGACY indicated the fields in the legacy __xen_guest string which
* this note type replaces.
*
* String values (for non-legacy) are NULL terminated ASCII, also known
* as ASCIZ type.
*/
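For reference, each of these notes is an ordinary ELF note record. The layout below is a hedged sketch of one string-valued note in memory, with "Xen" as the name and the ASCIZ value as the descriptor; the 8-byte value size is arbitrary, and both fields are padded to 4-byte boundaries as the ELF format requires. Kernels normally emit these from assembler, so the struct is purely illustrative.

#include <stdint.h>

struct xen_elfnote_example {
    uint32_t namesz;          /* 4: strlen("Xen") + NUL */
    uint32_t descsz;          /* size of the value before padding */
    uint32_t type;            /* one of the XEN_ELFNOTE_* numbers below */
    char     name[4];         /* "Xen\0" */
    char     desc[8];         /* the value, e.g. an ASCIZ string */
};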
/*
@ -66,8 +71,8 @@
#define XEN_ELFNOTE_VIRT_BASE 3
/*
* The offset of the ELF paddr field from the acutal required
* psuedo-physical address (numeric).
* The offset of the ELF paddr field from the actual required
* pseudo-physical address (numeric).
*
* This is used to maintain backwards compatibility with older kernels
* which wrote __PAGE_OFFSET into that field. This field defaults to 0
@ -158,6 +163,9 @@
/*
* Whether or not the guest supports cooperative suspend cancellation.
* This is a numeric value.
*
* Default is 0
*/
#define XEN_ELFNOTE_SUSPEND_CANCEL 14
@ -255,7 +263,7 @@
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil


@ -1,527 +0,0 @@
#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__
#define __XEN_PUBLIC_ELFSTRUCTS_H__ 1
/*
* Copyright (c) 1995, 1996 Erik Theisen. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
typedef uint8_t Elf_Byte;
typedef uint32_t Elf32_Addr; /* Unsigned program address */
typedef uint32_t Elf32_Off; /* Unsigned file offset */
typedef int32_t Elf32_Sword; /* Signed large integer */
typedef uint32_t Elf32_Word; /* Unsigned large integer */
typedef uint16_t Elf32_Half; /* Unsigned medium integer */
typedef uint64_t Elf64_Addr;
typedef uint64_t Elf64_Off;
typedef int32_t Elf64_Shalf;
typedef int32_t Elf64_Sword;
typedef uint32_t Elf64_Word;
typedef int64_t Elf64_Sxword;
typedef uint64_t Elf64_Xword;
typedef uint32_t Elf64_Half;
typedef uint16_t Elf64_Quarter;
/*
* e_ident[] identification indexes
* See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
*/
#define EI_MAG0 0 /* file ID */
#define EI_MAG1 1 /* file ID */
#define EI_MAG2 2 /* file ID */
#define EI_MAG3 3 /* file ID */
#define EI_CLASS 4 /* file class */
#define EI_DATA 5 /* data encoding */
#define EI_VERSION 6 /* ELF header version */
#define EI_OSABI 7 /* OS/ABI ID */
#define EI_ABIVERSION 8 /* ABI version */
#define EI_PAD 9 /* start of pad bytes */
#define EI_NIDENT 16 /* Size of e_ident[] */
/* e_ident[] magic number */
#define ELFMAG0 0x7f /* e_ident[EI_MAG0] */
#define ELFMAG1 'E' /* e_ident[EI_MAG1] */
#define ELFMAG2 'L' /* e_ident[EI_MAG2] */
#define ELFMAG3 'F' /* e_ident[EI_MAG3] */
#define ELFMAG "\177ELF" /* magic */
#define SELFMAG 4 /* size of magic */
/* e_ident[] file class */
#define ELFCLASSNONE 0 /* invalid */
#define ELFCLASS32 1 /* 32-bit objs */
#define ELFCLASS64 2 /* 64-bit objs */
#define ELFCLASSNUM 3 /* number of classes */
/* e_ident[] data encoding */
#define ELFDATANONE 0 /* invalid */
#define ELFDATA2LSB 1 /* Little-Endian */
#define ELFDATA2MSB 2 /* Big-Endian */
#define ELFDATANUM 3 /* number of data encode defines */
/* e_ident[] Operating System/ABI */
#define ELFOSABI_SYSV 0 /* UNIX System V ABI */
#define ELFOSABI_HPUX 1 /* HP-UX operating system */
#define ELFOSABI_NETBSD 2 /* NetBSD */
#define ELFOSABI_LINUX 3 /* GNU/Linux */
#define ELFOSABI_HURD 4 /* GNU/Hurd */
#define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */
#define ELFOSABI_SOLARIS 6 /* Solaris */
#define ELFOSABI_MONTEREY 7 /* Monterey */
#define ELFOSABI_IRIX 8 /* IRIX */
#define ELFOSABI_FREEBSD 9 /* FreeBSD */
#define ELFOSABI_TRU64 10 /* TRU64 UNIX */
#define ELFOSABI_MODESTO 11 /* Novell Modesto */
#define ELFOSABI_OPENBSD 12 /* OpenBSD */
#define ELFOSABI_ARM 97 /* ARM */
#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
/* e_ident */
#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
(ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
(ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
(ehdr).e_ident[EI_MAG3] == ELFMAG3)
/* ELF Header */
typedef struct elfhdr {
unsigned char e_ident[EI_NIDENT]; /* ELF Identification */
Elf32_Half e_type; /* object file type */
Elf32_Half e_machine; /* machine */
Elf32_Word e_version; /* object file version */
Elf32_Addr e_entry; /* virtual entry point */
Elf32_Off e_phoff; /* program header table offset */
Elf32_Off e_shoff; /* section header table offset */
Elf32_Word e_flags; /* processor-specific flags */
Elf32_Half e_ehsize; /* ELF header size */
Elf32_Half e_phentsize; /* program header entry size */
Elf32_Half e_phnum; /* number of program header entries */
Elf32_Half e_shentsize; /* section header entry size */
Elf32_Half e_shnum; /* number of section header entries */
Elf32_Half e_shstrndx; /* section header table's "section
header string table" entry offset */
} Elf32_Ehdr;
typedef struct {
unsigned char e_ident[EI_NIDENT]; /* Id bytes */
Elf64_Quarter e_type; /* file type */
Elf64_Quarter e_machine; /* machine type */
Elf64_Half e_version; /* version number */
Elf64_Addr e_entry; /* entry point */
Elf64_Off e_phoff; /* Program hdr offset */
Elf64_Off e_shoff; /* Section hdr offset */
Elf64_Half e_flags; /* Processor flags */
Elf64_Quarter e_ehsize; /* sizeof ehdr */
Elf64_Quarter e_phentsize; /* Program header entry size */
Elf64_Quarter e_phnum; /* Number of program headers */
Elf64_Quarter e_shentsize; /* Section header entry size */
Elf64_Quarter e_shnum; /* Number of section headers */
Elf64_Quarter e_shstrndx; /* String table index */
} Elf64_Ehdr;
/* e_type */
#define ET_NONE 0 /* No file type */
#define ET_REL 1 /* relocatable file */
#define ET_EXEC 2 /* executable file */
#define ET_DYN 3 /* shared object file */
#define ET_CORE 4 /* core file */
#define ET_NUM 5 /* number of types */
#define ET_LOPROC 0xff00 /* reserved range for processor */
#define ET_HIPROC 0xffff /* specific e_type */
/* e_machine */
#define EM_NONE 0 /* No Machine */
#define EM_M32 1 /* AT&T WE 32100 */
#define EM_SPARC 2 /* SPARC */
#define EM_386 3 /* Intel 80386 */
#define EM_68K 4 /* Motorola 68000 */
#define EM_88K 5 /* Motorola 88000 */
#define EM_486 6 /* Intel 80486 - unused? */
#define EM_860 7 /* Intel 80860 */
#define EM_MIPS 8 /* MIPS R3000 Big-Endian only */
/*
* Don't know if EM_MIPS_RS4_BE,
* EM_SPARC64, EM_PARISC,
* or EM_PPC are ABI compliant
*/
#define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */
#define EM_SPARC64 11 /* SPARC v9 64-bit unofficial */
#define EM_PARISC 15 /* HPPA */
#define EM_SPARC32PLUS 18 /* Enhanced instruction set SPARC */
#define EM_PPC 20 /* PowerPC */
#define EM_PPC64 21 /* PowerPC 64-bit */
#define EM_ARM 40 /* Advanced RISC Machines ARM */
#define EM_ALPHA 41 /* DEC ALPHA */
#define EM_SPARCV9 43 /* SPARC version 9 */
#define EM_ALPHA_EXP 0x9026 /* DEC ALPHA */
#define EM_IA_64 50 /* Intel Merced */
#define EM_X86_64 62 /* AMD x86-64 architecture */
#define EM_VAX 75 /* DEC VAX */
/* Version */
#define EV_NONE 0 /* Invalid */
#define EV_CURRENT 1 /* Current */
#define EV_NUM 2 /* number of versions */
/* Section Header */
typedef struct {
Elf32_Word sh_name; /* name - index into section header
string table section */
Elf32_Word sh_type; /* type */
Elf32_Word sh_flags; /* flags */
Elf32_Addr sh_addr; /* address */
Elf32_Off sh_offset; /* file offset */
Elf32_Word sh_size; /* section size */
Elf32_Word sh_link; /* section header table index link */
Elf32_Word sh_info; /* extra information */
Elf32_Word sh_addralign; /* address alignment */
Elf32_Word sh_entsize; /* section entry size */
} Elf32_Shdr;
typedef struct {
Elf64_Half sh_name; /* section name */
Elf64_Half sh_type; /* section type */
Elf64_Xword sh_flags; /* section flags */
Elf64_Addr sh_addr; /* virtual address */
Elf64_Off sh_offset; /* file offset */
Elf64_Xword sh_size; /* section size */
Elf64_Half sh_link; /* link to another */
Elf64_Half sh_info; /* misc info */
Elf64_Xword sh_addralign; /* memory alignment */
Elf64_Xword sh_entsize; /* table entry size */
} Elf64_Shdr;
/* Special Section Indexes */
#define SHN_UNDEF 0 /* undefined */
#define SHN_LORESERVE 0xff00 /* lower bounds of reserved indexes */
#define SHN_LOPROC 0xff00 /* reserved range for processor */
#define SHN_HIPROC 0xff1f /* specific section indexes */
#define SHN_ABS 0xfff1 /* absolute value */
#define SHN_COMMON 0xfff2 /* common symbol */
#define SHN_HIRESERVE 0xffff /* upper bounds of reserved indexes */
/* sh_type */
#define SHT_NULL 0 /* inactive */
#define SHT_PROGBITS 1 /* program defined information */
#define SHT_SYMTAB 2 /* symbol table section */
#define SHT_STRTAB 3 /* string table section */
#define SHT_RELA 4 /* relocation section with addends*/
#define SHT_HASH 5 /* symbol hash table section */
#define SHT_DYNAMIC 6 /* dynamic section */
#define SHT_NOTE 7 /* note section */
#define SHT_NOBITS 8 /* no space section */
#define SHT_REL 9 /* relocation section without addends */
#define SHT_SHLIB 10 /* reserved - purpose unknown */
#define SHT_DYNSYM 11 /* dynamic symbol table section */
#define SHT_NUM 12 /* number of section types */
#define SHT_LOPROC 0x70000000 /* reserved range for processor */
#define SHT_HIPROC 0x7fffffff /* specific section header types */
#define SHT_LOUSER 0x80000000 /* reserved range for application */
#define SHT_HIUSER 0xffffffff /* specific indexes */
/* Section names */
#define ELF_BSS ".bss" /* uninitialized data */
#define ELF_DATA ".data" /* initialized data */
#define ELF_DEBUG ".debug" /* debug */
#define ELF_DYNAMIC ".dynamic" /* dynamic linking information */
#define ELF_DYNSTR ".dynstr" /* dynamic string table */
#define ELF_DYNSYM ".dynsym" /* dynamic symbol table */
#define ELF_FINI ".fini" /* termination code */
#define ELF_GOT ".got" /* global offset table */
#define ELF_HASH ".hash" /* symbol hash table */
#define ELF_INIT ".init" /* initialization code */
#define ELF_REL_DATA ".rel.data" /* relocation data */
#define ELF_REL_FINI ".rel.fini" /* relocation termination code */
#define ELF_REL_INIT ".rel.init" /* relocation initialization code */
#define ELF_REL_DYN ".rel.dyn" /* relocation dynamic link info */
#define ELF_REL_RODATA ".rel.rodata" /* relocation read-only data */
#define ELF_REL_TEXT ".rel.text" /* relocation code */
#define ELF_RODATA ".rodata" /* read-only data */
#define ELF_SHSTRTAB ".shstrtab" /* section header string table */
#define ELF_STRTAB ".strtab" /* string table */
#define ELF_SYMTAB ".symtab" /* symbol table */
#define ELF_TEXT ".text" /* code */
/* Section Attribute Flags - sh_flags */
#define SHF_WRITE 0x1 /* Writable */
#define SHF_ALLOC 0x2 /* occupies memory */
#define SHF_EXECINSTR 0x4 /* executable */
#define SHF_MASKPROC 0xf0000000 /* reserved bits for processor */
/* specific section attributes */
/* Symbol Table Entry */
typedef struct elf32_sym {
Elf32_Word st_name; /* name - index into string table */
Elf32_Addr st_value; /* symbol value */
Elf32_Word st_size; /* symbol size */
unsigned char st_info; /* type and binding */
unsigned char st_other; /* 0 - no defined meaning */
Elf32_Half st_shndx; /* section header index */
} Elf32_Sym;
typedef struct {
Elf64_Half st_name; /* Symbol name index in str table */
Elf_Byte st_info; /* type / binding attrs */
Elf_Byte st_other; /* unused */
Elf64_Quarter st_shndx; /* section index of symbol */
Elf64_Xword st_value; /* value of symbol */
Elf64_Xword st_size; /* size of symbol */
} Elf64_Sym;
/* Symbol table index */
#define STN_UNDEF 0 /* undefined */
/* Extract symbol info - st_info */
#define ELF32_ST_BIND(x) ((x) >> 4)
#define ELF32_ST_TYPE(x) (((unsigned int) x) & 0xf)
#define ELF32_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
#define ELF64_ST_BIND(x) ((x) >> 4)
#define ELF64_ST_TYPE(x) (((unsigned int) x) & 0xf)
#define ELF64_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf))
/* Symbol Binding - ELF32_ST_BIND - st_info */
#define STB_LOCAL 0 /* Local symbol */
#define STB_GLOBAL 1 /* Global symbol */
#define STB_WEAK 2 /* like global - lower precedence */
#define STB_NUM 3 /* number of symbol bindings */
#define STB_LOPROC 13 /* reserved range for processor */
#define STB_HIPROC 15 /* specific symbol bindings */
/* Symbol type - ELF32_ST_TYPE - st_info */
#define STT_NOTYPE 0 /* not specified */
#define STT_OBJECT 1 /* data object */
#define STT_FUNC 2 /* function */
#define STT_SECTION 3 /* section */
#define STT_FILE 4 /* file */
#define STT_NUM 5 /* number of symbol types */
#define STT_LOPROC 13 /* reserved range for processor */
#define STT_HIPROC 15 /* specific symbol types */
/* Relocation entry with implicit addend */
typedef struct {
Elf32_Addr r_offset; /* offset of relocation */
Elf32_Word r_info; /* symbol table index and type */
} Elf32_Rel;
/* Relocation entry with explicit addend */
typedef struct {
Elf32_Addr r_offset; /* offset of relocation */
Elf32_Word r_info; /* symbol table index and type */
Elf32_Sword r_addend;
} Elf32_Rela;
/* Extract relocation info - r_info */
#define ELF32_R_SYM(i) ((i) >> 8)
#define ELF32_R_TYPE(i) ((unsigned char) (i))
#define ELF32_R_INFO(s,t) (((s) << 8) + (unsigned char)(t))
typedef struct {
Elf64_Xword r_offset; /* where to do it */
Elf64_Xword r_info; /* index & type of relocation */
} Elf64_Rel;
typedef struct {
Elf64_Xword r_offset; /* where to do it */
Elf64_Xword r_info; /* index & type of relocation */
Elf64_Sxword r_addend; /* adjustment value */
} Elf64_Rela;
#define ELF64_R_SYM(info) ((info) >> 32)
#define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF)
#define ELF64_R_INFO(s,t) (((s) << 32) + (u_int32_t)(t))
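/*
 * Compile-time illustration only (not part of the ABI headers): in the
 * 64-bit variant the symbol index occupies the upper 32 bits of r_info and
 * the relocation type the lower 32 bits.
 */
_Static_assert(ELF64_R_SYM(ELF64_R_INFO(7ULL, 3)) == 7, "r_info symbol index");
_Static_assert(ELF64_R_TYPE(ELF64_R_INFO(7ULL, 3)) == 3, "r_info type");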
/* Program Header */
typedef struct {
Elf32_Word p_type; /* segment type */
Elf32_Off p_offset; /* segment offset */
Elf32_Addr p_vaddr; /* virtual address of segment */
Elf32_Addr p_paddr; /* physical address - ignored? */
Elf32_Word p_filesz; /* number of bytes in file for seg. */
Elf32_Word p_memsz; /* number of bytes in mem. for seg. */
Elf32_Word p_flags; /* flags */
Elf32_Word p_align; /* memory alignment */
} Elf32_Phdr;
typedef struct {
Elf64_Half p_type; /* entry type */
Elf64_Half p_flags; /* flags */
Elf64_Off p_offset; /* offset */
Elf64_Addr p_vaddr; /* virtual address */
Elf64_Addr p_paddr; /* physical address */
Elf64_Xword p_filesz; /* file size */
Elf64_Xword p_memsz; /* memory size */
Elf64_Xword p_align; /* memory & file alignment */
} Elf64_Phdr;
/* Segment types - p_type */
#define PT_NULL 0 /* unused */
#define PT_LOAD 1 /* loadable segment */
#define PT_DYNAMIC 2 /* dynamic linking section */
#define PT_INTERP 3 /* the RTLD */
#define PT_NOTE 4 /* auxiliary information */
#define PT_SHLIB 5 /* reserved - purpose undefined */
#define PT_PHDR 6 /* program header */
#define PT_NUM 7 /* Number of segment types */
#define PT_LOPROC 0x70000000 /* reserved range for processor */
#define PT_HIPROC 0x7fffffff /* specific segment types */
/* Segment flags - p_flags */
#define PF_X 0x1 /* Executable */
#define PF_W 0x2 /* Writable */
#define PF_R 0x4 /* Readable */
#define PF_MASKPROC 0xf0000000 /* reserved bits for processor */
/* specific segment flags */
/* Dynamic structure */
typedef struct {
Elf32_Sword d_tag; /* controls meaning of d_val */
union {
Elf32_Word d_val; /* Multiple meanings - see d_tag */
Elf32_Addr d_ptr; /* program virtual address */
} d_un;
} Elf32_Dyn;
typedef struct {
Elf64_Xword d_tag; /* controls meaning of d_val */
union {
Elf64_Addr d_ptr;
Elf64_Xword d_val;
} d_un;
} Elf64_Dyn;
/* Dynamic Array Tags - d_tag */
#define DT_NULL 0 /* marks end of _DYNAMIC array */
#define DT_NEEDED 1 /* string table offset of needed lib */
#define DT_PLTRELSZ 2 /* size of relocation entries in PLT */
#define DT_PLTGOT 3 /* address PLT/GOT */
#define DT_HASH 4 /* address of symbol hash table */
#define DT_STRTAB 5 /* address of string table */
#define DT_SYMTAB 6 /* address of symbol table */
#define DT_RELA 7 /* address of relocation table */
#define DT_RELASZ 8 /* size of relocation table */
#define DT_RELAENT 9 /* size of relocation entry */
#define DT_STRSZ 10 /* size of string table */
#define DT_SYMENT 11 /* size of symbol table entry */
#define DT_INIT 12 /* address of initialization func. */
#define DT_FINI 13 /* address of termination function */
#define DT_SONAME 14 /* string table offset of shared obj */
#define DT_RPATH 15 /* string table offset of library
search path */
#define DT_SYMBOLIC 16 /* start sym search in shared obj. */
#define DT_REL 17 /* address of rel. tbl. w addends */
#define DT_RELSZ 18 /* size of DT_REL relocation table */
#define DT_RELENT 19 /* size of DT_REL relocation entry */
#define DT_PLTREL 20 /* PLT referenced relocation entry */
#define DT_DEBUG 21 /* debugger */
#define DT_TEXTREL 22 /* Allow rel. mod. to unwritable seg */
#define DT_JMPREL 23 /* add. of PLT's relocation entries */
#define DT_BIND_NOW 24 /* Bind now regardless of env setting */
#define DT_NUM 25 /* Number used. */
#define DT_LOPROC 0x70000000 /* reserved range for processor */
#define DT_HIPROC 0x7fffffff /* specific dynamic array tags */
/* Standard ELF hashing function */
unsigned int elf_hash(const unsigned char *name);
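/*
 * For reference only: the textbook System V ABI hash that the declaration
 * above refers to.  The in-tree implementation may differ in detail, so
 * this is a sketch, not the authoritative definition.
 */
static unsigned int
elf_hash_example(const unsigned char *name)
{
    unsigned int h = 0, g;

    while (*name) {
        h = (h << 4) + *name++;
        if ((g = h & 0xf0000000) != 0)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}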
/*
* Note Definitions
*/
typedef struct {
Elf32_Word namesz;
Elf32_Word descsz;
Elf32_Word type;
} Elf32_Note;
typedef struct {
Elf64_Half namesz;
Elf64_Half descsz;
Elf64_Half type;
} Elf64_Note;
#if defined(ELFSIZE)
#define CONCAT(x,y) __CONCAT(x,y)
#define ELFNAME(x) CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x)))
#define ELFNAME2(x,y) CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y))))
#define ELFNAMEEND(x) CONCAT(x,CONCAT(_elf,ELFSIZE))
#define ELFDEFNNAME(x) CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x)))
#endif
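/*
 * Illustrative expansions only (the argument names "load", "coff" and
 * "NO_FP" are arbitrary examples, assuming ELFSIZE is 32):
 *   ELFNAME(load)        -> elf32_load
 *   ELFNAME2(coff, load) -> coff_elf32_load
 *   ELFNAMEEND(load)     -> load_elf32
 *   ELFDEFNNAME(NO_FP)   -> ELF32_NO_FP
 */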
#if defined(ELFSIZE) && (ELFSIZE == 32)
#define Elf_Ehdr Elf32_Ehdr
#define Elf_Phdr Elf32_Phdr
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Rel Elf32_Rel
#define Elf_RelA Elf32_Rela
#define Elf_Dyn Elf32_Dyn
#define Elf_Word Elf32_Word
#define Elf_Sword Elf32_Sword
#define Elf_Addr Elf32_Addr
#define Elf_Off Elf32_Off
#define Elf_Nhdr Elf32_Nhdr
#define Elf_Note Elf32_Note
#define ELF_R_SYM ELF32_R_SYM
#define ELF_R_TYPE ELF32_R_TYPE
#define ELF_R_INFO ELF32_R_INFO
#define ELFCLASS ELFCLASS32
#define ELF_ST_BIND ELF32_ST_BIND
#define ELF_ST_TYPE ELF32_ST_TYPE
#define ELF_ST_INFO ELF32_ST_INFO
#define AuxInfo Aux32Info
#elif defined(ELFSIZE) && (ELFSIZE == 64)
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Phdr Elf64_Phdr
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Rel Elf64_Rel
#define Elf_RelA Elf64_Rela
#define Elf_Dyn Elf64_Dyn
#define Elf_Word Elf64_Word
#define Elf_Sword Elf64_Sword
#define Elf_Addr Elf64_Addr
#define Elf_Off Elf64_Off
#define Elf_Nhdr Elf64_Nhdr
#define Elf_Note Elf64_Note
#define ELF_R_SYM ELF64_R_SYM
#define ELF_R_TYPE ELF64_R_TYPE
#define ELF_R_INFO ELF64_R_INFO
#define ELFCLASS ELFCLASS64
#define ELF_ST_BIND ELF64_ST_BIND
#define ELF_ST_TYPE ELF64_ST_TYPE
#define ELF_ST_INFO ELF64_ST_INFO
#define AuxInfo Aux64Info
#endif
#endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */

95
sys/xen/interface/errno.h Normal file
View File

@ -0,0 +1,95 @@
#ifndef __XEN_PUBLIC_ERRNO_H__
#ifndef __ASSEMBLY__
#define XEN_ERRNO(name, value) XEN_##name = value,
enum xen_errno {
#else /* !__ASSEMBLY__ */
#define XEN_ERRNO(name, value) .equ XEN_##name, value
#endif /* __ASSEMBLY__ */
/* ` enum neg_errnoval { [ -Efoo for each Efoo in the list below ] } */
/* ` enum errnoval { */
#endif /* __XEN_PUBLIC_ERRNO_H__ */
#ifdef XEN_ERRNO
/*
* Values originating from x86 Linux. Please consider using respective
* values when adding new definitions here.
*
* The set of identifiers to be added here shouldn't extend beyond what
* POSIX mandates (see e.g.
* http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html)
* with the exception that we support some optional (XSR) values
* specified there (but no new ones should be added).
*/
XEN_ERRNO(EPERM, 1) /* Operation not permitted */
XEN_ERRNO(ENOENT, 2) /* No such file or directory */
XEN_ERRNO(ESRCH, 3) /* No such process */
#ifdef __XEN__ /* Internal only, should never be exposed to the guest. */
XEN_ERRNO(EINTR, 4) /* Interrupted system call */
#endif
XEN_ERRNO(EIO, 5) /* I/O error */
XEN_ERRNO(ENXIO, 6) /* No such device or address */
XEN_ERRNO(E2BIG, 7) /* Arg list too long */
XEN_ERRNO(ENOEXEC, 8) /* Exec format error */
XEN_ERRNO(EBADF, 9) /* Bad file number */
XEN_ERRNO(ECHILD, 10) /* No child processes */
XEN_ERRNO(EAGAIN, 11) /* Try again */
XEN_ERRNO(ENOMEM, 12) /* Out of memory */
XEN_ERRNO(EACCES, 13) /* Permission denied */
XEN_ERRNO(EFAULT, 14) /* Bad address */
XEN_ERRNO(EBUSY, 16) /* Device or resource busy */
XEN_ERRNO(EEXIST, 17) /* File exists */
XEN_ERRNO(EXDEV, 18) /* Cross-device link */
XEN_ERRNO(ENODEV, 19) /* No such device */
XEN_ERRNO(EINVAL, 22) /* Invalid argument */
XEN_ERRNO(ENFILE, 23) /* File table overflow */
XEN_ERRNO(EMFILE, 24) /* Too many open files */
XEN_ERRNO(ENOSPC, 28) /* No space left on device */
XEN_ERRNO(EMLINK, 31) /* Too many links */
XEN_ERRNO(EDOM, 33) /* Math argument out of domain of func */
XEN_ERRNO(ERANGE, 34) /* Math result not representable */
XEN_ERRNO(EDEADLK, 35) /* Resource deadlock would occur */
XEN_ERRNO(ENAMETOOLONG, 36) /* File name too long */
XEN_ERRNO(ENOLCK, 37) /* No record locks available */
XEN_ERRNO(ENOSYS, 38) /* Function not implemented */
XEN_ERRNO(ENODATA, 61) /* No data available */
XEN_ERRNO(ETIME, 62) /* Timer expired */
XEN_ERRNO(EBADMSG, 74) /* Not a data message */
XEN_ERRNO(EOVERFLOW, 75) /* Value too large for defined data type */
XEN_ERRNO(EILSEQ, 84) /* Illegal byte sequence */
#ifdef __XEN__ /* Internal only, should never be exposed to the guest. */
XEN_ERRNO(ERESTART, 85) /* Interrupted system call should be restarted */
#endif
XEN_ERRNO(ENOTSOCK, 88) /* Socket operation on non-socket */
XEN_ERRNO(EOPNOTSUPP, 95) /* Operation not supported on transport endpoint */
XEN_ERRNO(EADDRINUSE, 98) /* Address already in use */
XEN_ERRNO(EADDRNOTAVAIL, 99) /* Cannot assign requested address */
XEN_ERRNO(ENOBUFS, 105) /* No buffer space available */
XEN_ERRNO(EISCONN, 106) /* Transport endpoint is already connected */
XEN_ERRNO(ENOTCONN, 107) /* Transport endpoint is not connected */
XEN_ERRNO(ETIMEDOUT, 110) /* Connection timed out */
#undef XEN_ERRNO
#endif /* XEN_ERRNO */
#ifndef __XEN_PUBLIC_ERRNO_H__
#define __XEN_PUBLIC_ERRNO_H__
/* ` } */
#ifndef __ASSEMBLY__
};
#endif
#define XEN_EWOULDBLOCK XEN_EAGAIN /* Operation would block */
#define XEN_EDEADLOCK XEN_EDEADLK /* Resource deadlock would occur */
#endif /* __XEN_PUBLIC_ERRNO_H__ */
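/*
 * Usage sketch, not part of this header: the errno list above is guarded by
 * "#ifdef XEN_ERRNO", so a consumer that has already included this file once
 * in the normal way can re-include it with its own XEN_ERRNO() definition to
 * stamp out a table.  The include path and the helper name below are
 * assumptions for illustration; the file #undef's XEN_ERRNO itself.
 */
#define XEN_ERRNO(name, value)  case XEN_##name: return (#name);
static const char *
xen_errno_name(int err)
{
    switch (err) {
#include <xen/interface/errno.h>
    default:
        return ("unknown");
    }
}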

View File

@ -71,13 +71,13 @@
#define EVTCHNOP_bind_vcpu 8
#define EVTCHNOP_unmask 9
#define EVTCHNOP_reset 10
#define EVTCHNOP_init_control 11
#define EVTCHNOP_expand_array 12
#define EVTCHNOP_set_priority 13
/* ` } */
#ifndef __XEN_EVTCHN_PORT_DEFINED__
typedef uint32_t evtchn_port_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
#define __XEN_EVTCHN_PORT_DEFINED__ 1
#endif
/*
* EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
@ -101,6 +101,17 @@ typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
* a port that is unbound and marked as accepting bindings from the calling
* domain. A fresh port is allocated in the calling domain and returned as
* <local_port>.
*
* In case the peer domain has already tried to set our event channel
* pending, before it was bound, EVTCHNOP_bind_interdomain always sets
* the local event channel pending.
*
* The usual pattern of use, in the guest's upcall (or subsequent
* handler) is as follows: (Re-enable the event channel for subsequent
* signalling and then) check for the existence of whatever condition
* is being waited for by other means, and take whatever action is
* needed (if any).
*
* NOTES:
* 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
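/*
 * Sketch of the upcall pattern described above.  The helper names are
 * assumptions for illustration and are not part of this interface.
 */
extern void unmask_evtchn(evtchn_port_t);  /* hypothetical helpers, */
extern int  work_available(void);          /* not defined here */
extern void process_work(void);

static void
example_upcall_handler(evtchn_port_t port)
{
    unmask_evtchn(port);        /* re-enable for subsequent signalling */
    while (work_available())    /* re-check the condition by other means */
        process_work();         /* take whatever action is needed */
}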
@ -253,6 +264,10 @@ typedef struct evtchn_unmask evtchn_unmask_t;
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
* 3. Destroys all control blocks and event array, resets event channel
* operations to 2-level ABI if called with <dom> == DOMID_SELF and FIFO
* ABI was used. Guests should not bind events during EVTCHNOP_reset call
* as these events are likely to be lost.
*/
struct evtchn_reset {
/* IN parameters. */
@ -260,6 +275,43 @@ struct evtchn_reset {
};
typedef struct evtchn_reset evtchn_reset_t;
/*
* EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
*
* Note: any events that are currently pending will not be resent and
* will be lost. Guests should call this before binding any event to
* avoid losing any events.
*/
struct evtchn_init_control {
/* IN parameters. */
uint64_t control_gfn;
uint32_t offset;
uint32_t vcpu;
/* OUT parameters. */
uint8_t link_bits;
uint8_t _pad[7];
};
typedef struct evtchn_init_control evtchn_init_control_t;
/*
* EVTCHNOP_expand_array: add an additional page to the event array.
*/
struct evtchn_expand_array {
/* IN parameters. */
uint64_t array_gfn;
};
typedef struct evtchn_expand_array evtchn_expand_array_t;
/*
* EVTCHNOP_set_priority: set the priority for an event channel.
*/
struct evtchn_set_priority {
/* IN parameters. */
uint32_t port;
uint32_t priority;
};
typedef struct evtchn_set_priority evtchn_set_priority_t;
/*
* ` enum neg_errnoval
* ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op)
@ -284,12 +336,48 @@ struct evtchn_op {
typedef struct evtchn_op evtchn_op_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
/*
* 2-level ABI
*/
#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
/*
* FIFO ABI
*/
/* Events may have priorities from 0 (highest) to 15 (lowest). */
#define EVTCHN_FIFO_PRIORITY_MAX 0
#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
#define EVTCHN_FIFO_PRIORITY_MIN 15
#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
typedef uint32_t event_word_t;
#define EVTCHN_FIFO_PENDING 31
#define EVTCHN_FIFO_MASKED 30
#define EVTCHN_FIFO_LINKED 29
#define EVTCHN_FIFO_BUSY 28
#define EVTCHN_FIFO_LINK_BITS 17
#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
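/*
 * Decoding sketch (helper names are assumptions, not part of the ABI):
 * the per-event flag bits and the link field of a FIFO ABI event word.
 */
static inline int
evtchn_fifo_word_pending(event_word_t w)
{
    return (w >> EVTCHN_FIFO_PENDING) & 1;
}

static inline int
evtchn_fifo_word_masked(event_word_t w)
{
    return (w >> EVTCHN_FIFO_MASKED) & 1;
}

static inline uint32_t
evtchn_fifo_word_link(event_word_t w)
{
    return w & EVTCHN_FIFO_LINK_MASK;
}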
struct evtchn_fifo_control_block {
uint32_t ready;
uint32_t _rsvd;
uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
};
typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t;
#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -27,6 +27,20 @@
#ifndef __XEN_PUBLIC_FEATURES_H__
#define __XEN_PUBLIC_FEATURES_H__
/*
* `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES
*
* The list of all the features the guest supports. They are set by
* parsing the XEN_ELFNOTE_FEATURES and XEN_ELFNOTE_SUPPORTED_FEATURES
* string. The format is the feature names (as given here without the
* "XENFEAT_" prefix) separated by '|' characters.
* If a feature is required for the kernel to function then the feature name
* must be preceded by a '!' character.
*
* Note that if XEN_ELFNOTE_SUPPORTED_FEATURES is used, then XENFEAT_dom0
* MUST be set in it if the guest is to be booted as dom0.
*/
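/*
 * Illustrative example only (feature names are taken from the XENFEAT_*
 * list; the exact set depends on the kernel): a guest that requires
 * writable page tables and additionally supports running as dom0 would
 * carry the note string
 *
 *     "!writable_page_tables|dom0"
 */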
/*
* If set, the guest does not need to write-protect its pagetables, and can
* update them via direct writes.
@ -80,6 +94,14 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
/* Xen also maps grant references at pfn = mfn.
* This feature flag is deprecated and should not be used.
#define XENFEAT_grant_map_identity 12
*/
/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
#define XENFEAT_memory_op_vnode_supported 13
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
@ -87,7 +109,7 @@
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -1,37 +0,0 @@
XEN_ROOT=../../../..
include $(XEN_ROOT)/Config.mk
architectures := x86_32 x86_64 ia64
headers := $(patsubst %, %.h, $(architectures))
scripts := $(wildcard *.py)
.PHONY: all clean check-headers
all: $(headers) check-headers
clean:
rm -f $(headers)
rm -f checker checker.c $(XEN_TARGET_ARCH).size
rm -f *.pyc *.o *~
ifeq ($(CROSS_COMPILE)$(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH))
check-headers: checker
./checker > $(XEN_TARGET_ARCH).size
diff -u reference.size $(XEN_TARGET_ARCH).size
checker: checker.c $(headers)
$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
else
check-headers:
@echo "cross build: skipping check"
endif
x86_32.h: ../arch-x86/xen-x86_32.h ../arch-x86/xen.h ../xen.h $(scripts)
python mkheader.py $* $@ $(filter %.h,$^)
x86_64.h: ../arch-x86/xen-x86_64.h ../arch-x86/xen.h ../xen.h $(scripts)
python mkheader.py $* $@ $(filter %.h,$^)
ia64.h: ../arch-ia64.h ../xen.h $(scripts)
python mkheader.py $* $@ $(filter %.h,$^)
checker.c: $(scripts)
python mkchecker.py $(XEN_TARGET_ARCH) $@ $(architectures)

View File

@ -1,58 +0,0 @@
#!/usr/bin/python
import sys;
from structs import structs;
# command line arguments
arch = sys.argv[1];
outfile = sys.argv[2];
archs = sys.argv[3:];
f = open(outfile, "w");
f.write('''
/*
* sanity checks for generated foreign headers:
* - verify struct sizes
*
* generated by %s -- DO NOT EDIT
*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <inttypes.h>
#include "../xen.h"
''');
for a in archs:
f.write('#include "%s.h"\n' % a);
f.write('int main(int argc, char *argv[])\n{\n');
f.write('\tprintf("\\n");');
f.write('printf("%-25s |", "structs");\n');
for a in archs:
f.write('\tprintf("%%8s", "%s");\n' % a);
f.write('\tprintf("\\n");');
f.write('\tprintf("\\n");');
for struct in structs:
f.write('\tprintf("%%-25s |", "%s");\n' % struct);
for a in archs:
if a == arch:
s = struct; # native
else:
s = struct + "_" + a;
f.write('#ifdef %s_has_no_%s\n' % (a, struct));
f.write('\tprintf("%8s", "-");\n');
f.write("#else\n");
f.write('\tprintf("%%8zd", sizeof(struct %s));\n' % s);
f.write("#endif\n");
f.write('\tprintf("\\n");\n\n');
f.write('\tprintf("\\n");\n');
f.write('\texit(0);\n');
f.write('}\n');
f.close();

View File

@ -1,167 +0,0 @@
#!/usr/bin/python
import sys, re;
from structs import unions, structs, defines;
# command line arguments
arch = sys.argv[1];
outfile = sys.argv[2];
infiles = sys.argv[3:];
###########################################################################
# configuration #2: architecture information
inttypes = {};
header = {};
footer = {};
# x86_32
inttypes["x86_32"] = {
"unsigned long" : "uint32_t",
"long" : "uint32_t",
"xen_pfn_t" : "uint32_t",
};
header["x86_32"] = """
#define __i386___X86_32 1
#pragma pack(4)
""";
footer["x86_32"] = """
#pragma pack()
""";
# x86_64
inttypes["x86_64"] = {
"unsigned long" : "__align8__ uint64_t",
"long" : "__align8__ uint64_t",
"xen_pfn_t" : "__align8__ uint64_t",
};
header["x86_64"] = """
#ifdef __GNUC__
# define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
# define __align8__ __attribute__((aligned (8)))
#else
# define __DECL_REG(name) uint64_t r ## name
# define __align8__ FIXME
#endif
#define __x86_64___X86_64 1
""";
# ia64
inttypes["ia64"] = {
"unsigned long" : "__align8__ uint64_t",
"long" : "__align8__ uint64_t",
"xen_pfn_t" : "__align8__ uint64_t",
"long double" : "__align16__ ldouble_t",
};
header["ia64"] = """
#define __align8__ __attribute__((aligned (8)))
#define __align16__ __attribute__((aligned (16)))
typedef unsigned char ldouble_t[16];
""";
###########################################################################
# main
input = "";
output = "";
fileid = re.sub("[-.]", "_", "__FOREIGN_%s__" % outfile.upper());
# read input header files
for name in infiles:
f = open(name, "r");
input += f.read();
f.close();
# add header
output += """
/*
* public xen defines and struct for %s
* generated by %s -- DO NOT EDIT
*/
#ifndef %s
#define %s 1
""" % (arch, sys.argv[0], fileid, fileid)
if arch in header:
output += header[arch];
output += "\n";
# add defines to output
for line in re.findall("#define[^\n]+", input):
for define in defines:
regex = "#define\s+%s\\b" % define;
match = re.search(regex, line);
if None == match:
continue;
if define.upper()[0] == define[0]:
replace = define + "_" + arch.upper();
else:
replace = define + "_" + arch;
regex = "\\b%s\\b" % define;
output += re.sub(regex, replace, line) + "\n";
output += "\n";
# delete defines, comments, empty lines
input = re.sub("#define[^\n]+\n", "", input);
input = re.compile("/\*(.*?)\*/", re.S).sub("", input)
input = re.compile("\n\s*\n", re.S).sub("\n", input);
# add unions to output
for union in unions:
regex = "union\s+%s\s*\{(.*?)\n\};" % union;
match = re.search(regex, input, re.S)
if None == match:
output += "#define %s_has_no_%s 1\n" % (arch, union);
else:
output += "union %s_%s {%s\n};\n" % (union, arch, match.group(1));
output += "\n";
# add structs to output
for struct in structs:
regex = "struct\s+%s\s*\{(.*?)\n\};" % struct;
match = re.search(regex, input, re.S)
if None == match:
output += "#define %s_has_no_%s 1\n" % (arch, struct);
else:
output += "struct %s_%s {%s\n};\n" % (struct, arch, match.group(1));
output += "typedef struct %s_%s %s_%s_t;\n" % (struct, arch, struct, arch);
output += "\n";
# add footer
if arch in footer:
output += footer[arch];
output += "\n";
output += "#endif /* %s */\n" % fileid;
# replace: defines
for define in defines:
if define.upper()[0] == define[0]:
replace = define + "_" + arch.upper();
else:
replace = define + "_" + arch;
output = re.sub("\\b%s\\b" % define, replace, output);
# replace: unions
for union in unions:
output = re.sub("\\b(union\s+%s)\\b" % union, "\\1_%s" % arch, output);
# replace: structs + struct typedefs
for struct in structs:
output = re.sub("\\b(struct\s+%s)\\b" % struct, "\\1_%s" % arch, output);
output = re.sub("\\b(%s)_t\\b" % struct, "\\1_%s_t" % arch, output);
# replace: integer types
integers = list(inttypes[arch].keys());
integers.sort(lambda a, b: cmp(len(b),len(a)));
for type in integers:
output = re.sub("\\b%s\\b" % type, inttypes[arch][type], output);
# print results
f = open(outfile, "w");
f.write(output);
f.close;

View File

@ -1,17 +0,0 @@
structs                  |  x86_32  x86_64    ia64
start_info               |    1104    1152    1152
trap_info                |       8      16       -
pt_fpreg                 |       -       -      16
cpu_user_regs            |      68     200     496
xen_ia64_boot_param      |       -       -      96
ia64_tr_entry            |       -       -      32
vcpu_extra_regs          |       -       -     536
vcpu_guest_context       |    2800    5168    1056
arch_vcpu_info           |      24      16       0
vcpu_time_info           |      32      32      32
vcpu_info                |      64      64      48
arch_shared_info         |     268     280     272
shared_info              |    2584    3368    4384

View File

@ -1,58 +0,0 @@
# configuration: what needs translation
unions = [ "vcpu_cr_regs",
"vcpu_ar_regs" ];
structs = [ "start_info",
"trap_info",
"pt_fpreg",
"cpu_user_regs",
"xen_ia64_boot_param",
"ia64_tr_entry",
"vcpu_tr_regs",
"vcpu_guest_context_regs",
"vcpu_guest_context",
"arch_vcpu_info",
"vcpu_time_info",
"vcpu_info",
"arch_shared_info",
"shared_info" ];
defines = [ "__i386__",
"__x86_64__",
"FLAT_RING1_CS",
"FLAT_RING1_DS",
"FLAT_RING1_SS",
"FLAT_RING3_CS64",
"FLAT_RING3_DS64",
"FLAT_RING3_SS64",
"FLAT_KERNEL_CS64",
"FLAT_KERNEL_DS64",
"FLAT_KERNEL_SS64",
"FLAT_KERNEL_CS",
"FLAT_KERNEL_DS",
"FLAT_KERNEL_SS",
# x86_{32,64}
"_VGCF_i387_valid",
"VGCF_i387_valid",
"_VGCF_in_kernel",
"VGCF_in_kernel",
"_VGCF_failsafe_disables_events",
"VGCF_failsafe_disables_events",
"_VGCF_syscall_disables_events",
"VGCF_syscall_disables_events",
"_VGCF_online",
"VGCF_online",
# ia64
"VGCF_EXTRA_REGS",
# all archs
"xen_pfn_to_cr3",
"XEN_LEGACY_MAX_VCPUS",
"MAX_GUEST_CMDLINE" ];

115
sys/xen/interface/gcov.h Normal file
View File

@ -0,0 +1,115 @@
/******************************************************************************
* gcov.h
*
* Coverage structures exported by Xen.
* Structure is different from Gcc one.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2013, Citrix Systems R&D Ltd.
*/
#ifndef __XEN_PUBLIC_GCOV_H__
#define __XEN_PUBLIC_GCOV_H__ __XEN_PUBLIC_GCOV_H__
#define XENCOV_COUNTERS 5
#define XENCOV_TAG_BASE 0x58544300u
#define XENCOV_TAG_FILE (XENCOV_TAG_BASE+0x46u)
#define XENCOV_TAG_FUNC (XENCOV_TAG_BASE+0x66u)
#define XENCOV_TAG_COUNTER(n) (XENCOV_TAG_BASE+0x30u+((n)&0xfu))
#define XENCOV_TAG_END (XENCOV_TAG_BASE+0x2eu)
#define XENCOV_IS_TAG_COUNTER(n) \
((n) >= XENCOV_TAG_COUNTER(0) && (n) < XENCOV_TAG_COUNTER(XENCOV_COUNTERS))
#define XENCOV_COUNTER_NUM(n) ((n)-XENCOV_TAG_COUNTER(0))
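/*
 * Classification sketch (the helper name is an assumption, not part of the
 * interface): map a blob tag onto the structure kinds defined below.
 */
static inline const char *
xencov_tag_name(uint32_t tag)
{
    if (tag == XENCOV_TAG_FILE)
        return "file";
    if (tag == XENCOV_TAG_FUNC)
        return "functions";
    if (tag == XENCOV_TAG_END)
        return "end";
    if (XENCOV_IS_TAG_COUNTER(tag))
        return "counter";   /* index via XENCOV_COUNTER_NUM(tag) */
    return "unknown";
}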
/*
* The main structure for the blob is
* BLOB := FILE.. END
* FILE := TAG_FILE VERSION STAMP FILENAME COUNTERS FUNCTIONS
* FILENAME := LEN characters
* characters are padded to 32 bit
* LEN := 32 bit value
* COUNTERS := TAG_COUNTER(n) NUM COUNTER..
* NUM := 32 bit value
* COUNTER := 64 bit value
* FUNCTIONS := TAG_FUNC NUM FUNCTION..
* FUNCTION := IDENT CHECKSUM NUM_COUNTERS
*
* All tagged structures are aligned to 8 bytes
*/
/**
* File information
* Prefixed with XENCOV_TAG_FILE and a string with filename
* Aligned to 8 bytes
*/
struct xencov_file
{
uint32_t tag; /* XENCOV_TAG_FILE */
uint32_t version;
uint32_t stamp;
uint32_t fn_len;
char filename[1];
};
/**
* Counters information
* Prefixed with XENCOV_TAG_COUNTER(n) where n is 0..(XENCOV_COUNTERS-1)
* Aligned to 8 bytes
*/
struct xencov_counter
{
uint32_t tag; /* XENCOV_TAG_COUNTER(n) */
uint32_t num;
uint64_t values[1];
};
/**
* Information for each function
* The number of counters equals the number of counter structures seen before
*/
struct xencov_function
{
uint32_t ident;
uint32_t checksum;
uint32_t num_counters[1];
};
/**
* Information for all functions
* Aligned to 8 bytes
*/
struct xencov_functions
{
uint32_t tag; /* XENCOV_TAG_FUNC */
uint32_t num;
struct xencov_function xencov_function[1];
};
/**
* Terminator
*/
struct xencov_end
{
uint32_t tag; /* XENCOV_TAG_END */
};
#endif /* __XEN_PUBLIC_GCOV_H__ */

View File

@ -134,8 +134,10 @@ struct grant_entry_v1 {
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
* GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
* GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
* GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
* GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
* GTF_transfer_completed: MFN whose ownership transferred by @domid
* (non-translated guests only). [XEN]
*/
uint32_t frame;
};
@ -309,6 +311,7 @@ typedef uint16_t grant_status_t;
#define GNTTABOP_get_status_frames 9
#define GNTTABOP_get_version 10
#define GNTTABOP_swap_grant_ref 11
#define GNTTABOP_cache_flush 12
#endif /* __XEN_INTERFACE_VERSION__ */
/* ` } */
@ -320,7 +323,7 @@ typedef uint32_t grant_handle_t;
/*
* GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
* by devices and/or host CPUs. If successful, <handle> is a tracking number
* that must be presented later to destroy the mapping(s). On error, <handle>
* that must be presented later to destroy the mapping(s). On error, <status>
* is a negative status code.
* NOTES:
* 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
@ -385,7 +388,11 @@ struct gnttab_setup_table {
uint32_t nr_frames;
/* OUT parameters. */
int16_t status; /* => enum grant_status */
#if __XEN_INTERFACE_VERSION__ < 0x00040300
XEN_GUEST_HANDLE(ulong) frame_list;
#else
XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
#endif
};
typedef struct gnttab_setup_table gnttab_setup_table_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
@ -445,12 +452,10 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
#define _GNTCOPY_dest_gref (1)
#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
#define _GNTCOPY_can_fail (2)
#define GNTCOPY_can_fail (1<<_GNTCOPY_can_fail)
struct gnttab_copy {
/* IN parameters. */
struct {
struct gnttab_copy_ptr {
union {
grant_ref_t ref;
xen_pfn_t gmfn;
@ -572,6 +577,25 @@ struct gnttab_swap_grant_ref {
typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
/*
* Issue one or more cache maintenance operations on a portion of a
* page granted to the calling domain by a foreign domain.
*/
struct gnttab_cache_flush {
union {
uint64_t dev_bus_addr;
grant_ref_t ref;
} a;
uint16_t offset; /* offset from start of grant */
uint16_t length; /* size within the grant */
#define GNTTAB_CACHE_CLEAN (1<<0)
#define GNTTAB_CACHE_INVAL (1<<1)
#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
uint32_t op;
};
typedef struct gnttab_cache_flush gnttab_cache_flush_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_cache_flush_t);
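/*
 * Caller-side sketch with illustrative values: clean the CPU cache for the
 * first 256 bytes of a page granted to us as <ref>.  The hypercall wrapper
 * is assumed to be provided by the environment and is only declared here.
 */
extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
    unsigned int count);    /* assumed environment-provided wrapper */

static int
example_flush_granted_page(grant_ref_t ref)
{
    struct gnttab_cache_flush op = {
        .a.ref  = ref,
        .offset = 0,
        .length = 256,
        .op     = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_SOURCE_GREF,
    };

    return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &op, 1);
}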
#endif /* __XEN_INTERFACE_VERSION__ */
/*
@ -652,7 +676,7 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -1,4 +1,3 @@
/*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
@ -17,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2006, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_E820_H__

View File

@ -20,6 +20,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2006, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__

View File

@ -16,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2007, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
@ -23,6 +25,7 @@
#include "../xen.h"
#include "../trace.h"
#include "../event_channel.h"
/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param 0
@ -80,6 +83,7 @@ typedef enum {
HVMMEM_ram_rw, /* Normal read/write guest RAM */
HVMMEM_ram_ro, /* Read-only; writes are discarded */
HVMMEM_mmio_dm, /* Reads and write go to the device model */
HVMMEM_mmio_write_dm /* Read-only; writes go to the device model */
} hvmmem_type_t;
/* Following tools-only interfaces may change in future. */
@ -90,10 +94,10 @@ typedef enum {
struct xen_hvm_track_dirty_vram {
/* Domain to be tracked. */
domid_t domid;
/* Number of pages to track. */
uint32_t nr;
/* First pfn to track. */
uint64_aligned_t first_pfn;
/* Number of pages to track. */
uint64_aligned_t nr;
/* OUT variable. */
/* Dirty bitmap buffer. */
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
@ -106,10 +110,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
struct xen_hvm_modified_memory {
/* Domain to be updated. */
domid_t domid;
/* Number of pages. */
uint32_t nr;
/* First pfn. */
uint64_aligned_t first_pfn;
/* Number of pages. */
uint64_aligned_t nr;
};
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
@ -162,49 +166,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Deprecated by XENMEM_access_op_set_access */
#define HVMOP_set_mem_access 12
typedef enum {
HVMMEM_access_n,
HVMMEM_access_r,
HVMMEM_access_w,
HVMMEM_access_rw,
HVMMEM_access_x,
HVMMEM_access_rx,
HVMMEM_access_wx,
HVMMEM_access_rwx,
HVMMEM_access_rx2rw, /* Page starts off as r-x, but automatically
* change to r-w on a write */
HVMMEM_access_n2rwx, /* Log access: starts off as n, automatically
* goes to rwx, generating an event without
* pausing the vcpu */
HVMMEM_access_default /* Take the domain default */
} hvmmem_access_t;
/* Notify that a region of memory is to have specific access types */
struct xen_hvm_set_mem_access {
/* Domain to be updated. */
domid_t domid;
/* Memory type */
uint16_t hvmmem_access; /* hvm_access_t */
/* Number of pages, ignored on setting default access */
uint32_t nr;
/* First pfn, or ~0ull to set the default access for new pages */
uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
/* Deprecated by XENMEM_access_op_get_access */
#define HVMOP_get_mem_access 13
/* Get the specific access type for that region of memory */
struct xen_hvm_get_mem_access {
/* Domain to be queried. */
domid_t domid;
/* Memory type: OUT */
uint16_t hvmmem_access; /* hvm_access_t */
/* pfn, or ~0ull for default access for new pages. IN */
uint64_aligned_t pfn;
};
typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
#define HVMOP_inject_trap 14
/* Inject a trap into a VCPU, which will get taken up on the next
@ -270,6 +236,267 @@ struct xen_hvm_inject_msi {
typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
/*
* IOREQ Servers
*
* The interface between an I/O emulator and Xen is called an IOREQ Server.
* A domain supports a single 'legacy' IOREQ Server which is instantiated if
* parameter...
*
* HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
* ioreq structures), or...
* HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
* ioreq ring), or...
* HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
* to request buffered I/O emulation).
*
* The following hypercalls facilitate the creation of IOREQ Servers for
* 'secondary' emulators which are invoked to implement port I/O, memory, or
* PCI config space ranges which they explicitly register.
*/
typedef uint16_t ioservid_t;
/*
* HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
* emulator servicing domain <domid>.
*
* The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
* the buffered ioreq ring will not be allocated and hence all emulation
* requests to this server will be synchronous.
*/
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
domid_t domid; /* IN - domain to be serviced */
#define HVM_IOREQSRV_BUFIOREQ_OFF 0
#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
/*
* Use this when read_pointer gets updated atomically and
* the pointer pair gets read atomically:
*/
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
ioservid_t id; /* OUT - server id */
};
typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
/*
* HVMOP_get_ioreq_server_info: Get all the information necessary to access
* IOREQ Server <id>.
*
* The emulator needs to map the synchronous ioreq structures and buffered
* ioreq ring (if it exists) that Xen uses to request emulation. These are
* hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
* respectively. In addition, if the IOREQ Server is handling buffered
* emulation requests, the emulator needs to bind to event channel
* <bufioreq_port> to listen for them. (The event channels used for
* synchronous emulation requests are specified in the per-CPU ioreq
* structures in <ioreq_pfn>).
* If the IOREQ Server is not handling buffered emulation requests then the
* values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
*/
#define HVMOP_get_ioreq_server_info 18
struct xen_hvm_get_ioreq_server_info {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
evtchn_port_t bufioreq_port; /* OUT - buffered ioreq port */
uint64_aligned_t ioreq_pfn; /* OUT - sync ioreq pfn */
uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
};
typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
/*
* HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
* for emulation by the client of IOREQ
* Server <id>
* HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
* for emulation by the client of IOREQ
* Server <id>
*
* There are three types of I/O that can be emulated: port I/O, memory accesses
* and PCI config space accesses. The <type> field denotes which type of range
* the <start> and <end> (inclusive) fields are specifying.
* PCI config space ranges are specified by segment/bus/device/function values
* which should be encoded using the HVMOP_PCI_SBDF helper macro below.
*
* NOTE: unless an emulation request falls entirely within a range mapped
* by a secondary emulator, it will not be passed to that emulator.
*/
#define HVMOP_map_io_range_to_ioreq_server 19
#define HVMOP_unmap_io_range_from_ioreq_server 20
struct xen_hvm_io_range {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
uint32_t type; /* IN - type of range */
# define HVMOP_IO_RANGE_PORT 0 /* I/O port range */
# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define HVMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */
uint64_aligned_t start, end; /* IN - inclusive start and end of range */
};
typedef struct xen_hvm_io_range xen_hvm_io_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
#define HVMOP_PCI_SBDF(s,b,d,f) \
((((s) & 0xffff) << 16) | \
(((b) & 0xff) << 8) | \
(((d) & 0x1f) << 3) | \
((f) & 0x07))
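/*
 * Usage sketch (illustrative values and helper name only): claim the PCI
 * config space of device 0000:03:1f.2 in guest <domid> for IOREQ Server
 * <id>.  Submitting the filled structure via
 * HVMOP_map_io_range_to_ioreq_server goes through the environment's
 * hypercall wrapper, which is not part of this header.
 */
static void
example_fill_pci_range(xen_hvm_io_range_t *r, domid_t domid, ioservid_t id)
{
    r->domid = domid;
    r->id = id;
    r->type = HVMOP_IO_RANGE_PCI;
    r->start = r->end = HVMOP_PCI_SBDF(0x0000, 0x03, 0x1f, 0x02);
}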
/*
* HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
* <domid>.
*
* Any registered I/O ranges will be automatically deregistered.
*/
#define HVMOP_destroy_ioreq_server 21
struct xen_hvm_destroy_ioreq_server {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
};
typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
/*
* HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
* domain <domid>.
*
* The IOREQ Server will not be passed any emulation requests until it is in the
* enabled state.
* Note that the contents of the ioreq_pfn and bufioreq_pfn (see
* HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
* the enabled state.
*/
#define HVMOP_set_ioreq_server_state 22
struct xen_hvm_set_ioreq_server_state {
domid_t domid; /* IN - domain to be serviced */
ioservid_t id; /* IN - server id */
uint8_t enabled; /* IN - enabled? */
};
typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#if defined(__i386__) || defined(__x86_64__)
/*
* HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
* channel upcalls on the specified <vcpu>. If set,
* this vector will be used in preference to the
* domain global callback (see
* HVM_PARAM_CALLBACK_IRQ).
*/
#define HVMOP_set_evtchn_upcall_vector 23
struct xen_hvm_evtchn_upcall_vector {
uint32_t vcpu;
uint8_t vector;
};
typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
#endif /* defined(__i386__) || defined(__x86_64__) */
#define HVMOP_guest_request_vm_event 24
/* HVMOP_altp2m: perform altp2m state operations */
#define HVMOP_altp2m 25
#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001
struct xen_hvm_altp2m_domain_state {
/* IN or OUT variable on/off */
uint8_t state;
};
typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
struct xen_hvm_altp2m_vcpu_enable_notify {
uint32_t vcpu_id;
uint32_t pad;
/* #VE info area gfn */
uint64_t gfn;
};
typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
struct xen_hvm_altp2m_view {
/* IN/OUT variable */
uint16_t view;
/* Create view only: default access type
* NOTE: currently ignored */
uint16_t hvmmem_default_access; /* xenmem_access_t */
};
typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
struct xen_hvm_altp2m_set_mem_access {
/* view */
uint16_t view;
/* Memory type */
uint16_t hvmmem_access; /* xenmem_access_t */
uint32_t pad;
/* gfn */
uint64_t gfn;
};
typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
struct xen_hvm_altp2m_change_gfn {
/* view */
uint16_t view;
uint16_t pad1;
uint32_t pad2;
/* old gfn */
uint64_t old_gfn;
/* new gfn, INVALID_GFN (~0UL) means revert */
uint64_t new_gfn;
};
typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
struct xen_hvm_altp2m_op {
uint32_t version; /* HVMOP_ALTP2M_INTERFACE_VERSION */
uint32_t cmd;
/* Get/set the altp2m state for a domain */
#define HVMOP_altp2m_get_domain_state 1
#define HVMOP_altp2m_set_domain_state 2
/* Set the current VCPU to receive altp2m event notifications */
#define HVMOP_altp2m_vcpu_enable_notify 3
/* Create a new view */
#define HVMOP_altp2m_create_p2m 4
/* Destroy a view */
#define HVMOP_altp2m_destroy_p2m 5
/* Switch view for an entire domain */
#define HVMOP_altp2m_switch_p2m 6
/* Notify that a page of memory is to have specific access types */
#define HVMOP_altp2m_set_mem_access 7
/* Change a p2m entry to have a different gfn->mfn mapping */
#define HVMOP_altp2m_change_gfn 8
domid_t domain;
uint16_t pad1;
uint32_t pad2;
union {
struct xen_hvm_altp2m_domain_state domain_state;
struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
struct xen_hvm_altp2m_view view;
struct xen_hvm_altp2m_set_mem_access set_mem_access;
struct xen_hvm_altp2m_change_gfn change_gfn;
uint8_t pad[64];
} u;
};
typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -0,0 +1,82 @@
/******************************************************************************
* hvm/hvm_xs_strings.h
*
* HVM xenstore strings used in HVMLOADER.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2013, Citrix Systems
*/
#ifndef __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__
#define __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__
#define HVM_XS_HVMLOADER "hvmloader"
#define HVM_XS_BIOS "hvmloader/bios"
#define HVM_XS_GENERATION_ID_ADDRESS "hvmloader/generation-id-address"
#define HVM_XS_ALLOW_MEMORY_RELOCATE "hvmloader/allow-memory-relocate"
/* The following values allow additional ACPI tables to be added to the
* virtual ACPI BIOS that hvmloader constructs. The values specify the guest
* physical address and length of a block of ACPI tables to add. The format of
* the block is simply concatenated raw tables (which specify their own length
* in the ACPI header).
*/
#define HVM_XS_ACPI_PT_ADDRESS "hvmloader/acpi/address"
#define HVM_XS_ACPI_PT_LENGTH "hvmloader/acpi/length"
/* Any number of SMBIOS types can be passed through to an HVM guest using
* the following xenstore values. The values specify the guest physical
* address and length of a block of SMBIOS structures for hvmloader to use.
* The block is formatted in the following way:
*
* <length><struct><length><struct>...
*
* Each length separator is a 32b integer indicating the length of the next
* SMBIOS structure. For DMTF defined types (0 - 121), the passed in struct
* will replace the default structure in hvmloader. In addition, any
* OEM/vendortypes (128 - 255) will all be added.
*/
#define HVM_XS_SMBIOS_PT_ADDRESS "hvmloader/smbios/address"
#define HVM_XS_SMBIOS_PT_LENGTH "hvmloader/smbios/length"
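/*
 * Caller-side sketch (helper name assumed; <string.h> assumed available):
 * append one raw SMBIOS structure to a pass-through block laid out as the
 * <length><struct>... sequence described above, returning the new offset.
 */
static size_t
example_smbios_pt_append(uint8_t *blk, size_t off, const void *st, uint32_t len)
{
    memcpy(blk + off, &len, sizeof(len));       /* 32-bit length separator */
    memcpy(blk + off + sizeof(len), st, len);   /* raw SMBIOS structure */
    return off + sizeof(len) + len;
}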
/* Set to 1 to enable SMBIOS default portable battery (type 22) values. */
#define HVM_XS_SMBIOS_DEFAULT_BATTERY "hvmloader/smbios/default_battery"
/* The following xenstore values are used to override some of the default
* string values in the SMBIOS table constructed in hvmloader.
*/
#define HVM_XS_BIOS_STRINGS "bios-strings"
#define HVM_XS_BIOS_VENDOR "bios-strings/bios-vendor"
#define HVM_XS_BIOS_VERSION "bios-strings/bios-version"
#define HVM_XS_SYSTEM_MANUFACTURER "bios-strings/system-manufacturer"
#define HVM_XS_SYSTEM_PRODUCT_NAME "bios-strings/system-product-name"
#define HVM_XS_SYSTEM_VERSION "bios-strings/system-version"
#define HVM_XS_SYSTEM_SERIAL_NUMBER "bios-strings/system-serial-number"
#define HVM_XS_ENCLOSURE_MANUFACTURER "bios-strings/enclosure-manufacturer"
#define HVM_XS_ENCLOSURE_SERIAL_NUMBER "bios-strings/enclosure-serial-number"
#define HVM_XS_BATTERY_MANUFACTURER "bios-strings/battery-manufacturer"
#define HVM_XS_BATTERY_DEVICE_NAME "bios-strings/battery-device-name"
/* 1 to 99 OEM strings can be set in xenstore using values of the form
* below. These strings will be loaded into the SMBIOS type 11 structure.
*/
#define HVM_XS_OEM_STRINGS "bios-strings/oem-%d"
#endif /* __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__ */

View File

@ -34,13 +34,20 @@
#define IOREQ_TYPE_PIO 0 /* pio */
#define IOREQ_TYPE_COPY 1 /* mmio ops */
#define IOREQ_TYPE_PCI_CONFIG 2
#define IOREQ_TYPE_TIMEOFFSET 7
#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
/*
* VMExit dispatcher should cooperate with instruction decoder to
* prepare this structure and notify service OS and DM by sending
* virq
* virq.
*
* For I/O type IOREQ_TYPE_PCI_CONFIG, the physical address is formatted
* as follows:
*
* 63....48|47..40|39..35|34..32|31........0
* SEGMENT |BUS |DEV |FN |OFFSET
*/
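/*
 * Decoding sketch (helper names are assumptions, not part of the interface)
 * for the IOREQ_TYPE_PCI_CONFIG address layout shown above.
 */
static inline uint16_t ioreq_pci_segment(uint64_t addr) { return addr >> 48; }
static inline uint8_t  ioreq_pci_bus(uint64_t addr)  { return (addr >> 40) & 0xff; }
static inline uint8_t  ioreq_pci_dev(uint64_t addr)  { return (addr >> 35) & 0x1f; }
static inline uint8_t  ioreq_pci_fn(uint64_t addr)   { return (addr >> 32) & 0x7; }
static inline uint32_t ioreq_pci_offset(uint64_t addr) { return addr & 0xffffffff; }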
struct ioreq {
uint64_t addr; /* physical address */
@ -76,30 +83,21 @@ typedef struct buf_ioreq buf_ioreq_t;
#define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */
struct buffered_iopage {
unsigned int read_pointer;
unsigned int write_pointer;
#ifdef __XEN__
union bufioreq_pointers {
struct {
#endif
uint32_t read_pointer;
uint32_t write_pointer;
#ifdef __XEN__
};
uint64_t full;
} ptrs;
#endif
buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
}; /* NB. Size of this structure must be no greater than one page. */
typedef struct buffered_iopage buffered_iopage_t;
#if defined(__ia64__)
struct pio_buffer {
uint32_t page_offset;
uint32_t pointer;
uint32_t data_end;
uint32_t buf_size;
void *opaque;
};
#define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */
#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */
#define PIO_BUFFER_ENTRY_NUM 2
struct buffered_piopage {
struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM];
uint8_t buffer[1];
};
#endif /* defined(__ia64__) */
/*
* ACPI Control/Event register locations. Location is controlled by a
* version number in HVM_PARAM_ACPI_IOPORTS_LOCATION.
@ -132,7 +130,7 @@ struct buffered_piopage {
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -16,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2007, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
@ -54,17 +56,54 @@
#define HVM_PARAM_BUFIOREQ_PFN 6
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#ifdef __ia64__
#if defined(__i386__) || defined(__x86_64__)
#define HVM_PARAM_NVRAM_FD 7
#define HVM_PARAM_VHPT_SIZE 8
#define HVM_PARAM_BUFPIOREQ_PFN 9
#elif defined(__i386__) || defined(__x86_64__)
/* Expose Viridian interfaces to this HVM guest? */
/*
* Viridian enlightenments
*
* (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx)
*
* To expose viridian enlightenments to the guest set this parameter
* to the desired feature mask. The base feature set must be present
* in any valid feature mask.
*/
#define HVM_PARAM_VIRIDIAN 9
/* Base+Freq viridian feature sets:
*
* - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL)
* - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
* - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX)
* - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
* HV_X64_MSR_APIC_FREQUENCY)
*/
#define _HVMPV_base_freq 0
#define HVMPV_base_freq (1 << _HVMPV_base_freq)
/* Feature set modifications */
/* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
* HV_X64_MSR_APIC_FREQUENCY).
* This modification restores the viridian feature set to the
* original 'base' set exposed in releases prior to Xen 4.4.
*/
#define _HVMPV_no_freq 1
#define HVMPV_no_freq (1 << _HVMPV_no_freq)
/* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */
#define _HVMPV_time_ref_count 2
#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count)
/* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */
#define _HVMPV_reference_tsc 3
#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
#define HVMPV_feature_mask \
(HVMPV_base_freq | \
HVMPV_no_freq | \
HVMPV_time_ref_count | \
HVMPV_reference_tsc)
#endif
/*
@ -125,28 +164,34 @@
*/
#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
/* Enable blocking memory events, async or sync (pause vcpu until response)
* onchangeonly indicates messages only on a change of value */
/* Deprecated */
#define HVM_PARAM_MEMORY_EVENT_CR0 20
#define HVM_PARAM_MEMORY_EVENT_CR3 21
#define HVM_PARAM_MEMORY_EVENT_CR4 22
#define HVM_PARAM_MEMORY_EVENT_INT3 23
#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
#define HVMPME_MODE_MASK (3 << 0)
#define HVMPME_mode_disabled 0
#define HVMPME_mode_async 1
#define HVMPME_mode_sync 2
#define HVMPME_onchangeonly (1 << 2)
#define HVM_PARAM_MEMORY_EVENT_MSR 30
/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM 24
/* Params for the mem event rings */
#define HVM_PARAM_PAGING_RING_PFN 27
#define HVM_PARAM_ACCESS_RING_PFN 28
#define HVM_PARAM_MONITOR_RING_PFN 28
#define HVM_PARAM_SHARING_RING_PFN 29
#define HVM_NR_PARAMS 30
/* SHUTDOWN_* action in case of a triple fault */
#define HVM_PARAM_TRIPLE_FAULT_REASON 31
#define HVM_PARAM_IOREQ_SERVER_PFN 32
#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33
/* Location of the VM Generation ID in guest physical address space. */
#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
/* Boolean: Enable altp2m */
#define HVM_PARAM_ALTP2M 35
#define HVM_NR_PARAMS 36
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */

View File

@ -0,0 +1,49 @@
/*
* pvdrivers.h: Register of PV drivers product numbers.
* Copyright (c) 2012, Citrix Systems Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _XEN_PUBLIC_PVDRIVERS_H_
#define _XEN_PUBLIC_PVDRIVERS_H_
/*
* This is the master registry of product numbers for
* PV drivers.
* If you need a new product number allocating, please
* post to xen-devel@lists.xensource.com. You should NOT use
* a product number without allocating one.
* If you maintain a separate versioning and distribution path
* for PV drivers you should have a separate product number so
* that your drivers can be separated from others.
*
* During development, you may use the product ID to
* indicate a driver which is yet to be released.
*/
#define PVDRIVERS_PRODUCT_LIST(EACH) \
EACH("xensource-windows", 0x0001) /* Citrix */ \
EACH("gplpv-windows", 0x0002) /* James Harper */ \
EACH("linux", 0x0003) \
EACH("xenserver-windows-v7.0+", 0x0004) /* Citrix */ \
EACH("xenserver-windows-v7.2+", 0x0005) /* Citrix */ \
EACH("experimental", 0xffff)
#endif /* _XEN_PUBLIC_PVDRIVERS_H_ */

View File

@ -102,9 +102,7 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
#if defined(__i386__) || defined(__x86_64__)
#include "../arch-x86/hvm/save.h"
#elif defined(__ia64__)
#include "../arch-ia64/hvm/save.h"
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#include "../arch-arm/hvm/save.h"
#else
#error "unsupported architecture"

View File

@ -59,7 +59,7 @@
* All data in the XenStore is stored as strings. Nodes specifying numeric
* values are encoded in decimal. Integer value ranges listed below are
* expressed as fixed sized integer types capable of storing the conversion
* of a properly formatted node string, without loss of information.
* of a properly formated node string, without loss of information.
*
* Any specified default value is in effect if the corresponding XenBus node
* is not present in the XenStore.
@ -88,9 +88,15 @@
* params
* Values: string
*
* Data used by the backend driver to locate and configure the backing
* device. The format and semantics of this data vary according to the
* backing device in use and are outside the scope of this specification.
* A free formatted string providing sufficient information for the
* backend driver to open the backing device. (e.g. the path to the
* file or block device representing the backing store.)
*
* physical-device
* Values: "MAJOR:MINOR"
*
* MAJOR and MINOR are the major number and minor number of the
* backing device respectively.
*
* type
* Values: "file", "phy", "tap"
@ -493,7 +499,7 @@
* discarded region on the device must be rendered unrecoverable before the
* command returns.
*
* This operation is analogous to performing a trim (ATA) or unmap (SCSI),
* This operation is analogous to performing a trim (ATA) or unamp (SCSI),
* command on a native device.
*
* More information about trim/unmap operations can be found at:
@ -558,7 +564,6 @@ struct blkif_request_segment {
/* @last_sect: last sector in frame to transfer (inclusive). */
uint8_t first_sect, last_sect;
};
typedef struct blkif_request_segment blkif_request_segment_t;
/*
* Starting ring element for any I/O request.
@ -569,7 +574,7 @@ struct blkif_request {
blkif_vdev_t handle; /* only for read/write requests */
uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
blkif_request_segment_t seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;

View File

@ -43,7 +43,7 @@ struct xencons_interface {
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -168,7 +168,7 @@ struct xenfb_page
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -124,7 +124,7 @@ struct xenkbd_page
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -21,8 +21,7 @@
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* License along with this library; If not, see <http://www.gnu.org/licenses/>.
*
* @section DESCRIPTION
*

View File

@ -30,6 +30,24 @@
#include "ring.h"
#include "../grant_table.h"
/*
* Older implementation of Xen network frontend / backend has an
* implicit dependency on the MAX_SKB_FRAGS as the maximum number of
* ring slots a skb can use. Netfront / netback may not work as
* expected when frontend and backend have different MAX_SKB_FRAGS.
*
* A better approach is to add mechanism for netfront / netback to
* negotiate this value. However we cannot fix all possible
* frontends, so we need to define a value which states the minimum
* slots backend must support.
*
* The minimum value derives from older Linux kernel's MAX_SKB_FRAGS
* (18), which is proved to work with most frontends. Any new backend
* which doesn't negotiate with frontend should expect frontend to
* send a valid packet using slots up to this value.
*/
#define XEN_NETIF_NR_SLOTS_MIN 18
/*
* Notifications after enqueuing any type of message should be conditional on
* the appropriate req_event or rsp_event field in the shared ring.
@ -38,15 +56,225 @@
* that it cannot safely queue packets (as it may not be kicked to send them).
*/
/*
* "feature-split-event-channels" is introduced to separate guest TX
* and RX notification. Backend either doesn't support this feature or
* advertises it via xenstore as 0 (disabled) or 1 (enabled).
*
* To make use of this feature, frontend should allocate two event
* channels for TX and RX, advertise them to backend as
* "event-channel-tx" and "event-channel-rx" respectively. If frontend
* doesn't want to use this feature, it just writes "event-channel"
* node as before.
*/
/*
* Multiple transmit and receive queues:
* If supported, the backend will write the key "multi-queue-max-queues" to
* the directory for that vif, and set its value to the maximum supported
* number of queues.
* Frontends that are aware of this feature and wish to use it can write the
* key "multi-queue-num-queues", set to the number they wish to use, which
* must be greater than zero, and no more than the value reported by the backend
* in "multi-queue-max-queues".
*
* Queues replicate the shared rings and event channels.
* "feature-split-event-channels" may optionally be used when using
* multiple queues, but is not mandatory.
*
* Each queue consists of one shared ring pair, i.e. there must be the same
* number of tx and rx rings.
*
* For frontends requesting just one queue, the usual event-channel and
* ring-ref keys are written as before, simplifying the backend processing
* to avoid distinguishing between a frontend that doesn't understand the
* multi-queue feature, and one that does, but requested only one queue.
*
* Frontends requesting two or more queues must not write the toplevel
* event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys,
* instead writing those keys under sub-keys having the name "queue-N" where
* N is the integer ID of the queue for which those keys belong. Queues
* are indexed from zero. For example, a frontend with two queues and split
* event channels must write the following set of queue-related keys:
*
* /local/domain/1/device/vif/0/multi-queue-num-queues = "2"
* /local/domain/1/device/vif/0/queue-0 = ""
* /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
* /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
* /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>"
* /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>"
* /local/domain/1/device/vif/0/queue-1 = ""
* /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
* /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1"
* /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
* /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>"
*
* If there is any inconsistency in the XenStore data, the backend may
* choose not to connect any queues, instead treating the request as an
* error. This includes scenarios where more (or fewer) queues were
* requested than the frontend provided details for.
*
* Mapping of packets to queues is considered to be a function of the
* transmitting system (backend or frontend) and is not negotiated
* between the two. Guests are free to transmit packets on any queue
* they choose, provided it has been set up correctly. Guests must be
* prepared to receive packets on any queue they have requested be set up.
*/
/*
* "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
* offload off or on. If it is missing then the feature is assumed to be on.
* "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
* offload on or off. If it is missing then the feature is assumed to be off.
*/
/*
* "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
* handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
* frontends nor backends are assumed to be capable unless the flags are
* present.
*/
/*
* "feature-multicast-control" advertises the capability to filter ethernet
* multicast packets in the backend. To enable use of this capability the
* frontend must set "request-multicast-control" before moving into the
* connected state.
*
* If "request-multicast-control" is set then the backend transmit side should
* no longer flood multicast packets to the frontend, it should instead drop any
* multicast packet that does not match in a filter list. The list is
* amended by the frontend by sending dummy transmit requests containing
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as specified below.
* Once enabled by the frontend, the feature cannot be disabled except by
* closing and re-connecting to the backend.
*/
/*
* This is the 'wire' format for packets:
* Request 1: netif_tx_request -- NETTXF_* (any flags)
* [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
* [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_FLAG_MORE)
* Request 4: netif_tx_request -- NETTXF_more_data
* Request 5: netif_tx_request -- NETTXF_more_data
* Request 1: netif_tx_request_t -- NETTXF_* (any flags)
* [Request 2: netif_extra_info_t] (only if request 1 has NETTXF_extra_info)
* [Request 3: netif_extra_info_t] (only if request 2 has XEN_NETIF_EXTRA_MORE)
* Request 4: netif_tx_request_t -- NETTXF_more_data
* Request 5: netif_tx_request_t -- NETTXF_more_data
* ...
* Request N: netif_tx_request -- 0
* Request N: netif_tx_request_t -- 0
*/
/*
* Guest transmit
* ==============
*
* Ring slot size is 12 octets, however not all request/response
* structs use the full size.
*
* tx request data (netif_tx_request_t)
* ------------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | grant ref | offset | flags |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | size |
* +-----+-----+-----+-----+
*
* grant ref: Reference to buffer page.
* offset: Offset within buffer page.
* flags: NETTXF_*.
* id: request identifier, echoed in response.
* size: packet size in bytes.
*
* tx response (netif_tx_response_t)
* ---------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | status | unused |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | unused |
* +-----+-----+-----+-----+
*
* id: reflects id in transmit request
* status: NETIF_RSP_*
*
* Guest receive
* =============
*
* Ring slot size is 8 octets.
*
* rx request (netif_rx_request_t)
* -------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | pad | gref |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* id: request identifier, echoed in response.
* gref: reference to incoming granted frame.
*
* rx response (netif_rx_response_t)
* ---------------------------------
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | id | offset | flags | status |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* id: reflects id in receive request
* offset: offset in page of start of received packet
* flags: NETRXF_*
* status: -ve: NETIF_RSP_*; +ve: Rx'ed pkt size.
*
* Extra Info
* ==========
*
* Can be present if initial request has NET{T,R}XF_extra_info, or
* previous extra request has XEN_NETIF_EXTRA_MORE.
*
* The struct therefore needs to fit into either a tx or rx slot and
* is therefore limited to 8 octets.
*
* extra info (netif_extra_info_t)
* -------------------------------
*
* General format:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
 * |type |flags| type specific data                |
* +-----+-----+-----+-----+-----+-----+-----+-----+
* | padding for tx |
* +-----+-----+-----+-----+
*
* type: XEN_NETIF_EXTRA_TYPE_*
* flags: XEN_NETIF_EXTRA_FLAG_*
* padding for tx: present only in the tx case due to 8 octet limit
* from rx case. Not shown in type specific entries below.
*
* XEN_NETIF_EXTRA_TYPE_GSO:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags| size |type | pad | features |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* type: Must be XEN_NETIF_EXTRA_TYPE_GSO
* flags: XEN_NETIF_EXTRA_FLAG_*
* size: Maximum payload size of each segment.
* type: XEN_NETIF_GSO_TYPE_*
 * features: XEN_NETIF_GSO_FEAT_*
*
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
*
* 0 1 2 3 4 5 6 7 octet
* +-----+-----+-----+-----+-----+-----+-----+-----+
* |type |flags| addr |
* +-----+-----+-----+-----+-----+-----+-----+-----+
*
* type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}
* flags: XEN_NETIF_EXTRA_FLAG_*
* addr: address to add/remove
*/
/* Protocol checksum field is blank in the packet (hardware offload)? */
@ -65,14 +293,13 @@
#define _NETTXF_extra_info (3)
#define NETTXF_extra_info (1U<<_NETTXF_extra_info)
#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
struct netif_tx_request {
grant_ref_t gref; /* Reference to buffer page */
uint16_t offset; /* Offset within buffer page */
uint16_t flags; /* NETTXF_* */
uint16_t id; /* Echoed in response message. */
uint16_t size; /* For the first request in a packet, the packet
size in bytes. For subsequent requests, the
size of that request's associated data in bytes*/
uint16_t size; /* Packet size in bytes. */
};
typedef struct netif_tx_request netif_tx_request_t;
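As a minimal sketch of the transmit layout documented above, the helper below fills a single-slot netif_tx_request_t (so no NETTXF_more_data chaining); the include path and the csum_blank policy are illustrative assumptions, while the NETTXF_* checksum flags are the ones defined earlier in this header.

#include <xen/interface/io/netif.h>

static void fill_tx_req(netif_tx_request_t *tx, grant_ref_t gref,
                        uint16_t offset, uint16_t id, uint16_t len,
                        int csum_blank)
{
    tx->gref   = gref;              /* reference to the buffer page */
    tx->offset = offset;            /* start of the frame within that page */
    tx->flags  = csum_blank ?
        (NETTXF_csum_blank | NETTXF_data_validated) : 0;
    tx->id     = id;                /* echoed in netif_tx_response_t */
    tx->size   = len;               /* whole packet size in bytes */
}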
@ -83,16 +310,18 @@ typedef struct netif_tx_request netif_tx_request_t;
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MAX (4)
/* netif_extra_info flags. */
/* netif_extra_info_t flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
/* GSO types - only TCPv4 currently supported. */
/* GSO types */
#define XEN_NETIF_GSO_TYPE_NONE (0)
#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
#define XEN_NETIF_GSO_TYPE_TCPV6 (2)
/*
* This structure needs to fit within both netif_tx_request and
* netif_rx_response for compatibility.
* This structure needs to fit within both netif_tx_request_t and
* netif_rx_response_t for compatibility.
*/
struct netif_extra_info {
uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
@ -127,14 +356,6 @@ struct netif_extra_info {
/*
* XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
* Backend advertises availability via 'feature-multicast-control'
* xenbus node containing value '1'.
* Frontend requests this feature by advertising
* 'request-multicast-control' xenbus node containing value '1'.
* If multicast control is requested then multicast flooding is
* disabled and the frontend must explicitly register its interest
* in multicast groups using dummy transmit requests containing
* MCAST_{ADD,DEL} extra-info fragments.
*/
struct {
uint8_t addr[6]; /* Address to add/remove. */
@ -153,6 +374,7 @@ typedef struct netif_tx_response netif_tx_response_t;
struct netif_rx_request {
uint16_t id; /* Echoed in response message. */
uint16_t pad;
grant_ref_t gref; /* Reference to incoming granted frame */
};
typedef struct netif_rx_request netif_rx_request_t;
@ -173,15 +395,11 @@ typedef struct netif_rx_request netif_rx_request_t;
#define _NETRXF_extra_info (3)
#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
/* GSO Prefix descriptor. */
#define _NETRXF_gso_prefix (4)
#define NETRXF_gso_prefix (1U<<_NETRXF_gso_prefix)
struct netif_rx_response {
uint16_t id;
uint16_t offset; /* Offset in page of start of received packet */
uint16_t flags; /* NETRXF_* */
int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed response size. */
int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
};
typedef struct netif_rx_response netif_rx_response_t;
@ -195,7 +413,7 @@ DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
#define NETIF_RSP_DROPPED -2
#define NETIF_RSP_ERROR -1
#define NETIF_RSP_OKAY 0
/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
/* No response: used for auxiliary requests (e.g., netif_extra_info_t). */
#define NETIF_RSP_NULL 1
#endif
@ -203,7 +421,7 @@ DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -46,6 +46,7 @@
#define XEN_PCI_OP_aer_resume (7)
#define XEN_PCI_OP_aer_mmio (8)
#define XEN_PCI_OP_aer_slotreset (9)
#define XEN_PCI_OP_enable_multi_msi (10)
/* xen_pci_op error numbers */
#define XEN_PCI_ERR_success (0)
@ -116,7 +117,7 @@ struct xen_pci_sharedinfo {
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -18,6 +18,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2008, Keir Fraser
*/
#ifndef __XEN_PROTOCOLS_H__
@ -25,16 +27,13 @@
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
#define XEN_IO_PROTO_ABI_ARM "arm-abi"
#if defined(__i386__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
#elif defined(__x86_64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
#elif defined(__ia64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
#else
# error arch fixup needed here

View File

@ -44,16 +44,10 @@ typedef unsigned int RING_IDX;
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
/*
* The amount of space reserved in the shared ring for accounting information.
*/
#define __RING_HEADER_SIZE(_s) \
((intptr_t)(_s)->ring - (intptr_t)(_s))
/*
* Calculate size of a shared ring, given the total available space for the
* ring and indexes (_sz), and the name tag of the request/response structure.
* A ring contains as many entries as will fit, rounded down to the nearest
* A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
#define __CONST_RING_SIZE(_s, _sz) \
@ -63,17 +57,7 @@ typedef unsigned int RING_IDX;
* The same for passing in an actual pointer instead of a name tag.
*/
#define __RING_SIZE(_s, _sz) \
(__RD32(((_sz) - __RING_HEADER_SIZE(_s)) / sizeof((_s)->ring[0])))
/*
* The number of pages needed to support a given number of request/reponse
* entries. The entry count is rounded down to the nearest power of two
* as required by the ring macros.
*/
#define __RING_PAGES(_s, _entries) \
((__RING_HEADER_SIZE(_s) \
+ (__RD32(_entries) * sizeof((_s)->ring[0])) \
+ PAGE_SIZE - 1) / PAGE_SIZE)
(__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
/*
* Macros to make the correct C datatypes for a new kind of ring.
@ -127,7 +111,7 @@ struct __name##_sring { \
uint8_t msg; \
} tapif_user; \
uint8_t pvt_pad[4]; \
} private; \
} pvt; \
uint8_t __pad[44]; \
union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
@ -172,7 +156,7 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = (_s)->rsp_prod = 0; \
(_s)->req_event = (_s)->rsp_event = 1; \
(void)memset((_s)->private.pvt_pad, 0, sizeof((_s)->private.pvt_pad)); \
(void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \
(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
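A minimal sketch of how these macros fit together: define ring types for a pair of placeholder request/response structures, then initialise the shared page and the frontend's private view. Only DEFINE_RING_TYPES, SHARED_RING_INIT and FRONT_RING_INIT come from this header; the demo types and PAGE_SIZE are assumptions from the surrounding environment.

#include <xen/interface/io/ring.h>

struct demo_request  { uint64_t id; };
struct demo_response { uint64_t id; int16_t status; };
DEFINE_RING_TYPES(demo, struct demo_request, struct demo_response);

static void demo_front_init(demo_front_ring_t *front, void *shared_page)
{
    demo_sring_t *sring = shared_page;   /* one granted, shared page */

    SHARED_RING_INIT(sring);             /* zero producers, arm events */
    FRONT_RING_INIT(front, sring, PAGE_SIZE);
}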
@ -190,21 +174,6 @@ typedef struct __name##_back_ring __name##_back_ring_t
(_r)->sring = (_s); \
} while (0)
/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do { \
(_r)->sring = (_s); \
(_r)->req_prod_pvt = (_s)->req_prod; \
(_r)->rsp_cons = (_s)->rsp_prod; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
#define BACK_RING_ATTACH(_r, _s, __size) do { \
(_r)->sring = (_s); \
(_r)->rsp_prod_pvt = (_s)->rsp_prod; \
(_r)->req_cons = (_s)->req_prod; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
/* How big is this ring? */
#define RING_SIZE(_r) \
((_r)->nr_ents)
@ -250,6 +219,10 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
#define RING_PUSH_REQUESTS(_r) do { \
xen_wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
@ -331,7 +304,7 @@ typedef struct __name##_back_ring __name##_back_ring_t
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -64,12 +64,78 @@ struct tpmif_tx_interface {
};
typedef struct tpmif_tx_interface tpmif_tx_interface_t;
/******************************************************************************
* TPM I/O interface for Xen guest OSes, v2
*
* Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
*
* This protocol emulates the request/response behavior of a TPM using a Xen
* shared memory interface. All interaction with the TPM is at the direction
* of the frontend, since a TPM (hardware or virtual) is a passive device -
* the backend only processes commands as requested by the frontend.
*
* The frontend sends a request to the TPM by populating the shared page with
* the request packet, changing the state to TPMIF_STATE_SUBMIT, and sending
 * an event channel notification. When the backend is finished, it will set
* the state to TPMIF_STATE_FINISH and send an event channel notification.
*
* In order to allow long-running commands to be canceled, the frontend can
* at any time change the state to TPMIF_STATE_CANCEL and send a notification.
* The TPM can either finish the command (changing state to TPMIF_STATE_FINISH)
* or can cancel the command and change the state to TPMIF_STATE_IDLE. The TPM
* can also change the state to TPMIF_STATE_IDLE instead of TPMIF_STATE_FINISH
* if another reason for cancellation is required - for example, a physical
* TPM may cancel a command if the interface is seized by another locality.
*
* The TPM command format is defined by the TCG, and is available at
* http://www.trustedcomputinggroup.org/resources/tpm_main_specification
*/
enum tpmif_state {
TPMIF_STATE_IDLE, /* no contents / vTPM idle / cancel complete */
TPMIF_STATE_SUBMIT, /* request ready / vTPM working */
TPMIF_STATE_FINISH, /* response ready / vTPM idle */
TPMIF_STATE_CANCEL, /* cancel requested / vTPM working */
};
/* Note: The backend should only change state to IDLE or FINISH, while the
* frontend should only change to SUBMIT or CANCEL. Status changes do not need
* to use atomic operations.
*/
/* The shared page for vTPM request/response packets looks like:
*
* Offset Contents
* =================================================
* 0 struct tpmif_shared_page
* 16 [optional] List of grant IDs
* 16+4*nr_extra_pages TPM packet data
*
* If the TPM packet data extends beyond the end of a single page, the grant IDs
* defined in extra_pages are used as if they were mapped immediately following
* the primary shared page. The grants are allocated by the frontend and mapped
* by the backend. Before sending a request spanning multiple pages, the
* frontend should verify that the TPM supports such large requests by querying
* the TPM_CAP_PROP_INPUT_BUFFER property from the TPM.
*/
struct tpmif_shared_page {
uint32_t length; /* request/response length in bytes */
uint8_t state; /* enum tpmif_state */
uint8_t locality; /* for the current request */
uint8_t pad; /* should be zero */
uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */
};
typedef struct tpmif_shared_page tpmif_shared_page_t;
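A minimal sketch of the submit path described above, assuming the command fits in the primary shared page; the notify_backend callback stands in for the event-channel kick and the include path is an assumption.

#include <string.h>
#include <xen/interface/io/tpmif.h>

static void vtpm_submit(struct tpmif_shared_page *shr,
                        const void *cmd, uint32_t len,
                        void (*notify_backend)(void))
{
    uint8_t *data;

    shr->nr_extra_pages = 0;                 /* no grant-ID list needed */
    data = (uint8_t *)&shr->extra_pages[0];  /* packet data follows header */
    memcpy(data, cmd, len);
    shr->length = len;                       /* request length in bytes */
    shr->state  = TPMIF_STATE_SUBMIT;        /* frontend -> backend */
    notify_backend();                        /* event channel notification */
}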
#endif
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -31,6 +31,76 @@
#include "ring.h"
#include "../grant_table.h"
/*
* Feature and Parameter Negotiation
* =================================
* The two halves of a Xen pvUSB driver utilize nodes within the XenStore to
* communicate capabilities and to negotiate operating parameters. This
* section enumerates these nodes which reside in the respective front and
* backend portions of the XenStore, following the XenBus convention.
*
* Any specified default value is in effect if the corresponding XenBus node
* is not present in the XenStore.
*
* XenStore nodes in sections marked "PRIVATE" are solely for use by the
* driver side whose XenBus tree contains them.
*
*****************************************************************************
* Backend XenBus Nodes
*****************************************************************************
*
*------------------ Backend Device Identification (PRIVATE) ------------------
*
* num-ports
* Values: unsigned [1...31]
*
* Number of ports for this (virtual) USB host connector.
*
* usb-ver
* Values: unsigned [1...2]
*
* USB version of this host connector: 1 = USB 1.1, 2 = USB 2.0.
*
* port/[1...31]
* Values: string
*
* Physical USB device connected to the given port, e.g. "3-1.5".
*
*****************************************************************************
* Frontend XenBus Nodes
*****************************************************************************
*
*----------------------- Request Transport Parameters -----------------------
*
* event-channel
* Values: unsigned
*
* The identifier of the Xen event channel used to signal activity
* in the ring buffer.
*
* urb-ring-ref
* Values: unsigned
*
* The Xen grant reference granting permission for the backend to map
* the sole page in a single page sized ring buffer. This is the ring
* buffer for urb requests.
*
* conn-ring-ref
* Values: unsigned
*
* The Xen grant reference granting permission for the backend to map
* the sole page in a single page sized ring buffer. This is the ring
* buffer for connection/disconnection requests.
*
* protocol
* Values: string (XEN_IO_PROTO_ABI_*)
* Default Value: XEN_IO_PROTO_ABI_NATIVE
*
* The machine ABI rules governing the format of all ring request and
* response structures.
*
*/
enum usb_spec_version {
USB_VER_UNKNOWN = 0,
USB_VER_USB11,
@ -41,38 +111,65 @@ enum usb_spec_version {
/*
* USB pipe in usbif_request
*
* bits 0-5 are specific bits for virtual USB driver.
* bits 7-31 are standard urb pipe.
* - port number: bits 0-4
* (USB_MAXCHILDREN is 31)
*
* - port number(NEW): bits 0-4
* (USB_MAXCHILDREN is 31)
*
* - operation flag(NEW): bit 5
* (0 = submit urb,
* 1 = unlink urb)
* - operation flag: bit 5
* (0 = submit urb,
* 1 = unlink urb)
*
* - direction: bit 7
* (0 = Host-to-Device [Out]
* 1 = Device-to-Host [In])
* (0 = Host-to-Device [Out]
* 1 = Device-to-Host [In])
*
* - device address: bits 8-14
*
* - endpoint: bits 15-18
*
* - pipe type: bits 30-31
* (00 = isochronous, 01 = interrupt,
* 10 = control, 11 = bulk)
* - pipe type: bits 30-31
* (00 = isochronous, 01 = interrupt,
* 10 = control, 11 = bulk)
*/
#define usbif_pipeportnum(pipe) ((pipe) & 0x1f)
#define usbif_setportnum_pipe(pipe, portnum) \
((pipe)|(portnum))
#define usbif_pipeunlink(pipe) ((pipe) & 0x20)
#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
#define usbif_setunlink_pipe(pipe) ((pipe)|(0x20))
#define USBIF_PIPE_PORT_MASK 0x0000001f
#define USBIF_PIPE_UNLINK 0x00000020
#define USBIF_PIPE_DIR 0x00000080
#define USBIF_PIPE_DEV_MASK 0x0000007f
#define USBIF_PIPE_DEV_SHIFT 8
#define USBIF_PIPE_EP_MASK 0x0000000f
#define USBIF_PIPE_EP_SHIFT 15
#define USBIF_PIPE_TYPE_MASK 0x00000003
#define USBIF_PIPE_TYPE_SHIFT 30
#define USBIF_PIPE_TYPE_ISOC 0
#define USBIF_PIPE_TYPE_INT 1
#define USBIF_PIPE_TYPE_CTRL 2
#define USBIF_PIPE_TYPE_BULK 3
#define usbif_pipeportnum(pipe) ((pipe) & USBIF_PIPE_PORT_MASK)
#define usbif_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
#define usbif_pipeunlink(pipe) ((pipe) & USBIF_PIPE_UNLINK)
#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
#define usbif_setunlink_pipe(pipe) ((pipe) | USBIF_PIPE_UNLINK)
#define usbif_pipein(pipe) ((pipe) & USBIF_PIPE_DIR)
#define usbif_pipeout(pipe) (!usbif_pipein(pipe))
#define usbif_pipedevice(pipe) \
(((pipe) >> USBIF_PIPE_DEV_SHIFT) & USBIF_PIPE_DEV_MASK)
#define usbif_pipeendpoint(pipe) \
(((pipe) >> USBIF_PIPE_EP_SHIFT) & USBIF_PIPE_EP_MASK)
#define usbif_pipetype(pipe) \
(((pipe) >> USBIF_PIPE_TYPE_SHIFT) & USBIF_PIPE_TYPE_MASK)
#define usbif_pipeisoc(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_ISOC)
#define usbif_pipeint(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_INT)
#define usbif_pipectrl(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_CTRL)
#define usbif_pipebulk(pipe) (usbif_pipetype(pipe) == USBIF_PIPE_TYPE_BULK)
#define USBIF_BACK_MAX_PENDING_REQS (128)
#define USBIF_MAX_SEGMENTS_PER_REQUEST (16)
#define USBIF_MAX_PORTNR 31
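A minimal sketch of composing a pipe value with the masks just defined (a bulk IN endpoint) and checking it with the decode macros; the helper name and include path are illustrative.

#include <xen/interface/io/usbif.h>

static uint32_t make_bulk_in_pipe(uint32_t portnum, uint32_t devaddr,
                                  uint32_t endpoint)
{
    uint32_t pipe = 0;

    pipe |= portnum & USBIF_PIPE_PORT_MASK;                    /* bits 0-4 */
    pipe |= USBIF_PIPE_DIR;                                    /* IN */
    pipe |= (devaddr & USBIF_PIPE_DEV_MASK) << USBIF_PIPE_DEV_SHIFT;
    pipe |= (endpoint & USBIF_PIPE_EP_MASK) << USBIF_PIPE_EP_SHIFT;
    pipe |= USBIF_PIPE_TYPE_BULK << USBIF_PIPE_TYPE_SHIFT;

    return (usbif_pipebulk(pipe) && usbif_pipein(pipe)) ? pipe : 0;
}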
/*
* RING for transferring urbs.
@ -142,6 +239,10 @@ struct usbif_conn_response {
uint16_t id; /* request id */
uint8_t portnum; /* port number */
uint8_t speed; /* usb_device_speed */
#define USBIF_SPEED_NONE 0
#define USBIF_SPEED_LOW 1
#define USBIF_SPEED_FULL 2
#define USBIF_SPEED_HIGH 3
};
typedef struct usbif_conn_response usbif_conn_response_t;

View File

@ -1,8 +1,8 @@
/******************************************************************************
* vscsiif.h
*
*
* Based on the blkif.h code.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -30,57 +30,212 @@
#include "ring.h"
#include "../grant_table.h"
/* command between backend and frontend */
#define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */
#define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/
#define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/
/*
* Feature and Parameter Negotiation
* =================================
* The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to
* communicate capabilities and to negotiate operating parameters. This
* section enumerates these nodes which reside in the respective front and
* backend portions of the XenStore, following the XenBus convention.
*
* Any specified default value is in effect if the corresponding XenBus node
* is not present in the XenStore.
*
* XenStore nodes in sections marked "PRIVATE" are solely for use by the
* driver side whose XenBus tree contains them.
*
*****************************************************************************
* Backend XenBus Nodes
*****************************************************************************
*
*------------------ Backend Device Identification (PRIVATE) ------------------
*
* p-devname
* Values: string
*
* A free string used to identify the physical device (e.g. a disk name).
*
* p-dev
* Values: string
*
* A string specifying the backend device: either a 4-tuple "h:c:t:l"
* (host, controller, target, lun, all integers), or a WWN (e.g.
* "naa.60014054ac780582").
*
* v-dev
* Values: string
*
* A string specifying the frontend device in form of a 4-tuple "h:c:t:l"
* (host, controller, target, lun, all integers).
*
*--------------------------------- Features ---------------------------------
*
* feature-sg-grant
* Values: unsigned [VSCSIIF_SG_TABLESIZE...65535]
* Default Value: 0
*
* Specifies the maximum number of scatter/gather elements in grant pages
* supported. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE
* SG elements specified directly in the request.
*
*****************************************************************************
* Frontend XenBus Nodes
*****************************************************************************
*
*----------------------- Request Transport Parameters -----------------------
*
* event-channel
* Values: unsigned
*
* The identifier of the Xen event channel used to signal activity
* in the ring buffer.
*
* ring-ref
* Values: unsigned
*
* The Xen grant reference granting permission for the backend to map
* the sole page in a single page sized ring buffer.
*
* protocol
* Values: string (XEN_IO_PROTO_ABI_*)
* Default Value: XEN_IO_PROTO_ABI_NATIVE
*
* The machine ABI rules governing the format of all ring request and
* response structures.
*/
/* Requests from the frontend to the backend */
#define VSCSIIF_BACK_MAX_PENDING_REQS 128
/*
* Request a SCSI operation specified via a CDB in vscsiif_request.cmnd.
* The target is specified via channel, id and lun.
*
* The operation to be performed is specified via a CDB in cmnd[], the length
* of the CDB is in cmd_len. sc_data_direction specifies the direction of data
* (to the device, from the device, or none at all).
*
* If data is to be transferred to or from the device the buffer(s) in the
* guest memory is/are specified via one or multiple scsiif_request_segment
 * descriptors each specifying a memory page via a grant_ref_t, an offset into
* the page and the length of the area in that page. All scsiif_request_segment
* areas concatenated form the resulting data buffer used by the operation.
* If the number of scsiif_request_segment areas is not too large (less than
* or equal VSCSIIF_SG_TABLESIZE) the areas can be specified directly in the
* seg[] array and the number of valid scsiif_request_segment elements is to be
* set in nr_segments.
*
* If "feature-sg-grant" in the Xenstore is set it is possible to specify more
* than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection.
* The maximum number of allowed scsiif_request_segment elements is the value
* of the "feature-sg-grant" entry from Xenstore. When using indirection the
* seg[] array doesn't contain specifications of the data buffers, but
* references to scsiif_request_segment arrays, which in turn reference the
* data buffers. While nr_segments holds the number of populated seg[] entries
* (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
* elements referencing the target data buffers is calculated from the lengths
* of the seg[] elements (the sum of all valid seg[].length divided by the
* size of one scsiif_request_segment structure). The frontend may use a mix of
* direct and indirect requests.
*/
#define VSCSIIF_ACT_SCSI_CDB 1
/*
* Request abort of a running operation for the specified target given by
* channel, id, lun and the operation's rqid in ref_rqid.
*/
#define VSCSIIF_ACT_SCSI_ABORT 2
/*
* Request a device reset of the specified target (channel and id).
*/
#define VSCSIIF_ACT_SCSI_RESET 3
/*
* Preset scatter/gather elements for a following request. Deprecated.
* Keeping the define only to avoid usage of the value "4" for other actions.
*/
#define VSCSIIF_ACT_SCSI_SG_PRESET 4
/*
* Maximum scatter/gather segments per request.
*
* Considering balance between allocating al least 16 "vscsiif_request"
* structures on one page (4096bytes) and number of scatter gather
* needed, we decided to use 26 as a magic number.
* Considering balance between allocating at least 16 "vscsiif_request"
* structures on one page (4096 bytes) and the number of scatter/gather
* elements needed, we decided to use 26 as a magic number.
*
* If "feature-sg-grant" is set, more scatter/gather elements can be specified
* by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages.
* In this case the vscsiif_request seg elements don't contain references to
* the user data, but to the SG elements referencing the user data.
*/
#define VSCSIIF_SG_TABLESIZE 26
/*
* base on linux kernel 2.6.18
* based on Linux kernel 2.6.18, still valid
*
* Changing these values requires support of multiple protocols via the rings
* as "old clients" will blindly use these values and the resulting structure
* sizes.
*/
#define VSCSIIF_MAX_COMMAND_SIZE 16
#define VSCSIIF_SENSE_BUFFERSIZE 96
struct scsiif_request_segment {
grant_ref_t gref;
uint16_t offset;
uint16_t length;
};
typedef struct scsiif_request_segment vscsiif_segment_t;
#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
/* Size of one request is 252 bytes */
struct vscsiif_request {
uint16_t rqid; /* private guest value, echoed in resp */
uint8_t act; /* command between backend and frontend */
uint8_t cmd_len;
uint8_t cmd_len; /* valid CDB bytes */
uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
uint16_t timeout_per_command; /* The command is issued by twice
the value in Backend. */
uint16_t channel, id, lun;
uint16_t padding;
uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
DMA_FROM_DEVICE(2)
DMA_NONE(3) requests */
uint8_t nr_segments; /* Number of pieces of scatter-gather */
uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; /* the CDB */
uint16_t timeout_per_command; /* deprecated: timeout in secs, 0=default */
uint16_t channel, id, lun; /* (virtual) device specification */
uint16_t ref_rqid; /* command abort reference */
uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
DMA_FROM_DEVICE(2)
DMA_NONE(3) requests */
uint8_t nr_segments; /* Number of pieces of scatter-gather */
/*
* flag in nr_segments: SG elements via grant page
*
* If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number
* of grant pages containing SG elements. Usable if "feature-sg-grant" set.
*/
#define VSCSIIF_SG_GRANT 0x80
struct scsiif_request_segment {
grant_ref_t gref;
uint16_t offset;
uint16_t length;
} seg[VSCSIIF_SG_TABLESIZE];
vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
uint32_t reserved[3];
};
typedef struct vscsiif_request vscsiif_request_t;
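A minimal sketch of a direct (non-indirect) request as described above: one data segment read from the device, so nr_segments stays within VSCSIIF_SG_TABLESIZE and no "feature-sg-grant" indirection is used; the include path and the zeroed channel/id/lun are illustrative.

#include <string.h>
#include <xen/interface/io/vscsiif.h>

static void fill_scsi_req(vscsiif_request_t *req, uint16_t rqid,
                          const uint8_t *cdb, uint8_t cdb_len,
                          grant_ref_t gref, uint16_t offset, uint16_t length)
{
    memset(req, 0, sizeof(*req));
    req->rqid    = rqid;                  /* echoed in the response */
    req->act     = VSCSIIF_ACT_SCSI_CDB;
    req->cmd_len = cdb_len;               /* valid CDB bytes */
    memcpy(req->cmnd, cdb, cdb_len);
    req->channel = 0;                     /* virtual device coordinates */
    req->id      = 0;
    req->lun     = 0;
    req->sc_data_direction = 2;           /* DMA_FROM_DEVICE */
    req->nr_segments = 1;                 /* direct segment, no SG_GRANT */
    req->seg[0].gref   = gref;
    req->seg[0].offset = offset;
    req->seg[0].length = length;
}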
/*
* The following interface is deprecated!
*/
#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) \
/ sizeof(vscsiif_segment_t))
struct vscsiif_sg_list {
/* First two fields must match struct vscsiif_request! */
uint16_t rqid; /* private guest value, must match main req */
uint8_t act; /* VSCSIIF_ACT_SCSI_SG_PRESET */
uint8_t nr_segments; /* Number of pieces of scatter-gather */
vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
};
typedef struct vscsiif_sg_list vscsiif_sg_list_t;
/* End of deprecated interface */
/* Size of one response is 252 bytes */
struct vscsiif_response {
uint16_t rqid;
uint8_t padding;
uint16_t rqid; /* identifies request */
uint8_t act; /* deprecated: valid only if SG_PRESET supported */
uint8_t sense_len;
uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
int32_t rslt;
@ -97,7 +252,7 @@ DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -36,9 +36,6 @@
enum xenbus_state {
XenbusStateUnknown = 0,
/*
* Initializing: Back-end is initializing.
*/
XenbusStateInitialising = 1,
/*
@ -52,9 +49,6 @@ enum xenbus_state {
*/
XenbusStateInitialised = 3,
/*
* Connected: The normal state for a front to backend connection.
*/
XenbusStateConnected = 4,
/*
@ -62,18 +56,6 @@ enum xenbus_state {
*/
XenbusStateClosing = 5,
/*
* Closed: No connection exists between front and back end.
*
* For backend devices with the "online" attribute, the front can
* request a reconnect at any time. To handle this transition
* gracefully, backend devices must reinitialize any XenStore data
* used to negotiate features with a peer before transitioning to
* the closed state. When a reconnect request occurs, the
* XenBus backend support code will automatically transition the
* backend device from Closed to InitWait, kicking off the ring
* and feature negotiation process.
*/
XenbusStateClosed = 6,
/*
@ -90,7 +72,7 @@ typedef enum xenbus_state XenbusState;
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -49,7 +49,9 @@ enum xsd_sockmsg_type
XS_RESUME,
XS_SET_TARGET,
XS_RESTRICT,
XS_RESET_WATCHES
XS_RESET_WATCHES,
XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};
#define XS_WRITE_NONE "NONE"
@ -83,7 +85,8 @@ __attribute__((unused))
XSD_ERROR(EROFS),
XSD_ERROR(EBUSY),
XSD_ERROR(EAGAIN),
XSD_ERROR(EISCONN)
XSD_ERROR(EISCONN),
XSD_ERROR(E2BIG)
};
#endif
@ -103,7 +106,10 @@ enum xs_watch_type
XS_WATCH_TOKEN
};
/* Inter-domain shared memory communications. */
/*
* `incontents 150 xenstore_struct XenStore wire protocol.
*
* Inter-domain shared memory communications. */
#define XENSTORE_RING_SIZE 1024
typedef uint32_t XENSTORE_RING_IDX;
#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
@ -112,6 +118,8 @@ struct xenstore_domain_interface {
char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
XENSTORE_RING_IDX req_cons, req_prod;
XENSTORE_RING_IDX rsp_cons, rsp_prod;
uint32_t server_features; /* Bitmap of features supported by the server */
uint32_t connection;
};
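A minimal sketch of the request-ring producer side using the layout above; a real client must additionally order the data copy before the producer update (memory barrier) and send an event-channel notification, both of which are only noted in comments here.

#include <xen/interface/io/xs_wire.h>

static int xs_write_bytes(struct xenstore_domain_interface *intf,
                          const char *data, uint32_t len)
{
    XENSTORE_RING_IDX cons = intf->req_cons, prod = intf->req_prod;
    uint32_t i;

    if (len > XENSTORE_RING_SIZE - (prod - cons))
        return -1;                             /* not enough free space */
    for (i = 0; i < len; i++)
        intf->req[MASK_XENSTORE_IDX(prod + i)] = data[i];
    /* write barrier here, then publish and notify the other end */
    intf->req_prod = prod + len;
    return 0;
}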
/* Violating this is very bad. See docs/misc/xenstore.txt. */
@ -121,12 +129,19 @@ struct xenstore_domain_interface {
#define XENSTORE_ABS_PATH_MAX 3072
#define XENSTORE_REL_PATH_MAX 2048
/* The ability to reconnect a ring */
#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
/* Valid values for the connection field */
#define XENSTORE_CONNECTED 0 /* the steady-state */
#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */
#endif /* _XS_WIRE_H */
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -96,9 +96,6 @@
typedef struct xen_kexec_image {
#if defined(__i386__) || defined(__x86_64__)
unsigned long page_list[KEXEC_XEN_NO_PAGES];
#endif
#if defined(__ia64__)
unsigned long reboot_code_buffer;
#endif
unsigned long indirection_page;
unsigned long start_address;
@ -108,6 +105,20 @@ typedef struct xen_kexec_image {
* Perform kexec having previously loaded a kexec or kdump kernel
* as appropriate.
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
*
* Control is transferred to the image entry point with the host in
* the following state.
*
* - The image may be executed on any PCPU and all other PCPUs are
* stopped.
*
* - Local interrupts are disabled.
*
* - Register values are undefined.
*
* - The image segments have writeable 1:1 virtual to machine
* mappings. The location of any page tables is undefined and these
 * page table frames are not mapped.
*/
#define KEXEC_CMD_kexec 0
typedef struct xen_kexec_exec {
@ -119,12 +130,12 @@ typedef struct xen_kexec_exec {
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
* image == relocation information for kexec (ignored for unload) [in]
*/
#define KEXEC_CMD_kexec_load 1
#define KEXEC_CMD_kexec_unload 2
typedef struct xen_kexec_load {
#define KEXEC_CMD_kexec_load_v1 1 /* obsolete since 0x00040400 */
#define KEXEC_CMD_kexec_unload_v1 2 /* obsolete since 0x00040400 */
typedef struct xen_kexec_load_v1 {
int type;
xen_kexec_image_t image;
} xen_kexec_load_t;
} xen_kexec_load_v1_t;
#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */
#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */
@ -134,7 +145,7 @@ typedef struct xen_kexec_load {
* to Xen it exists in a separate EFI
* region on ia64, and thus needs to be
* inserted into iomem_machine separately */
#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of
#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* Obsolete: machine address and size of
* the ia64_boot_param */
#define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of
* of the EFI Memory Map */
@ -155,12 +166,82 @@ typedef struct xen_kexec_range {
unsigned long start;
} xen_kexec_range_t;
#if __XEN_INTERFACE_VERSION__ >= 0x00040400
/*
 * A contiguous chunk of a kexec image and its destination machine
* address.
*/
typedef struct xen_kexec_segment {
union {
XEN_GUEST_HANDLE(const_void) h;
uint64_t _pad;
} buf;
uint64_t buf_size;
uint64_t dest_maddr;
uint64_t dest_size;
} xen_kexec_segment_t;
DEFINE_XEN_GUEST_HANDLE(xen_kexec_segment_t);
/*
* Load a kexec image into memory.
*
* For KEXEC_TYPE_DEFAULT images, the segments may be anywhere in RAM.
* The image is relocated prior to being executed.
*
* For KEXEC_TYPE_CRASH images, each segment of the image must reside
* in the memory region reserved for kexec (KEXEC_RANGE_MA_CRASH) and
* the entry point must be within the image. The caller is responsible
* for ensuring that multiple images do not overlap.
*
* All image segments will be loaded to their destination machine
* addresses prior to being executed. The trailing portion of any
* segments with a source buffer (from dest_maddr + buf_size to
* dest_maddr + dest_size) will be zeroed.
*
* Segments with no source buffer will be accessible to the image when
* it is executed.
*/
#define KEXEC_CMD_kexec_load 4
typedef struct xen_kexec_load {
uint8_t type; /* One of KEXEC_TYPE_* */
uint8_t _pad;
uint16_t arch; /* ELF machine type (EM_*). */
uint32_t nr_segments;
union {
XEN_GUEST_HANDLE(xen_kexec_segment_t) h;
uint64_t _pad;
} segments;
uint64_t entry_maddr; /* image entry point machine address. */
} xen_kexec_load_t;
DEFINE_XEN_GUEST_HANDLE(xen_kexec_load_t);
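A minimal sketch of populating the v2 load structure above for a single-segment KEXEC_TYPE_DEFAULT image; set_xen_guest_handle() and the EM_* machine value come from the wider public headers and are assumptions here, and the eventual kexec_op hypercall is only noted in a comment.

#include <string.h>
#include <xen/interface/xen.h>
#include <xen/interface/kexec.h>

static void fill_kexec_load(xen_kexec_load_t *load, xen_kexec_segment_t *seg,
                            void *image, uint64_t size, uint64_t dest_maddr,
                            uint64_t entry_maddr, uint16_t elf_machine)
{
    memset(seg, 0, sizeof(*seg));
    memset(load, 0, sizeof(*load));

    set_xen_guest_handle(seg->buf.h, image);  /* source buffer in the guest */
    seg->buf_size   = size;
    seg->dest_maddr = dest_maddr;             /* destination machine address */
    seg->dest_size  = size;                   /* no zeroed tail in this case */

    load->type        = KEXEC_TYPE_DEFAULT;
    load->arch        = elf_machine;          /* ELF machine type (EM_*) */
    load->nr_segments = 1;
    set_xen_guest_handle(load->segments.h, seg);
    load->entry_maddr = entry_maddr;          /* image entry point */
    /* then: HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, load); */
}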
/*
* Unload a kexec image.
*
* Type must be one of KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
*/
#define KEXEC_CMD_kexec_unload 5
typedef struct xen_kexec_unload {
uint8_t type;
} xen_kexec_unload_t;
DEFINE_XEN_GUEST_HANDLE(xen_kexec_unload_t);
#else /* __XEN_INTERFACE_VERSION__ < 0x00040400 */
#define KEXEC_CMD_kexec_load KEXEC_CMD_kexec_load_v1
#define KEXEC_CMD_kexec_unload KEXEC_CMD_kexec_unload_v1
#define xen_kexec_load xen_kexec_load_v1
#define xen_kexec_load_t xen_kexec_load_v1_t
#endif
#endif /* _XEN_PUBLIC_KEXEC_H */
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -1,265 +0,0 @@
/******************************************************************************
* libelf.h
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __XC_LIBELF__
#define __XC_LIBELF__ 1
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
#define XEN_ELF_LITTLE_ENDIAN
#else
#error define architectural endianness
#endif
#undef ELFSIZE
#include "elfnote.h"
#include "elfstructs.h"
#include "features.h"
/* ------------------------------------------------------------------------ */
typedef union {
Elf32_Ehdr e32;
Elf64_Ehdr e64;
} elf_ehdr;
typedef union {
Elf32_Phdr e32;
Elf64_Phdr e64;
} elf_phdr;
typedef union {
Elf32_Shdr e32;
Elf64_Shdr e64;
} elf_shdr;
typedef union {
Elf32_Sym e32;
Elf64_Sym e64;
} elf_sym;
typedef union {
Elf32_Rel e32;
Elf64_Rel e64;
} elf_rel;
typedef union {
Elf32_Rela e32;
Elf64_Rela e64;
} elf_rela;
typedef union {
Elf32_Note e32;
Elf64_Note e64;
} elf_note;
struct elf_binary {
/* elf binary */
const char *image;
size_t size;
char class;
char data;
const elf_ehdr *ehdr;
const char *sec_strtab;
const elf_shdr *sym_tab;
const char *sym_strtab;
/* loaded to */
char *dest;
uint64_t pstart;
uint64_t pend;
uint64_t reloc_offset;
uint64_t bsd_symtab_pstart;
uint64_t bsd_symtab_pend;
#ifndef __XEN__
/* misc */
FILE *log;
#endif
int verbose;
};
/* ------------------------------------------------------------------------ */
/* accessing elf header fields */
#ifdef XEN_ELF_BIG_ENDIAN
# define NATIVE_ELFDATA ELFDATA2MSB
#else
# define NATIVE_ELFDATA ELFDATA2LSB
#endif
#define elf_32bit(elf) (ELFCLASS32 == (elf)->class)
#define elf_64bit(elf) (ELFCLASS64 == (elf)->class)
#define elf_msb(elf) (ELFDATA2MSB == (elf)->data)
#define elf_lsb(elf) (ELFDATA2LSB == (elf)->data)
#define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data)
#define elf_uval(elf, str, elem) \
((ELFCLASS64 == (elf)->class) \
? elf_access_unsigned((elf), (str), \
offsetof(typeof(*(str)),e64.elem), \
sizeof((str)->e64.elem)) \
: elf_access_unsigned((elf), (str), \
offsetof(typeof(*(str)),e32.elem), \
sizeof((str)->e32.elem)))
#define elf_sval(elf, str, elem) \
((ELFCLASS64 == (elf)->class) \
? elf_access_signed((elf), (str), \
offsetof(typeof(*(str)),e64.elem), \
sizeof((str)->e64.elem)) \
: elf_access_signed((elf), (str), \
offsetof(typeof(*(str)),e32.elem), \
sizeof((str)->e32.elem)))
#define elf_size(elf, str) \
((ELFCLASS64 == (elf)->class) \
? sizeof((str)->e64) : sizeof((str)->e32))
uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr,
uint64_t offset, size_t size);
int64_t elf_access_signed(struct elf_binary *elf, const void *ptr,
uint64_t offset, size_t size);
uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr);
/* ------------------------------------------------------------------------ */
/* xc_libelf_tools.c */
int elf_shdr_count(struct elf_binary *elf);
int elf_phdr_count(struct elf_binary *elf);
const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name);
const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index);
const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index);
const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr);
const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr);
const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr);
const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr);
const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr);
const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol);
const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index);
const char *elf_note_name(struct elf_binary *elf, const elf_note * note);
const void *elf_note_desc(struct elf_binary *elf, const elf_note * note);
uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note);
const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note);
int elf_is_elfbinary(const void *image);
int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr);
/* ------------------------------------------------------------------------ */
/* xc_libelf_loader.c */
int elf_init(struct elf_binary *elf, const char *image, size_t size);
#ifdef __XEN__
void elf_set_verbose(struct elf_binary *elf);
#else
void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose);
#endif
void elf_parse_binary(struct elf_binary *elf);
void elf_load_binary(struct elf_binary *elf);
void *elf_get_ptr(struct elf_binary *elf, unsigned long addr);
uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol);
void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart); /* private */
/* ------------------------------------------------------------------------ */
/* xc_libelf_relocate.c */
int elf_reloc(struct elf_binary *elf);
/* ------------------------------------------------------------------------ */
/* xc_libelf_dominfo.c */
#define UNSET_ADDR ((uint64_t)-1)
enum xen_elfnote_type {
XEN_ENT_NONE = 0,
XEN_ENT_LONG = 1,
XEN_ENT_STR = 2
};
struct xen_elfnote {
enum xen_elfnote_type type;
const char *name;
union {
const char *str;
uint64_t num;
} data;
};
struct elf_dom_parms {
/* raw */
const char *guest_info;
const void *elf_note_start;
const void *elf_note_end;
struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1];
/* parsed */
char guest_os[16];
char guest_ver[16];
char xen_ver[16];
char loader[16];
int pae;
int bsd_symtab;
uint64_t virt_base;
uint64_t virt_entry;
uint64_t virt_hypercall;
uint64_t virt_hv_start_low;
uint64_t elf_paddr_offset;
uint32_t f_supported[XENFEAT_NR_SUBMAPS];
uint32_t f_required[XENFEAT_NR_SUBMAPS];
/* calculated */
uint64_t virt_offset;
uint64_t virt_kstart;
uint64_t virt_kend;
};
static inline void elf_xen_feature_set(int nr, uint32_t * addr)
{
addr[nr >> 5] |= 1 << (nr & 31);
}
static inline int elf_xen_feature_get(int nr, uint32_t * addr)
{
return !!(addr[nr >> 5] & (1 << (nr & 31)));
}
int elf_xen_parse_features(const char *features,
uint32_t *supported,
uint32_t *required);
int elf_xen_parse_note(struct elf_binary *elf,
struct elf_dom_parms *parms,
const elf_note *note);
int elf_xen_parse_guest_info(struct elf_binary *elf,
struct elf_dom_parms *parms);
int elf_xen_parse(struct elf_binary *elf,
struct elf_dom_parms *parms);
#endif /* __XC_LIBELF__ */

View File

@ -1,80 +0,0 @@
/******************************************************************************
* mem_event.h
*
* Memory event common structures.
*
* Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _XEN_PUBLIC_MEM_EVENT_H
#define _XEN_PUBLIC_MEM_EVENT_H
#include "xen.h"
#include "io/ring.h"
/* Memory event flags */
#define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
#define MEM_EVENT_FLAG_DROP_PAGE (1 << 1)
#define MEM_EVENT_FLAG_EVICT_FAIL (1 << 2)
#define MEM_EVENT_FLAG_FOREIGN (1 << 3)
#define MEM_EVENT_FLAG_DUMMY (1 << 4)
/* Reasons for the memory event request */
#define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */
#define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */
#define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is CR0 value */
#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */
#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */
#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
typedef struct mem_event_st {
uint32_t flags;
uint32_t vcpu_id;
uint64_t gfn;
uint64_t offset;
uint64_t gla; /* if gla_valid */
uint32_t p2mt;
uint16_t access_r:1;
uint16_t access_w:1;
uint16_t access_x:1;
uint16_t gla_valid:1;
uint16_t available:12;
uint16_t reason;
} mem_event_request_t, mem_event_response_t;
DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
#endif
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -28,6 +28,7 @@
#define __XEN_PUBLIC_MEMORY_H__
#include "xen.h"
#include "physdev.h"
/*
* Increase or decrease the specified domain's memory reservation. Returns the
@ -55,6 +56,8 @@
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
/* Flag to indicate the node specified is virtual node */
#define XENMEMF_vnode (1<<18)
#endif
struct xen_memory_reservation {
@ -68,6 +71,8 @@ struct xen_memory_reservation {
* IN: GPFN bases of extents to populate with memory
* OUT: GMFN bases of extents that were allocated
* (NB. This command also updates the mach_to_phys translation table)
* XENMEM_claim_pages:
* IN: must be zero
*/
XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
@ -184,6 +189,15 @@ struct xen_machphys_mfn_list {
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
/*
* For a compat caller, this is identical to XENMEM_machphys_mfn_list.
*
* For a non compat caller, this functions similarly to
* XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
* m2p table.
*/
#define XENMEM_machphys_compat_mfn_list 25
/*
* Returns the location in virtual address space of the machine_to_phys
* mapping table. Architectures which do not have a m2p table, or which do not
@ -198,13 +212,15 @@ struct xen_machphys_mapping {
typedef struct xen_machphys_mapping xen_machphys_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
/* Source mapping space. */
/* ` enum phys_map_space { */
#define XENMAPSPACE_shared_info 0 /* shared info page */
#define XENMAPSPACE_grant_table 1 /* grant table page */
#define XENMAPSPACE_gmfn 2 /* GMFN */
#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
* XENMEM_add_to_physmap_range only.
*/
* XENMEM_add_to_physmap_batch only. */
/* ` } */
/*
* Sets the GPFN at which a particular page appears in the specified guest's
@ -219,45 +235,22 @@ struct xen_add_to_physmap {
/* Number of pages to go through for gmfn_range */
uint16_t size;
/* Source mapping space. */
#define XENMAPSPACE_shared_info 0 /* shared info page */
#define XENMAPSPACE_grant_table 1 /* grant table page */
#define XENMAPSPACE_gmfn 2 /* GMFN */
#define XENMAPSPACE_gmfn_range 3 /* GMFN range */
unsigned int space;
unsigned int space; /* => enum phys_map_space */
#define XENMAPIDX_grant_table_status 0x80000000
/* Index into source mapping space. */
/* Index into space being mapped. */
xen_ulong_t idx;
/* GPFN where the source mapping page should appear. */
/* GPFN in domid where the source mapping page should appear. */
xen_pfn_t gpfn;
};
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
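A minimal sketch of filling this structure to make the shared info page appear at a chosen GPFN in the caller's own physmap; DOMID_SELF comes from xen.h and the hypercall wrapper, which differs per OS, is only noted in a comment.

#include <xen/interface/xen.h>
#include <xen/interface/memory.h>

static void fill_add_to_physmap(xen_add_to_physmap_t *xatp, xen_pfn_t gpfn)
{
    xatp->domid = DOMID_SELF;
    xatp->size  = 0;                        /* not a gmfn_range mapping */
    xatp->space = XENMAPSPACE_shared_info;  /* => enum phys_map_space */
    xatp->idx   = 0;                        /* only one shared info page */
    xatp->gpfn  = gpfn;                     /* where it should appear */
    /* then: HYPERVISOR_memory_op(XENMEM_add_to_physmap, xatp); */
}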
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* arg == addr of xen_remove_from_physmap_t.
*/
#define XENMEM_remove_from_physmap 15
struct xen_remove_from_physmap {
/* Which domain to change the mapping for. */
domid_t domid;
/* GPFN of the current mapping of the page. */
xen_pfn_t gpfn;
};
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/
#define XENMEM_add_to_physmap_range 23
struct xen_add_to_physmap_range {
/* A batched version of add_to_physmap. */
#define XENMEM_add_to_physmap_batch 23
struct xen_add_to_physmap_batch {
/* IN */
/* Which domain to change the mapping for. */
domid_t domid;
@ -278,8 +271,34 @@ struct xen_add_to_physmap_range {
/* Per index error code. */
XEN_GUEST_HANDLE(int) errs;
};
typedef struct xen_add_to_physmap_range xen_add_to_physmap_range_t;
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
#if __XEN_INTERFACE_VERSION__ < 0x00040400
#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
#define xen_add_to_physmap_range xen_add_to_physmap_batch
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
#endif
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* arg == addr of xen_remove_from_physmap_t.
*/
#define XENMEM_remove_from_physmap 15
struct xen_remove_from_physmap {
/* Which domain to change the mapping for. */
domid_t domid;
/* GPFN of the current mapping of the page. */
xen_pfn_t gpfn;
};
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/
/*
* Returns the pseudo-physical memory map as it was when the domain
@ -356,32 +375,78 @@ typedef struct xen_pod_target xen_pod_target_t;
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2
#define XENMEM_access_op 21
#define XENMEM_access_op_resume 0
struct xen_mem_event_op {
uint8_t op; /* XENMEM_*_op_* */
struct xen_mem_paging_op {
uint8_t op; /* XENMEM_paging_op_* */
domid_t domain;
/* PAGING_PREP IN: buffer to immediately fill page in */
uint64_aligned_t buffer;
/* Other OPs */
uint64_aligned_t gfn; /* IN: gfn of page being operated on */
};
typedef struct xen_mem_event_op xen_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
typedef struct xen_mem_paging_op xen_mem_paging_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
#define XENMEM_access_op 21
#define XENMEM_access_op_set_access 0
#define XENMEM_access_op_get_access 1
#define XENMEM_access_op_enable_emulate 2
#define XENMEM_access_op_disable_emulate 3
typedef enum {
XENMEM_access_n,
XENMEM_access_r,
XENMEM_access_w,
XENMEM_access_rw,
XENMEM_access_x,
XENMEM_access_rx,
XENMEM_access_wx,
XENMEM_access_rwx,
/*
* Page starts off as r-x, but automatically
     * changes to r-w on a write
*/
XENMEM_access_rx2rw,
/*
* Log access: starts off as n, automatically
* goes to rwx, generating an event without
* pausing the vcpu
*/
XENMEM_access_n2rwx,
/* Take the domain default */
XENMEM_access_default
} xenmem_access_t;
struct xen_mem_access_op {
/* XENMEM_access_op_* */
uint8_t op;
/* xenmem_access_t */
uint8_t access;
domid_t domid;
/*
* Number of pages for set op
* Ignored on setting default access and other ops
*/
uint32_t nr;
/*
* First pfn for set op
* pfn for get op
* ~0ull is used to set and get the default access for pages
*/
uint64_aligned_t pfn;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
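As a rough illustration of how these fields combine, the sketch below sets a domain's
default access type to read-only; the HYPERVISOR_memory_op() wrapper name and exact
calling convention are assumptions here and vary by guest OS.

/* Hedged sketch: set a domain-wide default access of read-only. */
static int set_default_access_ro(domid_t dom)
{
    struct xen_mem_access_op mao = {
        .op     = XENMEM_access_op_set_access,
        .access = XENMEM_access_r,   /* xenmem_access_t value */
        .domid  = dom,
        .nr     = 0,                 /* ignored when setting the default */
        .pfn    = ~0ull,             /* ~0ull selects the default access */
    };

    return HYPERVISOR_memory_op(XENMEM_access_op, &mao);
}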
#define XENMEM_sharing_op 22
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
#define XENMEM_sharing_op_share 2
#define XENMEM_sharing_op_resume 3
#define XENMEM_sharing_op_debug_gfn 4
#define XENMEM_sharing_op_debug_mfn 5
#define XENMEM_sharing_op_debug_gref 6
#define XENMEM_sharing_op_add_physmap 7
#define XENMEM_sharing_op_audit 8
#define XENMEM_sharing_op_debug_gfn 3
#define XENMEM_sharing_op_debug_mfn 4
#define XENMEM_sharing_op_debug_gref 5
#define XENMEM_sharing_op_add_physmap 6
#define XENMEM_sharing_op_audit 7
#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
@ -430,14 +495,127 @@ struct xen_mem_sharing_op {
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
/*
* Attempt to stake a claim for a domain on a quantity of pages
* of system RAM, but _not_ assign specific pageframes. Only
* arithmetic is performed so the hypercall is very fast and need
* not be preemptible, thus sidestepping time-of-check-time-of-use
* races for memory allocation. Returns 0 if the hypervisor page
* allocator has atomically and successfully claimed the requested
* number of pages, else non-zero.
*
* Any domain may have only one active claim. When sufficient memory
* has been allocated to resolve the claim, the claim silently expires.
* Claiming zero pages effectively resets any outstanding claim and
* is always successful.
*
* Note that a valid claim may be staked even after memory has been
* allocated for a domain. In this case, the claim is not incremental,
* i.e. if the domain's tot_pages is 3, and a claim is staked for 10,
* only 7 additional pages are claimed.
*
* Caller must be privileged or the hypercall fails.
*/
#define XENMEM_claim_pages 24
/*
 * XENMEM_claim_pages flags - there are no flags at this time.
 * The zero value is appropriate.
*/
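A minimal sketch of staking a claim is shown below. It assumes, as with the other
XENMEM_* population ops, that the argument is the struct xen_memory_reservation
declared earlier in this header with nr_extents counting order-0 pages, and that
HYPERVISOR_memory_op() is the guest's hypercall wrapper; both are assumptions.

/* Hedged sketch: stake a claim for nr_pages on behalf of a domain. */
static int claim_pages(domid_t dom, unsigned long nr_pages)
{
    struct xen_memory_reservation res = {
        .nr_extents   = nr_pages,  /* 0 resets any outstanding claim */
        .extent_order = 0,
        .mem_flags    = 0,         /* no flags defined at this time */
        .domid        = dom,
    };

    return HYPERVISOR_memory_op(XENMEM_claim_pages, &res);
}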
/*
* With some legacy devices, certain guest-physical addresses cannot safely
* be used for other purposes, e.g. to map guest RAM. This hypercall
* enumerates those regions so the toolstack can avoid using them.
*/
#define XENMEM_reserved_device_memory_map 27
struct xen_reserved_device_memory {
xen_pfn_t start_pfn;
xen_ulong_t nr_pages;
};
typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
struct xen_reserved_device_memory_map {
#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
/* IN */
uint32_t flags;
/*
* IN/OUT
*
* Gets set to the required number of entries when too low,
* signaled by error code -ERANGE.
*/
unsigned int nr_entries;
/* OUT */
XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
/* IN */
union {
struct physdev_pci_device pci;
} dev;
};
typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
/*
* XENMEM_get_vnumainfo used by guest to get
* vNUMA topology from hypervisor.
*/
#define XENMEM_get_vnumainfo 26
/* vNUMA node memory ranges */
struct xen_vmemrange {
uint64_t start, end;
unsigned int flags;
unsigned int nid;
};
typedef struct xen_vmemrange xen_vmemrange_t;
DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
/*
* vNUMA topology specifies vNUMA node number, distance table,
* memory ranges and vcpu mapping provided for guests.
* XENMEM_get_vnumainfo hypercall expects to see from guest
* nr_vnodes, nr_vmemranges and nr_vcpus to indicate available memory.
 * After filling the guest's structures, nr_vnodes, nr_vmemranges and nr_vcpus
 * are copied back to the guest. Xen returns the expected values of nr_vnodes,
 * nr_vmemranges and nr_vcpus to the guest if the supplied values were incorrect.
*/
struct xen_vnuma_topology_info {
/* IN */
domid_t domid;
uint16_t pad;
/* IN/OUT */
unsigned int nr_vnodes;
unsigned int nr_vcpus;
unsigned int nr_vmemranges;
/* OUT */
union {
XEN_GUEST_HANDLE(uint) h;
uint64_t pad;
} vdistance;
union {
XEN_GUEST_HANDLE(uint) h;
uint64_t pad;
} vcpu_to_vnode;
union {
XEN_GUEST_HANDLE(xen_vmemrange_t) h;
uint64_t pad;
} vmemrange;
};
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
/* Next available subop number is 28 */
#endif /* __XEN_PUBLIC_MEMORY_H__ */
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -36,9 +36,14 @@
/* I/O-check error reported via ISA port 0x61, bit 6. */
#define _XEN_NMIREASON_io_error 0
#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
/* PCI SERR reported via ISA port 0x61, bit 7. */
#define _XEN_NMIREASON_pci_serr 1
#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr)
#if __XEN_INTERFACE_VERSION__ < 0x00040300 /* legacy alias of the above */
/* Parity error reported via ISA port 0x61, bit 7. */
#define _XEN_NMIREASON_parity_error 1
#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
#endif
/* Unknown hardware-generated NMI. */
#define _XEN_NMIREASON_unknown 2
#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
@ -72,7 +77,7 @@ DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -16,6 +16,8 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2006, Keir Fraser
*/
#ifndef __XEN_PUBLIC_PHYSDEV_H__
@ -158,15 +160,15 @@ struct physdev_map_pirq {
domid_t domid;
/* IN */
int type;
/* IN */
/* IN (ignored for ..._MULTI_MSI) */
int index;
/* IN or OUT */
int pirq;
/* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
/* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */
int bus;
/* IN */
int devfn;
/* IN */
/* IN (also OUT for ..._MULTI_MSI) */
int entry_nr;
/* IN */
uint64_t table_base;
@ -293,6 +295,11 @@ struct physdev_pci_device_add {
uint8_t bus;
uint8_t devfn;
} physfn;
/*
* Optional parameters array.
* First element ([0]) is PXM domain associated with the device (if
* XEN_PCI_DEV_PXM is set)
*/
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
uint32_t optarr[];
#elif defined(__GNUC__)
@ -304,6 +311,12 @@ DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t);
#define PHYSDEVOP_pci_device_remove 26
#define PHYSDEVOP_restore_msi_ext 27
/*
 * Dom0 should use these two to announce that MMIO resources assigned to
 * MSI-X capable devices won't (prepare) or may (release) change.
*/
#define PHYSDEVOP_prepare_msix 30
#define PHYSDEVOP_release_msix 31
struct physdev_pci_device {
/* IN */
uint16_t seg;
@ -313,6 +326,24 @@ struct physdev_pci_device {
typedef struct physdev_pci_device physdev_pci_device_t;
DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
#define PHYSDEVOP_DBGP_RESET_PREPARE 1
#define PHYSDEVOP_DBGP_RESET_DONE 2
#define PHYSDEVOP_DBGP_BUS_UNKNOWN 0
#define PHYSDEVOP_DBGP_BUS_PCI 1
#define PHYSDEVOP_dbgp_op 29
struct physdev_dbgp_op {
/* IN */
uint8_t op;
uint8_t bus;
union {
struct physdev_pci_device pci;
} u;
};
typedef struct physdev_dbgp_op physdev_dbgp_op_t;
DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_t);
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
@ -320,9 +351,11 @@ DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
*/
#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
#if __XEN_INTERFACE_VERSION__ < 0x00040600
/*
 * These all-capitals physdev operation names are superseded by the new names
 * (defined above) since interface version 0x00030202. The guard above was
 * only added post-4.5 though, and hence shouldn't check for 0x00030202.
* added post-4.5 only though and hence shouldn't check for 0x00030202.
*/
#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
@ -333,6 +366,7 @@ DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
#endif
#if __XEN_INTERFACE_VERSION__ < 0x00040200
#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1
@ -345,7 +379,7 @@ DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -35,13 +35,28 @@
* Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
* 1 January, 1970 if the current system time was <system_time>.
*/
#define XENPF_settime 17
struct xenpf_settime {
#define XENPF_settime32 17
struct xenpf_settime32 {
/* IN variables. */
uint32_t secs;
uint32_t nsecs;
uint64_t system_time;
};
#define XENPF_settime64 62
struct xenpf_settime64 {
/* IN variables. */
uint64_t secs;
uint32_t nsecs;
uint32_t mbz;
uint64_t system_time;
};
#if __XEN_INTERFACE_VERSION__ < 0x00040600
#define XENPF_settime XENPF_settime32
#define xenpf_settime xenpf_settime32
#else
#define XENPF_settime XENPF_settime64
#define xenpf_settime xenpf_settime64
#endif
typedef struct xenpf_settime xenpf_settime_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
@ -126,6 +141,26 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
#define XEN_EFI_query_variable_info 9
#define XEN_EFI_query_capsule_capabilities 10
#define XEN_EFI_update_capsule 11
struct xenpf_efi_time {
uint16_t year;
uint8_t month;
uint8_t day;
uint8_t hour;
uint8_t min;
uint8_t sec;
uint32_t ns;
int16_t tz;
uint8_t daylight;
};
struct xenpf_efi_guid {
uint32_t data1;
uint16_t data2;
uint16_t data3;
uint8_t data4[8];
};
struct xenpf_efi_runtime_call {
uint32_t function;
/*
@ -134,21 +169,11 @@ struct xenpf_efi_runtime_call {
* where it holds the single returned value.
*/
uint32_t misc;
unsigned long status;
xen_ulong_t status;
union {
#define XEN_EFI_GET_TIME_SET_CLEARS_NS 0x00000001
struct {
struct xenpf_efi_time {
uint16_t year;
uint8_t month;
uint8_t day;
uint8_t hour;
uint8_t min;
uint8_t sec;
uint32_t ns;
int16_t tz;
uint8_t daylight;
} time;
struct xenpf_efi_time time;
uint32_t resolution;
uint32_t accuracy;
} get_time;
@ -168,22 +193,18 @@ struct xenpf_efi_runtime_call {
#define XEN_EFI_VARIABLE_RUNTIME_ACCESS 0x00000004
struct {
XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
unsigned long size;
xen_ulong_t size;
XEN_GUEST_HANDLE(void) data;
struct xenpf_efi_guid {
uint32_t data1;
uint16_t data2;
uint16_t data3;
uint8_t data4[8];
} vendor_guid;
struct xenpf_efi_guid vendor_guid;
} get_variable, set_variable;
struct {
unsigned long size;
xen_ulong_t size;
XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
struct xenpf_efi_guid vendor_guid;
} get_next_variable_name;
#define XEN_EFI_VARINFO_BOOT_SNAPSHOT 0x00000001
struct {
uint32_t attr;
uint64_t max_store_size;
@ -193,14 +214,14 @@ struct xenpf_efi_runtime_call {
struct {
XEN_GUEST_HANDLE(void) capsule_header_array;
unsigned long capsule_count;
xen_ulong_t capsule_count;
uint64_t max_capsule_size;
unsigned int reset_type;
uint32_t reset_type;
} query_capsule_capabilities;
struct {
XEN_GUEST_HANDLE(void) capsule_header_array;
unsigned long capsule_count;
xen_ulong_t capsule_count;
uint64_t sg_list; /* machine address */
} update_capsule;
} u;
@ -218,6 +239,8 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_efi_runtime_call_t);
#define XEN_FW_EFI_VENDOR 2
#define XEN_FW_EFI_MEM_INFO 3
#define XEN_FW_EFI_RT_VERSION 4
#define XEN_FW_EFI_PCI_ROM 5
#define XEN_FW_KBD_SHIFT_FLAGS 5
struct xenpf_firmware_info {
/* IN variables. */
uint32_t type;
@ -265,7 +288,21 @@ struct xenpf_firmware_info {
uint64_t attr;
uint32_t type;
} mem;
struct {
/* IN variables */
uint16_t segment;
uint8_t bus;
uint8_t devfn;
uint16_t vendor;
uint16_t devid;
/* OUT variables */
uint64_t address;
xen_ulong_t size;
} pci_rom;
} efi_info; /* XEN_FW_EFI_INFO */
/* Int16, Fn02: Get keyboard shift flags. */
uint8_t kbd_shift_flags; /* XEN_FW_KBD_SHIFT_FLAGS */
} u;
};
typedef struct xenpf_firmware_info xenpf_firmware_info_t;
@ -274,10 +311,16 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
#define XENPF_enter_acpi_sleep 51
struct xenpf_enter_acpi_sleep {
/* IN variables */
#if __XEN_INTERFACE_VERSION__ < 0x00040300
uint16_t pm1a_cnt_val; /* PM1a control value. */
uint16_t pm1b_cnt_val; /* PM1b control value. */
#else
uint16_t val_a; /* PM1a control / sleep type A. */
uint16_t val_b; /* PM1b control / sleep type B. */
#endif
uint32_t sleep_state; /* Which state to enter (Sn). */
uint32_t flags; /* Must be zero. */
#define XENPF_ACPI_SLEEP_EXTENDED 0x00000001
uint32_t flags; /* XENPF_ACPI_SLEEP_*. */
};
typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t);
@ -504,6 +547,67 @@ struct xenpf_core_parking {
typedef struct xenpf_core_parking xenpf_core_parking_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_core_parking_t);
/*
 * Access generic platform resources (e.g., MSR access, port I/O, etc.)
 * in a unified way. Batch resource operations in one call are supported and
* they are always non-preemptible and executed in their original order.
* The batch itself returns a negative integer for general errors, or a
* non-negative integer for the number of successful operations. For the latter
* case, the @ret in the failed entry (if any) indicates the exact error.
*/
#define XENPF_resource_op 61
#define XEN_RESOURCE_OP_MSR_READ 0
#define XEN_RESOURCE_OP_MSR_WRITE 1
/*
* Specially handled MSRs:
* - MSR_IA32_TSC
* READ: Returns the scaled system time(ns) instead of raw timestamp. In
* multiple entry case, if other MSR read is followed by a MSR_IA32_TSC
* read, then both reads are guaranteed to be performed atomically (with
* IRQ disabled). The return time indicates the point of reading that MSR.
* WRITE: Not supported.
*/
struct xenpf_resource_entry {
union {
uint32_t cmd; /* IN: XEN_RESOURCE_OP_* */
int32_t ret; /* OUT: return value for failed entry */
} u;
uint32_t rsvd; /* IN: padding and must be zero */
uint64_t idx; /* IN: resource address to access */
uint64_t val; /* IN/OUT: resource value to set/get */
};
typedef struct xenpf_resource_entry xenpf_resource_entry_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_resource_entry_t);
struct xenpf_resource_op {
uint32_t nr_entries; /* number of resource entries */
uint32_t cpu; /* which cpu to run */
XEN_GUEST_HANDLE(xenpf_resource_entry_t) entries;
};
typedef struct xenpf_resource_op xenpf_resource_op_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_resource_op_t);
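For illustration, a hedged sketch of a single batched MSR read through
XENPF_resource_op follows; the HYPERVISOR_platform_op() wrapper and the use of
set_xen_guest_handle() follow common Xen conventions and are assumptions here.

/* Hedged sketch: read one MSR on a given pcpu via XENPF_resource_op. */
static int read_msr(uint32_t cpu, uint64_t msr, uint64_t *val)
{
    struct xenpf_resource_entry entry = {
        .u.cmd = XEN_RESOURCE_OP_MSR_READ,
        .rsvd  = 0,
        .idx   = msr,              /* resource address: the MSR index */
    };
    struct xen_platform_op op = {
        .cmd               = XENPF_resource_op,
        .interface_version = XENPF_INTERFACE_VERSION,
    };
    int rc;

    op.u.resource_op.nr_entries = 1;
    op.u.resource_op.cpu        = cpu;
    set_xen_guest_handle(op.u.resource_op.entries, &entry);

    rc = HYPERVISOR_platform_op(&op);
    if (rc == 1)                   /* one entry processed successfully */
        *val = entry.val;
    return rc;
}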
#define XENPF_get_symbol 63
struct xenpf_symdata {
/* IN/OUT variables */
uint32_t namelen; /* IN: size of name buffer */
/* OUT: strlen(name) of hypervisor symbol (may be */
/* larger than what's been copied to guest) */
uint32_t symnum; /* IN: Symbol to read */
/* OUT: Next available symbol. If same as IN then */
/* we reached the end */
/* OUT variables */
XEN_GUEST_HANDLE(char) name;
uint64_t address;
char type;
};
typedef struct xenpf_symdata xenpf_symdata_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_symdata_t);
/*
* ` enum neg_errnoval
* ` HYPERVISOR_platform_op(const struct xen_platform_op*);
@ -513,6 +617,8 @@ struct xen_platform_op {
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
union {
struct xenpf_settime settime;
struct xenpf_settime32 settime32;
struct xenpf_settime64 settime64;
struct xenpf_add_memtype add_memtype;
struct xenpf_del_memtype del_memtype;
struct xenpf_read_memtype read_memtype;
@ -530,6 +636,8 @@ struct xen_platform_op {
struct xenpf_cpu_hotadd cpu_add;
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking;
struct xenpf_resource_op resource_op;
struct xenpf_symdata symdata;
uint8_t pad[128];
} u;
};
@ -541,7 +649,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

133
sys/xen/interface/pmu.h Normal file
View File

@ -0,0 +1,133 @@
/*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright (c) 2015 Oracle and/or its affiliates. All rights reserved.
*/
#ifndef __XEN_PUBLIC_PMU_H__
#define __XEN_PUBLIC_PMU_H__
#include "xen.h"
#if defined(__i386__) || defined(__x86_64__)
#include "arch-x86/pmu.h"
#elif defined (__arm__) || defined (__aarch64__)
#include "arch-arm.h"
#else
#error "Unsupported architecture"
#endif
#define XENPMU_VER_MAJ 0
#define XENPMU_VER_MIN 1
/*
* ` enum neg_errnoval
* ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args);
*
* @cmd == XENPMU_* (PMU operation)
* @args == struct xenpmu_params
*/
/* ` enum xenpmu_op { */
#define XENPMU_mode_get 0 /* Also used for getting PMU version */
#define XENPMU_mode_set 1
#define XENPMU_feature_get 2
#define XENPMU_feature_set 3
#define XENPMU_init 4
#define XENPMU_finish 5
#define XENPMU_lvtpc_set 6
#define XENPMU_flush 7 /* Write cached MSR values to HW */
/* ` } */
/* Parameters structure for HYPERVISOR_xenpmu_op call */
struct xen_pmu_params {
/* IN/OUT parameters */
struct {
uint32_t maj;
uint32_t min;
} version;
uint64_t val;
/* IN parameters */
uint32_t vcpu;
uint32_t pad;
};
typedef struct xen_pmu_params xen_pmu_params_t;
DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
/* PMU modes:
* - XENPMU_MODE_OFF: No PMU virtualization
* - XENPMU_MODE_SELF: Guests can profile themselves
* - XENPMU_MODE_HV: Guests can profile themselves, dom0 profiles
* itself and Xen
* - XENPMU_MODE_ALL: Only dom0 has access to VPMU and it profiles
* everyone: itself, the hypervisor and the guests.
*/
#define XENPMU_MODE_OFF 0
#define XENPMU_MODE_SELF (1<<0)
#define XENPMU_MODE_HV (1<<1)
#define XENPMU_MODE_ALL (1<<2)
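A minimal sketch of switching the PMU into self-profiling mode, using the
HYPERVISOR_xenpmu_op(cmd, args) prototype documented above; the wrapper name is
otherwise an assumption for the guest in question.

static int pmu_enable_self_profiling(void)
{
    xen_pmu_params_t p = {
        .version = { .maj = XENPMU_VER_MAJ, .min = XENPMU_VER_MIN },
        .val     = XENPMU_MODE_SELF,   /* guest profiles itself only */
    };

    return HYPERVISOR_xenpmu_op(XENPMU_mode_set, &p);
}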
/*
* PMU features:
* - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
*/
#define XENPMU_FEATURE_INTEL_BTS 1
/*
* Shared PMU data between hypervisor and PV(H) domains.
*
 * The hypervisor fills out this structure during a PMU interrupt and sends an
 * interrupt to the appropriate VCPU.
* Architecture-independent fields of xen_pmu_data are WO for the hypervisor
* and RO for the guest but some fields in xen_pmu_arch can be writable
* by both the hypervisor and the guest (see arch-$arch/pmu.h).
*/
struct xen_pmu_data {
/* Interrupted VCPU */
uint32_t vcpu_id;
/*
* Physical processor on which the interrupt occurred. On non-privileged
     * guests this is set to vcpu_id.
*/
uint32_t pcpu_id;
/*
* Domain that was interrupted. On non-privileged guests set to DOMID_SELF.
* On privileged guests can be DOMID_SELF, DOMID_XEN, or, when in
* XENPMU_MODE_ALL mode, domain ID of another domain.
*/
domid_t domain_id;
uint8_t pad[6];
/* Architecture-specific information */
struct xen_pmu_arch pmu;
};
#endif /* __XEN_PUBLIC_PMU_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/

View File

@ -1,8 +1,8 @@
/******************************************************************************
* sched.h
*
*
* Scheduler state interactions
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@ -29,21 +29,34 @@
#include "event_channel.h"
/*
* `incontents 150 sched Guest Scheduler Operations
*
* The SCHEDOP interface provides mechanisms for a guest to interact
* with the scheduler, including yield, blocking and shutting itself
* down.
*/
/*
* The prototype for this hypercall is:
* long sched_op(int cmd, void *arg)
* ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
*
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == Operation-specific extra argument(s), as described below.
*
* ... == Additional Operation-specific extra arguments, described below.
*
* Versions of Xen prior to 3.0.2 provided only the following legacy version
* of this hypercall, supporting only the commands yield, block and shutdown:
* long sched_op(int cmd, unsigned long arg)
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
* == SHUTDOWN_* code (SCHEDOP_shutdown)
* This legacy version is available to new guests as sched_op_compat().
*
* This legacy version is available to new guests as:
* ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
*/
/* ` enum sched_op { // SCHEDOP_* => struct sched_* */
/*
* Voluntarily yield the CPU.
* @arg == NULL.
@ -61,21 +74,58 @@
/*
* Halt execution of this domain (all VCPUs) and notify the system controller.
* @arg == pointer to sched_shutdown structure.
* @arg == pointer to sched_shutdown_t structure.
*
* If the sched_shutdown_t reason is SHUTDOWN_suspend then
* x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
* of the guest's start info page. RDX/EDX is the third hypercall
* argument.
*
 * In addition, when the reason is SHUTDOWN_suspend this hypercall
* returns 1 if suspend was cancelled or the domain was merely
* checkpointed, and 0 if it is resuming in a new domain.
*/
#define SCHEDOP_shutdown 2
struct sched_shutdown {
unsigned int reason; /* SHUTDOWN_* */
};
typedef struct sched_shutdown sched_shutdown_t;
DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
/*
* Poll a set of event-channel ports. Return when one or more are pending. An
* optional timeout may be specified.
* @arg == pointer to sched_poll structure.
* @arg == pointer to sched_poll_t structure.
*/
#define SCHEDOP_poll 3
/*
* Declare a shutdown for another domain. The main use of this function is
* in interpreting shutdown requests and reasons for fully-virtualized
* domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
* @arg == pointer to sched_remote_shutdown_t structure.
*/
#define SCHEDOP_remote_shutdown 4
/*
* Latch a shutdown code, so that when the domain later shuts down it
* reports this code to the control tools.
* @arg == sched_shutdown_t, as for SCHEDOP_shutdown.
*/
#define SCHEDOP_shutdown_code 5
/*
* Setup, poke and destroy a domain watchdog timer.
* @arg == pointer to sched_watchdog_t structure.
* With id == 0, setup a domain watchdog timer to cause domain shutdown
* after timeout, returns watchdog id.
* With id != 0 and timeout == 0, destroy domain watchdog timer.
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
/* ` } */
struct sched_shutdown {
unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
};
typedef struct sched_shutdown sched_shutdown_t;
DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
struct sched_poll {
XEN_GUEST_HANDLE(evtchn_port_t) ports;
unsigned int nr_ports;
@ -84,36 +134,13 @@ struct sched_poll {
typedef struct sched_poll sched_poll_t;
DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
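A hedged sketch of polling a single event channel follows; it assumes the
conventional HYPERVISOR_sched_op() wrapper and the uint64_t timeout field of
struct sched_poll (not visible in this hunk), which holds an absolute system
time in nanoseconds.

static int poll_one_port(evtchn_port_t port, uint64_t now_ns)
{
    struct sched_poll poll;

    set_xen_guest_handle(poll.ports, &port);
    poll.nr_ports = 1;
    poll.timeout  = now_ns + 1000000;    /* absolute deadline: now + 1 ms */

    return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}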
/*
* Declare a shutdown for another domain. The main use of this function is
* in interpreting shutdown requests and reasons for fully-virtualized
* domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
* @arg == pointer to sched_remote_shutdown structure.
*/
#define SCHEDOP_remote_shutdown 4
struct sched_remote_shutdown {
domid_t domain_id; /* Remote domain ID */
unsigned int reason; /* SHUTDOWN_xxx reason */
unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
};
typedef struct sched_remote_shutdown sched_remote_shutdown_t;
DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
/*
* Latch a shutdown code, so that when the domain later shuts down it
* reports this code to the control tools.
* @arg == as for SCHEDOP_shutdown.
*/
#define SCHEDOP_shutdown_code 5
/*
* Setup, poke and destroy a domain watchdog timer.
* @arg == pointer to sched_watchdog structure.
* With id == 0, setup a domain watchdog timer to cause domain shutdown
* after timeout, returns watchdog id.
* With id != 0 and timeout == 0, destroy domain watchdog timer.
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
struct sched_watchdog {
uint32_t id; /* watchdog ID */
uint32_t timeout; /* timeout */
@ -126,18 +153,21 @@ DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
*/
/* ` enum sched_shutdown_reason { */
#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
#define SHUTDOWN_MAX 4 /* Maximum valid shutdown reason. */
/* ` } */
#endif /* __XEN_PUBLIC_SCHED_H__ */
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -33,8 +33,10 @@
#include "xen.h"
#include "domctl.h"
#include "physdev.h"
#include "tmem.h"
#define XEN_SYSCTL_INTERFACE_VERSION 0x00000009
#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000C
/*
* Read console content from Xen buffer ring.
@ -71,7 +73,7 @@ struct xen_sysctl_tbuf_op {
#define XEN_SYSCTL_TBUFOP_disable 5
uint32_t cmd;
/* IN/OUT variables */
struct xenctl_cpumap cpu_mask;
struct xenctl_bitmap cpu_mask;
uint32_t evt_mask;
/* OUT variables */
uint64_aligned_t buffer_mfn;
@ -101,6 +103,7 @@ struct xen_sysctl_physinfo {
uint64_aligned_t total_pages;
uint64_aligned_t free_pages;
uint64_aligned_t scrub_pages;
uint64_aligned_t outstanding_pages;
uint32_t hw_cap[8];
/* XEN_SYSCTL_PHYSCAP_??? */
@ -225,13 +228,17 @@ struct pm_cx_stat {
uint64_aligned_t idle_time; /* idle time from boot */
XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */
XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
uint64_aligned_t pc2;
uint64_aligned_t pc3;
uint64_aligned_t pc6;
uint64_aligned_t pc7;
uint64_aligned_t cc3;
uint64_aligned_t cc6;
uint64_aligned_t cc7;
uint32_t nr_pc; /* entry nr in pc[] */
uint32_t nr_cc; /* entry nr in cc[] */
/*
* These two arrays may (and generally will) have unused slots; slots not
* having a corresponding hardware register will not be written by the
* hypervisor. It is therefore up to the caller to put a suitable sentinel
* into all slots before invoking the function.
* Indexing is 1-biased (PC1/CC1 being at index 0).
*/
XEN_GUEST_HANDLE_64(uint64) pc;
XEN_GUEST_HANDLE_64(uint64) cc;
};
struct xen_sysctl_get_pmstat {
@ -457,61 +464,76 @@ struct xen_sysctl_lockprof_op {
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
/* XEN_SYSCTL_topologyinfo */
#define INVALID_TOPOLOGY_ID (~0U)
struct xen_sysctl_topologyinfo {
/*
* IN: maximum addressable entry in the caller-provided arrays.
* OUT: largest cpu identifier in the system.
* If OUT is greater than IN then the arrays are truncated!
 * If OUT is less than IN then the array tails are not written by sysctl.
*/
uint32_t max_cpu_index;
/* XEN_SYSCTL_cputopoinfo */
#define XEN_INVALID_CORE_ID (~0U)
#define XEN_INVALID_SOCKET_ID (~0U)
#define XEN_INVALID_NODE_ID (~0U)
/*
* If not NULL, these arrays are filled with core/socket/node identifier
* for each cpu.
* If a cpu has no core/socket/node information (e.g., cpu not present)
* then the sentinel value ~0u is written to each array.
* The number of array elements written by the sysctl is:
* min(@max_cpu_index_IN,@max_cpu_index_OUT)+1
*/
XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
struct xen_sysctl_cputopo {
uint32_t core;
uint32_t socket;
uint32_t node;
};
typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
typedef struct xen_sysctl_cputopo xen_sysctl_cputopo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopo_t);
/*
* IN:
 * - a NULL 'cputopo' handle is a request for maximum 'num_cpus'.
* - otherwise it's the number of entries in 'cputopo'
*
* OUT:
 * - If 'num_cpus' is less than the number Xen wants to write but the handle
 *   is not a NULL one, partial data gets returned and 'num_cpus' gets
* updated to reflect the intended number.
* - Otherwise, 'num_cpus' shall indicate the number of entries written, which
* may be less than the input value.
*/
struct xen_sysctl_cputopoinfo {
uint32_t num_cpus;
XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
};
typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
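A hedged sketch of the two-call pattern described above; do_sysctl() is a
hypothetical stand-in for whatever privileged sysctl hypercall wrapper the
toolstack provides.

#include <stdlib.h>

static int get_cpu_topology(xen_sysctl_cputopo_t **out, uint32_t *count)
{
    struct xen_sysctl sysctl = {
        .cmd               = XEN_SYSCTL_cputopoinfo,
        .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
    };
    xen_sysctl_cputopo_t *buf;

    /* First call: NULL handle requests the maximum num_cpus. */
    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, NULL);
    sysctl.u.cputopoinfo.num_cpus = 0;
    if (do_sysctl(&sysctl))
        return -1;

    *count = sysctl.u.cputopoinfo.num_cpus;
    buf = calloc(*count, sizeof(*buf));

    /* Second call: sized handle, Xen fills up to num_cpus entries. */
    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, buf);
    sysctl.u.cputopoinfo.num_cpus = *count;
    if (do_sysctl(&sysctl)) {
        free(buf);
        return -1;
    }

    *out = buf;
    return 0;
}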
/* XEN_SYSCTL_numainfo */
#define INVALID_NUMAINFO_ID (~0U)
#define XEN_INVALID_MEM_SZ (~0U)
#define XEN_INVALID_NODE_DIST (~0U)
struct xen_sysctl_meminfo {
uint64_t memsize;
uint64_t memfree;
};
typedef struct xen_sysctl_meminfo xen_sysctl_meminfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_meminfo_t);
/*
* IN:
* - Both 'meminfo' and 'distance' handles being null is a request
* for maximum value of 'num_nodes'.
* - Otherwise it's the number of entries in 'meminfo' and square root
* of number of entries in 'distance' (when corresponding handle is
* non-null)
*
* OUT:
* - If 'num_nodes' is less than the number Xen wants to write but either
* handle is not a NULL one, partial data gets returned and 'num_nodes'
* gets updated to reflect the intended number.
* - Otherwise, 'num_nodes' shall indicate the number of entries written, which
* may be less than the input value.
*/
struct xen_sysctl_numainfo {
/*
* IN: maximum addressable entry in the caller-provided arrays.
* OUT: largest node identifier in the system.
* If OUT is greater than IN then the arrays are truncated!
*/
uint32_t max_node_index;
uint32_t num_nodes;
/* NB. Entries are 0 if node is not present. */
XEN_GUEST_HANDLE_64(uint64) node_to_memsize;
XEN_GUEST_HANDLE_64(uint64) node_to_memfree;
XEN_GUEST_HANDLE_64(xen_sysctl_meminfo_t) meminfo;
/*
* Array, of size (max_node_index+1)^2, listing memory access distances
* between nodes. If an entry has no node distance information (e.g., node
* not present) then the value ~0u is written.
*
* Note that the array rows must be indexed by multiplying by the minimum
* of the caller-provided max_node_index and the returned value of
* max_node_index. That is, if the largest node index in the system is
* smaller than the caller can handle, a smaller 2-d array is constructed
* within the space provided by the caller. When this occurs, trailing
* space provided by the caller is not modified. If the largest node index
* in the system is larger than the caller can handle, then a 2-d array of
* the maximum size handleable by the caller is constructed.
* Distance between nodes 'i' and 'j' is stored in index 'i*N + j',
* where N is the number of nodes that will be returned in 'num_nodes'
* (i.e. not 'num_nodes' provided by the caller)
*/
XEN_GUEST_HANDLE_64(uint32) node_to_node_distance;
XEN_GUEST_HANDLE_64(uint32) distance;
};
typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
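To make the indexing rule concrete, a small helper is sketched below; it relies only
on the 'i*N + j' layout stated in the comment above, with num_nodes as returned by
the hypercall.

static uint32_t node_distance(const uint32_t *distance, uint32_t num_nodes,
                              uint32_t i, uint32_t j)
{
    /* XEN_INVALID_NODE_DIST (~0U) marks pairs with no distance information. */
    return distance[i * num_nodes + j];
}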
@ -532,7 +554,7 @@ struct xen_sysctl_cpupool_op {
uint32_t domid; /* IN: M */
uint32_t cpu; /* IN: AR */
uint32_t n_dom; /* OUT: I */
struct xenctl_cpumap cpumap; /* OUT: IF */
struct xenctl_bitmap cpumap; /* OUT: IF */
};
typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
@ -596,6 +618,152 @@ struct xen_sysctl_scheduler_op {
typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
/* XEN_SYSCTL_coverage_op */
/*
* Get total size of information, to help allocate
* the buffer. The pointer points to a 32 bit value.
*/
#define XEN_SYSCTL_COVERAGE_get_total_size 0
/*
* Read coverage information in a single run
* You must use a tool to split them.
*/
#define XEN_SYSCTL_COVERAGE_read 1
/*
* Reset all the coverage counters to 0
* No parameters.
*/
#define XEN_SYSCTL_COVERAGE_reset 2
/*
* Like XEN_SYSCTL_COVERAGE_read but reset also
* counters to 0 in a single call.
*/
#define XEN_SYSCTL_COVERAGE_read_and_reset 3
struct xen_sysctl_coverage_op {
uint32_t cmd; /* XEN_SYSCTL_COVERAGE_* */
union {
uint32_t total_size; /* OUT */
XEN_GUEST_HANDLE_64(uint8) raw_info; /* OUT */
} u;
};
typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
#define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
/* The L3 cache size is returned in KB unit */
#define XEN_SYSCTL_PSR_CMT_get_l3_cache_size 2
#define XEN_SYSCTL_PSR_CMT_enabled 3
#define XEN_SYSCTL_PSR_CMT_get_l3_event_mask 4
struct xen_sysctl_psr_cmt_op {
uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CMT_* */
uint32_t flags; /* padding variable, may be extended for future use */
union {
uint64_t data; /* OUT */
struct {
uint32_t cpu; /* IN */
uint32_t rsvd;
} l3_cache;
} u;
};
typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
/* XEN_SYSCTL_pcitopoinfo */
#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
struct xen_sysctl_pcitopoinfo {
/*
 * IN: Number of elements in 'devs' and 'nodes' arrays.
* OUT: Number of processed elements of those arrays.
*/
uint32_t num_devs;
/* IN: list of devices for which node IDs are requested. */
XEN_GUEST_HANDLE_64(physdev_pci_device_t) devs;
/*
* OUT: node identifier for each device.
* If information for a particular device is not available then
 * the corresponding entry will be set to XEN_INVALID_NODE_ID. If the
 * device is not known to the hypervisor then XEN_INVALID_DEV
* will be provided.
*/
XEN_GUEST_HANDLE_64(uint32) nodes;
};
typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
#define XEN_SYSCTL_PSR_CAT_get_l3_info 0
struct xen_sysctl_psr_cat_op {
uint32_t cmd; /* IN: XEN_SYSCTL_PSR_CAT_* */
uint32_t target; /* IN */
union {
struct {
uint32_t cbm_len; /* OUT: CBM length */
uint32_t cos_max; /* OUT: Maximum COS */
} l3_info;
} u;
};
typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
#define XEN_SYSCTL_TMEM_OP_THAW 0
#define XEN_SYSCTL_TMEM_OP_FREEZE 1
#define XEN_SYSCTL_TMEM_OP_FLUSH 2
#define XEN_SYSCTL_TMEM_OP_DESTROY 3
#define XEN_SYSCTL_TMEM_OP_LIST 4
#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT 5
#define XEN_SYSCTL_TMEM_OP_SET_CAP 6
#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS 7
#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB 8
#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN 10
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION 11
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS 12
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP 14
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS 15
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS 16
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES 17
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID 18
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE 19
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV 20
#define XEN_SYSCTL_TMEM_OP_SAVE_END 21
#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN 30
#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE 32
#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE 33
/*
* XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_[PAGE|INV] override the 'buf' in
* xen_sysctl_tmem_op with this structure - sometimes with an extra
 * page tacked on.
*/
struct tmem_handle {
uint32_t pool_id;
uint32_t index;
xen_tmem_oid_t oid;
};
struct xen_sysctl_tmem_op {
uint32_t cmd; /* IN: XEN_SYSCTL_TMEM_OP_* . */
int32_t pool_id; /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
uint32_t cli_id; /* IN: client id; 0 for XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB,
for all others can be the domain id or
XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
uint32_t arg1; /* IN: If not applicable to command use 0. */
uint32_t arg2; /* IN: If not applicable to command use 0. */
uint32_t pad; /* Padding so structure is the same under 32 and 64. */
xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
};
typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
struct xen_sysctl {
uint32_t cmd;
#define XEN_SYSCTL_readconsole 1
@ -612,16 +780,22 @@ struct xen_sysctl {
#define XEN_SYSCTL_pm_op 12
#define XEN_SYSCTL_page_offline_op 14
#define XEN_SYSCTL_lockprof_op 15
#define XEN_SYSCTL_topologyinfo 16
#define XEN_SYSCTL_cputopoinfo 16
#define XEN_SYSCTL_numainfo 17
#define XEN_SYSCTL_cpupool_op 18
#define XEN_SYSCTL_scheduler_op 19
#define XEN_SYSCTL_coverage_op 20
#define XEN_SYSCTL_psr_cmt_op 21
#define XEN_SYSCTL_pcitopoinfo 22
#define XEN_SYSCTL_psr_cat_op 23
#define XEN_SYSCTL_tmem_op 24
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_tbuf_op tbuf_op;
struct xen_sysctl_physinfo physinfo;
struct xen_sysctl_topologyinfo topologyinfo;
struct xen_sysctl_cputopoinfo cputopoinfo;
struct xen_sysctl_pcitopoinfo pcitopoinfo;
struct xen_sysctl_numainfo numainfo;
struct xen_sysctl_sched_id sched_id;
struct xen_sysctl_perfc_op perfc_op;
@ -636,6 +810,10 @@ struct xen_sysctl {
struct xen_sysctl_lockprof_op lockprof_op;
struct xen_sysctl_cpupool_op cpupool_op;
struct xen_sysctl_scheduler_op scheduler_op;
struct xen_sysctl_coverage_op coverage_op;
struct xen_sysctl_psr_cmt_op psr_cmt_op;
struct xen_sysctl_psr_cat_op psr_cat_op;
struct xen_sysctl_tmem_op tmem_op;
uint8_t pad[128];
} u;
};
@ -647,7 +825,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -33,48 +33,28 @@
#define TMEM_SPEC_VERSION 1
/* Commands to HYPERVISOR_tmem_op() */
#define TMEM_CONTROL 0
#ifdef __XEN__
#define TMEM_CONTROL 0 /* Now called XEN_SYSCTL_tmem_op */
#else
#undef TMEM_CONTROL
#endif
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
#define TMEM_NEW_PAGE 3
#define TMEM_PUT_PAGE 4
#define TMEM_GET_PAGE 5
#define TMEM_FLUSH_PAGE 6
#define TMEM_FLUSH_OBJECT 7
#if __XEN_INTERFACE_VERSION__ < 0x00040400
#define TMEM_NEW_PAGE 3
#define TMEM_READ 8
#define TMEM_WRITE 9
#define TMEM_XCHG 10
#endif
/* Privileged commands to HYPERVISOR_tmem_op() */
#define TMEM_AUTH 101
#define TMEM_AUTH 101
#define TMEM_RESTORE_NEW 102
/* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */
#define TMEMC_THAW 0
#define TMEMC_FREEZE 1
#define TMEMC_FLUSH 2
#define TMEMC_DESTROY 3
#define TMEMC_LIST 4
#define TMEMC_SET_WEIGHT 5
#define TMEMC_SET_CAP 6
#define TMEMC_SET_COMPRESS 7
#define TMEMC_QUERY_FREEABLE_MB 8
#define TMEMC_SAVE_BEGIN 10
#define TMEMC_SAVE_GET_VERSION 11
#define TMEMC_SAVE_GET_MAXPOOLS 12
#define TMEMC_SAVE_GET_CLIENT_WEIGHT 13
#define TMEMC_SAVE_GET_CLIENT_CAP 14
#define TMEMC_SAVE_GET_CLIENT_FLAGS 15
#define TMEMC_SAVE_GET_POOL_FLAGS 16
#define TMEMC_SAVE_GET_POOL_NPAGES 17
#define TMEMC_SAVE_GET_POOL_UUID 18
#define TMEMC_SAVE_GET_NEXT_PAGE 19
#define TMEMC_SAVE_GET_NEXT_INV 20
#define TMEMC_SAVE_END 21
#define TMEMC_RESTORE_BEGIN 30
#define TMEMC_RESTORE_PUT_PAGE 32
#define TMEMC_RESTORE_FLUSH_PAGE 33
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST 1
#define TMEM_POOL_SHARED 2
@ -93,9 +73,16 @@
#define EFROZEN 1000
#define EEMPTY 1001
struct xen_tmem_oid {
uint64_t oid[3];
};
typedef struct xen_tmem_oid xen_tmem_oid_t;
DEFINE_XEN_GUEST_HANDLE(xen_tmem_oid_t);
#ifndef __ASSEMBLY__
#if __XEN_INTERFACE_VERSION__ < 0x00040400
typedef xen_pfn_t tmem_cli_mfn_t;
#endif
typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t;
struct tmem_op {
uint32_t cmd;
@ -106,33 +93,22 @@ struct tmem_op {
uint32_t flags;
uint32_t arg1;
} creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
struct {
uint32_t subop;
uint32_t cli_id;
uint32_t arg1;
uint32_t arg2;
uint64_t oid[3];
tmem_cli_va_t buf;
} ctrl; /* for cmd == TMEM_CONTROL */
struct {
#if __XEN_INTERFACE_VERSION__ < 0x00040600
uint64_t oid[3];
#else
xen_tmem_oid_t oid;
#endif
uint32_t index;
uint32_t tmem_offset;
uint32_t pfn_offset;
uint32_t len;
tmem_cli_mfn_t cmfn; /* client machine page frame */
xen_pfn_t cmfn; /* client machine page frame */
} gen; /* for all other cmd ("generic") */
} u;
};
typedef struct tmem_op tmem_op_t;
DEFINE_XEN_GUEST_HANDLE(tmem_op_t);
struct tmem_handle {
uint32_t pool_id;
uint32_t index;
uint64_t oid[3];
};
#endif
#endif /* __XEN_PUBLIC_TMEM_H__ */
@ -140,7 +116,7 @@ struct tmem_handle {
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -50,13 +50,41 @@
#define TRC_SUBCLS_SHIFT 12
/* trace subclasses for SVM */
#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
#define TRC_HVM_EMUL 0x00084000 /* emulated devices */
#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */
#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
/*
* The highest 3 bits of the last 12 bits of TRC_SCHED_CLASS above are
* reserved for encoding what scheduler produced the information. The
* actual event is encoded in the last 9 bits.
*
* This means we have 8 scheduling IDs available (which means at most 8
* schedulers generating events) and, in each scheduler, up to 512
* different events.
*/
#define TRC_SCHED_ID_BITS 3
#define TRC_SCHED_ID_SHIFT (TRC_SUBCLS_SHIFT - TRC_SCHED_ID_BITS)
#define TRC_SCHED_ID_MASK (((1UL<<TRC_SCHED_ID_BITS) - 1) << TRC_SCHED_ID_SHIFT)
#define TRC_SCHED_EVT_MASK (~(TRC_SCHED_ID_MASK))
/* Per-scheduler IDs, to identify scheduler specific events */
#define TRC_SCHED_CSCHED 0
#define TRC_SCHED_CSCHED2 1
/* #define XEN_SCHEDULER_SEDF 2 (Removed) */
#define TRC_SCHED_ARINC653 3
#define TRC_SCHED_RTDS 4
/* Per-scheduler tracing */
#define TRC_SCHED_CLASS_EVT(_c, _e) \
( ( TRC_SCHED_CLASS | \
((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
(_e & TRC_SCHED_EVT_MASK) )
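As a usage illustration, a credit-scheduler event can be composed as below; the
name shown is the one conventionally used by the credit scheduler and is given
here only as an example.

/* Hedged example: event 1 of the credit scheduler (ID 0). The scheduler ID
 * lands in bits 9-11 of the subclass, the event number in bits 0-8. */
#define TRC_CSCHED_SCHED_TASKLET TRC_SCHED_CLASS_EVT(CSCHED, 1)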
/* Trace classes for Hardware */
#define TRC_HW_PM 0x00801000 /* Power management traces */
#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
@ -94,20 +122,51 @@
#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17)
#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
#define TRC_PV_ENTRY 0x00201000 /* Hypervisor entry points for PV guests. */
#define TRC_PV_SUBCALL 0x00202000 /* Sub-call in a multicall hypercall */
#define TRC_PV_HYPERCALL (TRC_PV + 1)
#define TRC_PV_TRAP (TRC_PV + 3)
#define TRC_PV_PAGE_FAULT (TRC_PV + 4)
#define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5)
#define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6)
#define TRC_PV_EMULATE_4GB (TRC_PV + 7)
#define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8)
#define TRC_PV_PAGING_FIXUP (TRC_PV + 9)
#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10)
#define TRC_PV_PTWR_EMULATION (TRC_PV + 11)
#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
/* Indicates that addresses in trace record are 64 bits */
#define TRC_64_FLAG (0x100)
#define TRC_PV_HYPERCALL (TRC_PV_ENTRY + 1)
#define TRC_PV_TRAP (TRC_PV_ENTRY + 3)
#define TRC_PV_PAGE_FAULT (TRC_PV_ENTRY + 4)
#define TRC_PV_FORCED_INVALID_OP (TRC_PV_ENTRY + 5)
#define TRC_PV_EMULATE_PRIVOP (TRC_PV_ENTRY + 6)
#define TRC_PV_EMULATE_4GB (TRC_PV_ENTRY + 7)
#define TRC_PV_MATH_STATE_RESTORE (TRC_PV_ENTRY + 8)
#define TRC_PV_PAGING_FIXUP (TRC_PV_ENTRY + 9)
#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV_ENTRY + 10)
#define TRC_PV_PTWR_EMULATION (TRC_PV_ENTRY + 11)
#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV_ENTRY + 12)
#define TRC_PV_HYPERCALL_V2 (TRC_PV_ENTRY + 13)
#define TRC_PV_HYPERCALL_SUBCALL (TRC_PV_SUBCALL + 14)
/*
* TRC_PV_HYPERCALL_V2 format
*
 * Only some of the hypercall arguments are recorded. Bit fields A0 to
* A5 in the first extra word are set if the argument is present and
* the arguments themselves are packed sequentially in the following
* words.
*
* The TRC_64_FLAG bit is not set for these events (even if there are
* 64-bit arguments in the record).
*
* Word
* 0 bit 31 30|29 28|27 26|25 24|23 22|21 20|19 ... 0
* A5 |A4 |A3 |A2 |A1 |A0 |Hypercall op
* 1 First 32 bit (or low word of first 64 bit) arg in record
* 2 Second 32 bit (or high word of first 64 bit) arg in record
* ...
*
* A0-A5 bitfield values:
*
* 00b Argument not present
* 01b 32-bit argument present
* 10b 64-bit argument present
* 11b Reserved
*/
#define TRC_PV_HYPERCALL_V2_ARG_32(i) (0x1 << (20 + 2*(i)))
#define TRC_PV_HYPERCALL_V2_ARG_64(i) (0x2 << (20 + 2*(i)))
#define TRC_PV_HYPERCALL_V2_ARG_MASK (0xfff00000)
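A short sketch of consuming these bit fields: given the first extra word of a
TRC_PV_HYPERCALL_V2 record, extract the hypercall op and compute how many further
32-bit words of argument data follow.

#include <stdint.h>

/* Word 0 layout: bits 19..0 hold the hypercall op, bits 20..31 hold the
 * A0..A5 presence/width fields (two bits each, as described above). */
static unsigned int hypercall_v2_extra_words(uint32_t word0, uint32_t *op)
{
    unsigned int i, words = 0;

    *op = word0 & ~TRC_PV_HYPERCALL_V2_ARG_MASK;
    for (i = 0; i < 6; i++) {
        if (word0 & TRC_PV_HYPERCALL_V2_ARG_32(i))
            words += 1;        /* 32-bit argument: one word in the record */
        else if (word0 & TRC_PV_HYPERCALL_V2_ARG_64(i))
            words += 2;        /* 64-bit argument: low word then high word */
    }
    return words;
}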
#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1)
#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2)
@ -172,6 +231,25 @@
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
/* Trace events for emulated devices */
#define TRC_HVM_EMUL_HPET_START_TIMER (TRC_HVM_EMUL + 0x1)
#define TRC_HVM_EMUL_PIT_START_TIMER (TRC_HVM_EMUL + 0x2)
#define TRC_HVM_EMUL_RTC_START_TIMER (TRC_HVM_EMUL + 0x3)
#define TRC_HVM_EMUL_LAPIC_START_TIMER (TRC_HVM_EMUL + 0x4)
#define TRC_HVM_EMUL_HPET_STOP_TIMER (TRC_HVM_EMUL + 0x5)
#define TRC_HVM_EMUL_PIT_STOP_TIMER (TRC_HVM_EMUL + 0x6)
#define TRC_HVM_EMUL_RTC_STOP_TIMER (TRC_HVM_EMUL + 0x7)
#define TRC_HVM_EMUL_LAPIC_STOP_TIMER (TRC_HVM_EMUL + 0x8)
#define TRC_HVM_EMUL_PIT_TIMER_CB (TRC_HVM_EMUL + 0x9)
#define TRC_HVM_EMUL_LAPIC_TIMER_CB (TRC_HVM_EMUL + 0xA)
#define TRC_HVM_EMUL_PIC_INT_OUTPUT (TRC_HVM_EMUL + 0xB)
#define TRC_HVM_EMUL_PIC_KICK (TRC_HVM_EMUL + 0xC)
#define TRC_HVM_EMUL_PIC_INTACK (TRC_HVM_EMUL + 0xD)
#define TRC_HVM_EMUL_PIC_POSEDGE (TRC_HVM_EMUL + 0xE)
#define TRC_HVM_EMUL_PIC_NEGEDGE (TRC_HVM_EMUL + 0xF)
#define TRC_HVM_EMUL_PIC_PEND_IRQ_CALL (TRC_HVM_EMUL + 0x10)
#define TRC_HVM_EMUL_LAPIC_PIC_INTR (TRC_HVM_EMUL + 0x11)
/* trace events for per class */
#define TRC_PM_FREQ_CHANGE (TRC_HW_PM + 0x01)
#define TRC_PM_IDLE_ENTRY (TRC_HW_PM + 0x02)
@ -187,6 +265,14 @@
#define TRC_HW_IRQ_UNMAPPED_VECTOR (TRC_HW_IRQ + 0x7)
#define TRC_HW_IRQ_HANDLED (TRC_HW_IRQ + 0x8)
/*
* Event Flags
*
 * Some events (e.g., TRC_PV_TRAP and TRC_HVM_IOMEM_READ) have multiple
* record formats. These event flags distinguish between the
* different formats.
*/
#define TRC_64_FLAG 0x100 /* Addresses are 64 bits (instead of 32 bits) */
/* This structure represents a single trace buffer record. */
struct t_rec {
@ -237,7 +323,7 @@ struct t_info {
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -31,7 +31,7 @@
/*
* Prototype for this hypercall is:
* int vcpu_op(int cmd, int vcpuid, void *extra_args)
* long vcpu_op(int cmd, unsigned int vcpuid, void *extra_args)
* @cmd == VCPUOP_??? (VCPU operation).
* @vcpuid == VCPU to operate on.
* @extra_args == Operation-specific extra arguments (NULL if none).
@ -232,7 +232,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -28,6 +28,8 @@
#ifndef __XEN_PUBLIC_VERSION_H__
#define __XEN_PUBLIC_VERSION_H__
#include "xen.h"
/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
/* arg == NULL; returns major:minor (16:16). */
@ -58,7 +60,7 @@ typedef char xen_changeset_info_t[64];
#define XENVER_platform_parameters 5
struct xen_platform_parameters {
unsigned long virt_start;
xen_ulong_t virt_start;
};
typedef struct xen_platform_parameters xen_platform_parameters_t;
@ -86,7 +88,7 @@ typedef char xen_commandline_t[1024];
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -0,0 +1,269 @@
/******************************************************************************
* vm_event.h
*
* Memory event common structures.
*
* Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _XEN_PUBLIC_VM_EVENT_H
#define _XEN_PUBLIC_VM_EVENT_H
#include "xen.h"
#define VM_EVENT_INTERFACE_VERSION 0x00000001
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#include "io/ring.h"
/*
* Memory event flags
*/
/*
* VCPU_PAUSED in a request signals that the vCPU triggering the event has been
* paused
* VCPU_PAUSED in a response signals to unpause the vCPU
*/
#define VM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
/* Flags to aid debugging vm_event */
#define VM_EVENT_FLAG_FOREIGN (1 << 1)
/*
* The following flags can be set in response to a mem_access event.
*
* Emulate the fault-causing instruction (if set in the event response flags).
* This will allow the guest to continue execution without lifting the page
* access restrictions.
*/
#define VM_EVENT_FLAG_EMULATE (1 << 2)
/*
* Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
* potentially having side effects (like memory mapped or port I/O) disabled.
*/
#define VM_EVENT_FLAG_EMULATE_NOWRITE (1 << 3)
/*
* Toggle singlestepping on vm_event response.
* Requires the vCPU to be paused already (synchronous events only).
*/
#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP (1 << 4)
/*
* Data is being sent back to the hypervisor in the event response, to be
* returned by the read function when emulating an instruction.
* This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
* and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
* (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
* VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
*/
#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
/*
* Deny completion of the operation that triggered the event.
* Currently only useful for MSR, CR0, CR3 and CR4 write events.
*/
#define VM_EVENT_FLAG_DENY (1 << 6)
/*
* This flag can be set in a request or a response
*
* On a request, indicates that the event occurred in the alternate p2m specified by
* the altp2m_idx request field.
*
* On a response, indicates that the VCPU should resume in the alternate p2m specified
* by the altp2m_idx response field if possible.
*/
#define VM_EVENT_FLAG_ALTERNATE_P2M (1 << 7)
/*
* Reasons for the vm event request
*/
/* Default case */
#define VM_EVENT_REASON_UNKNOWN 0
/* Memory access violation */
#define VM_EVENT_REASON_MEM_ACCESS 1
/* Memory sharing event */
#define VM_EVENT_REASON_MEM_SHARING 2
/* Memory paging event */
#define VM_EVENT_REASON_MEM_PAGING 3
/* A control register was updated */
#define VM_EVENT_REASON_WRITE_CTRLREG 4
/* An MSR was updated. */
#define VM_EVENT_REASON_MOV_TO_MSR 5
/* Debug operation executed (e.g. int3) */
#define VM_EVENT_REASON_SOFTWARE_BREAKPOINT 6
/* Single-step (e.g. MTF) */
#define VM_EVENT_REASON_SINGLESTEP 7
/* An event has been requested via HVMOP_guest_request_vm_event. */
#define VM_EVENT_REASON_GUEST_REQUEST 8
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
#define VM_EVENT_X86_CR3 1
#define VM_EVENT_X86_CR4 2
#define VM_EVENT_X86_XCR0 3
/*
* Using a custom struct (not hvm_hw_cpu) so as to not fill
* the vm_event ring buffer too quickly.
*/
struct vm_event_regs_x86 {
uint64_t rax;
uint64_t rcx;
uint64_t rdx;
uint64_t rbx;
uint64_t rsp;
uint64_t rbp;
uint64_t rsi;
uint64_t rdi;
uint64_t r8;
uint64_t r9;
uint64_t r10;
uint64_t r11;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t rflags;
uint64_t dr7;
uint64_t rip;
uint64_t cr0;
uint64_t cr2;
uint64_t cr3;
uint64_t cr4;
uint64_t sysenter_cs;
uint64_t sysenter_esp;
uint64_t sysenter_eip;
uint64_t msr_efer;
uint64_t msr_star;
uint64_t msr_lstar;
uint64_t fs_base;
uint64_t gs_base;
uint32_t cs_arbytes;
uint32_t _pad;
};
/*
* mem_access flag definitions
*
* These flags are set only as part of a mem_event request.
*
* R/W/X: Defines the type of violation that has triggered the event
* Multiple types can be set in a single violation!
* GLA_VALID: If the gla field holds a guest VA associated with the event
* FAULT_WITH_GLA: If the violation was triggered by accessing gla
* FAULT_IN_GPT: If the violation was triggered during translating gla
*/
#define MEM_ACCESS_R (1 << 0)
#define MEM_ACCESS_W (1 << 1)
#define MEM_ACCESS_X (1 << 2)
#define MEM_ACCESS_RWX (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_RW (MEM_ACCESS_R | MEM_ACCESS_W)
#define MEM_ACCESS_RX (MEM_ACCESS_R | MEM_ACCESS_X)
#define MEM_ACCESS_WX (MEM_ACCESS_W | MEM_ACCESS_X)
#define MEM_ACCESS_GLA_VALID (1 << 3)
#define MEM_ACCESS_FAULT_WITH_GLA (1 << 4)
#define MEM_ACCESS_FAULT_IN_GPT (1 << 5)
struct vm_event_mem_access {
uint64_t gfn;
uint64_t offset;
uint64_t gla; /* if flags has MEM_ACCESS_GLA_VALID set */
uint32_t flags; /* MEM_ACCESS_* */
uint32_t _pad;
};
struct vm_event_write_ctrlreg {
uint32_t index;
uint32_t _pad;
uint64_t new_value;
uint64_t old_value;
};
struct vm_event_debug {
uint64_t gfn;
};
struct vm_event_mov_to_msr {
uint64_t msr;
uint64_t value;
};
#define MEM_PAGING_DROP_PAGE (1 << 0)
#define MEM_PAGING_EVICT_FAIL (1 << 1)
struct vm_event_paging {
uint64_t gfn;
uint32_t p2mt;
uint32_t flags;
};
struct vm_event_sharing {
uint64_t gfn;
uint32_t p2mt;
uint32_t _pad;
};
struct vm_event_emul_read_data {
uint32_t size;
/* The struct is used in a union with vm_event_regs_x86. */
uint8_t data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
};
typedef struct vm_event_st {
uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
uint32_t flags; /* VM_EVENT_FLAG_* */
uint32_t reason; /* VM_EVENT_REASON_* */
uint32_t vcpu_id;
uint16_t altp2m_idx; /* may be used during request and response */
uint16_t _pad[3];
union {
struct vm_event_paging mem_paging;
struct vm_event_sharing mem_sharing;
struct vm_event_mem_access mem_access;
struct vm_event_write_ctrlreg write_ctrlreg;
struct vm_event_mov_to_msr mov_to_msr;
struct vm_event_debug software_breakpoint;
struct vm_event_debug singlestep;
} u;
union {
union {
struct vm_event_regs_x86 x86;
} regs;
struct vm_event_emul_read_data emul_read_data;
} data;
} vm_event_request_t, vm_event_response_t;
DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* _XEN_PUBLIC_VM_EVENT_H */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/
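
The request/response ring above is consumed by a privileged monitoring application. A minimal sketch of how such a monitor might answer a CR4 write event by denying it (the shared-ring handling and event-channel notification follow the standard Xen pattern and are assumed, as is the VM_EVENT_FLAG_VCPU_PAUSED flag defined earlier in this header):

#include <string.h>

static void
handle_vm_event(const vm_event_request_t *req, vm_event_response_t *rsp)
{
        memset(rsp, 0, sizeof(*rsp));
        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;
        rsp->reason  = req->reason;
        /* Preserve the paused state the hypervisor reported. */
        rsp->flags   = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;

        if (req->reason == VM_EVENT_REASON_WRITE_CTRLREG &&
            req->u.write_ctrlreg.index == VM_EVENT_X86_CR4)
                rsp->flags |= VM_EVENT_FLAG_DENY;   /* refuse the CR4 write */
}
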

View File

@ -27,14 +27,17 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040200
#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040600
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
#elif !defined(__XEN_INTERFACE_VERSION__)
/* Guests which do not specify a version get the legacy interface. */
#define __XEN_INTERFACE_VERSION__ 0x00000000
/*
* The interface version is not set if and only if xen/xen-os.h is not
* included.
*/
#error "Please include xen/xen-os.h"
#endif
#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
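
With this change a consumer that never defines __XEN_INTERFACE_VERSION__ gets a hard error instead of silently falling back to the legacy interface. A sketch of the include order the new #error steers towards (the header names are the in-tree FreeBSD ones and are assumptions here):

#include <xen/xen-os.h>         /* defines __XEN_INTERFACE_VERSION__ */
#include <xen/hypervisor.h>     /* pulls in xen-compat.h without hitting #error */
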

View File

@ -31,9 +31,7 @@
#if defined(__i386__) || defined(__x86_64__)
#include "arch-x86/xen.h"
#elif defined(__ia64__)
#include "arch-ia64.h"
#elif defined(__arm__)
#elif defined(__arm__) || defined (__aarch64__)
#include "arch-arm.h"
#else
#error "Unsupported architecture"
@ -45,8 +43,10 @@ DEFINE_XEN_GUEST_HANDLE(char);
__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
DEFINE_XEN_GUEST_HANDLE(int);
__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
#if __XEN_INTERFACE_VERSION__ < 0x00040300
DEFINE_XEN_GUEST_HANDLE(long);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
#endif
DEFINE_XEN_GUEST_HANDLE(void);
DEFINE_XEN_GUEST_HANDLE(uint64_t);
@ -101,6 +101,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
#define __HYPERVISOR_xenpmu_op 40
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
@ -160,6 +161,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
#define VIRQ_XENPMU 13 /* V. PMC interrupt */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@ -277,15 +279,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
* refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
* pages instead of using MTRRs.
*
* The PAT MSR is as follow (it is a 64-bit value, each entry is 8 bits):
* PAT4 PAT0
* +---+----+----+----+-----+----+----+
* WC | WC | WB | UC | UC- | WC | WB | <= Linux
* +---+----+----+----+-----+----+----+
* WC | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
* +---+----+----+----+-----+----+----+
* WC | WP | WC | UC | UC- | WT | WB | <= Xen
* +---+----+----+----+-----+----+----+
* The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
* PAT4 PAT0
* +-----+-----+----+----+----+-----+----+----+
* | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
* +-----+-----+----+----+----+-----+----+----+
* | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
* +-----+-----+----+----+----+-----+----+----+
* | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
* +-----+-----+----+----+----+-----+----+----+
*
* The lookup of this index table translates to looking up
* Bit 7, Bit 4, and Bit 3 of val entry:
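
A minimal sketch of that index lookup, using the conventional x86 PTE bit names (these defines are illustrative assumptions, not part of this header):

#include <stdint.h>

#define _PAGE_PWT       (1ULL << 3)
#define _PAGE_PCD       (1ULL << 4)
#define _PAGE_PAT       (1ULL << 7)

/* Recover the 3-bit PAT index encoded in a 4K page-table entry. */
static inline unsigned int
pte_pat_index(uint64_t pte)
{
        return (((pte & _PAGE_PAT) ? 4 : 0) |
                ((pte & _PAGE_PCD) ? 2 : 0) |
                ((pte & _PAGE_PWT) ? 1 : 0));
}
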
@ -319,48 +321,54 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
/*
* MMU EXTENDED OPERATIONS
*
* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
*
* ` enum neg_errnoval
* ` HYPERVISOR_mmuext_op(mmuext_op_t uops[],
* ` unsigned int count,
* ` unsigned int *pdone,
* ` unsigned int foreigndom)
*/
/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
* A foreigndom (FD) can be specified (or DOMID_SELF for none).
* Where the FD has some effect, it is described below.
*
*
* cmd: MMUEXT_(UN)PIN_*_TABLE
* mfn: Machine frame number to be (un)pinned as a p.t. page.
* The frame must belong to the FD, if one is specified.
*
*
* cmd: MMUEXT_NEW_BASEPTR
* mfn: Machine frame number of new page-table base to install in MMU.
*
*
* cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
* mfn: Machine frame number of new page-table base to install in MMU
* when in user space.
*
*
* cmd: MMUEXT_TLB_FLUSH_LOCAL
* No additional arguments. Flushes local TLB.
*
*
* cmd: MMUEXT_INVLPG_LOCAL
* linear_addr: Linear address to be flushed from the local TLB.
*
*
* cmd: MMUEXT_TLB_FLUSH_MULTI
* vcpumask: Pointer to bitmap of VCPUs to be flushed.
*
*
* cmd: MMUEXT_INVLPG_MULTI
* linear_addr: Linear address to be flushed.
* vcpumask: Pointer to bitmap of VCPUs to be flushed.
*
*
* cmd: MMUEXT_TLB_FLUSH_ALL
* No additional arguments. Flushes all VCPUs' TLBs.
*
*
* cmd: MMUEXT_INVLPG_ALL
* linear_addr: Linear address to be flushed from all VCPUs' TLBs.
*
*
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
*
* cmd: MMUEXT_FLUSH_CACHE_GLOBAL
* No additional arguments. Writes back and flushes cache contents
* on all CPUs in the system.
*
*
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
@ -375,6 +383,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
* cmd: MMUEXT_[UN]MARK_SUPER
* mfn: Machine frame number of head of superpage to be [un]marked.
*/
/* ` enum mmuext_cmd { */
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
#define MMUEXT_PIN_L3_TABLE 2
@ -395,10 +404,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#define MMUEXT_MARK_SUPER 19
#define MMUEXT_UNMARK_SUPER 20
/* ` } */
#ifndef __ASSEMBLY__
struct mmuext_op {
unsigned int cmd;
unsigned int cmd; /* => enum mmuext_cmd */
union {
/* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
* CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
@ -423,9 +433,24 @@ typedef struct mmuext_op mmuext_op_t;
DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
#endif
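
A minimal sketch of issuing one of the commands above, assuming the usual HYPERVISOR_mmuext_op() hypercall wrapper from the kernel's hypercall header:

static int
flush_local_tlb(void)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;        /* no extra arguments needed */
        return (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));
}
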
/*
* ` enum neg_errnoval
* ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val,
* ` enum uvm_flags flags)
* `
* ` enum neg_errnoval
* ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val,
* ` enum uvm_flags flags,
* ` domid_t domid)
* `
* ` @va: The virtual address whose mapping we want to change
* ` @val: The new page table entry, must contain a machine address
* ` @flags: Control TLB flushes
*/
/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
/* ` enum uvm_flags { */
#define UVMF_NONE (0UL<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
@ -433,6 +458,7 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
/* ` } */
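
For example, a PV guest remapping a single virtual address and flushing only that TLB entry might do the following (HYPERVISOR_update_va_mapping() is the standard wrapper and is assumed here):

static int
remap_one_page(unsigned long va, uint64_t new_pte)
{
        /* UVMF_LOCAL: local TLB only; UVMF_INVLPG: flush just this VA. */
        return (HYPERVISOR_update_va_mapping(va, new_pte,
            UVMF_INVLPG | UVMF_LOCAL));
}
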
/*
* Commands to HYPERVISOR_console_io().
@ -462,7 +488,21 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
/*
* x86/64 guests: strictly hide M2P from user mode.
* This allows the guest to control respective hypervisor behavior:
* - when not set, L4 tables get created with the respective slot blank,
* and whenever the L4 table gets used as a kernel one the missing
* mapping gets inserted,
* - when set, L4 tables get created with the respective slot initialized
* as before, and whenever the L4 table gets used as a user one the
* mapping gets zapped.
*/
#define VMASST_TYPE_m2p_strict 32
#if __XEN_INTERFACE_VERSION__ < 0x00040600
#define MAX_VMASST_TYPE 3
#endif
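
A 64-bit PV guest opts in to the strict behaviour through the vm_assist hypercall; a minimal sketch (HYPERVISOR_vm_assist() and VMASST_CMD_enable are standard names defined elsewhere in this interface and are assumed here):

static int
enable_m2p_strict(void)
{
        /* Returns non-zero if the hypervisor does not support this assist. */
        return (HYPERVISOR_vm_assist(VMASST_CMD_enable,
            VMASST_TYPE_m2p_strict));
}
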
#ifndef __ASSEMBLY__
@ -515,21 +555,28 @@ typedef struct mmu_update mmu_update_t;
DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
/*
* Send an array of these to HYPERVISOR_multicall().
* NB. The fields are natural register size for this architecture.
* ` enum neg_errnoval
* ` HYPERVISOR_multicall(multicall_entry_t call_list[],
* ` uint32_t nr_calls);
*
* NB. The fields are logically the natural register size for this
* architecture. In cases where xen_ulong_t is larger than this then
* any unused bits in the upper portion must be zero.
*/
struct multicall_entry {
unsigned long op, result;
unsigned long args[6];
xen_ulong_t op, result;
xen_ulong_t args[6];
};
typedef struct multicall_entry multicall_entry_t;
DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
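
A minimal sketch of batching two mmuext_op hypercalls through one HYPERVISOR_multicall(), which is what the entry layout above exists for (the hypercall wrappers and kernel headers are assumed):

static int
batched_mmuext(struct mmuext_op *a, struct mmuext_op *b)
{
        multicall_entry_t mc[2];
        unsigned int i;

        for (i = 0; i < 2; i++) {
                mc[i].op = __HYPERVISOR_mmuext_op;
                mc[i].args[0] = (xen_ulong_t)(uintptr_t)(i == 0 ? a : b);
                mc[i].args[1] = 1;              /* one op per entry */
                mc[i].args[2] = 0;              /* pdone: not collected */
                mc[i].args[3] = DOMID_SELF;
        }
        /* One guest/hypervisor transition instead of two. */
        return (HYPERVISOR_multicall(mc, 2));
}
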
#if __XEN_INTERFACE_VERSION__ < 0x00040400
/*
* Event channel endpoints per domain:
* Event channel endpoints per domain (when using the 2-level ABI):
* 1024 if a long is 32 bits; 4096 if a long is 64 bits.
*/
#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
#endif
struct vcpu_time_info {
/*
@ -585,8 +632,12 @@ struct vcpu_info {
* to block: this avoids wakeup-waiting races.
*/
uint8_t evtchn_upcall_pending;
#ifdef XEN_HAVE_PV_UPCALL_MASK
uint8_t evtchn_upcall_mask;
unsigned long evtchn_pending_sel;
#else /* XEN_HAVE_PV_UPCALL_MASK */
uint8_t pad0;
#endif /* XEN_HAVE_PV_UPCALL_MASK */
xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct vcpu_time_info time;
}; /* 64 bytes (x86) */
@ -595,6 +646,7 @@ typedef struct vcpu_info vcpu_info_t;
#endif
/*
* `incontents 200 startofday_shared Start-of-day shared data structure
* Xen/kernel shared data -- pointer provided in start_info.
*
* This structure is defined to be both smaller than a page, and the
@ -636,8 +688,8 @@ struct shared_info {
* per-vcpu selector word to be set. Each bit in the selector covers a
* 'C long' in the PENDING bitfield array.
*/
unsigned long evtchn_pending[sizeof(unsigned long) * 8];
unsigned long evtchn_mask[sizeof(unsigned long) * 8];
xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
@ -646,6 +698,12 @@ struct shared_info {
uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
#if !defined(__i386__)
uint32_t wc_sec_hi;
# define xen_wc_sec_hi wc_sec_hi
#elif !defined(__XEN__) && !defined(__XEN_TOOLS__)
# define xen_wc_sec_hi arch.wc_sec_hi
#endif
struct arch_shared_info arch;
@ -655,30 +713,43 @@ typedef struct shared_info shared_info_t;
#endif
/*
* Start-of-day memory layout:
* `incontents 200 startofday Start-of-day memory layout
*
* 1. The domain is started within contiguous virtual-memory region.
* 2. The contiguous region ends on an aligned 4MB boundary.
* 3. This is the order of bootstrap elements in the initial virtual region:
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
* (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
* (unless relocated due to XEN_ELFNOTE_INIT_P2M)
* d. start_info_t structure [register ESI (x86)]
* e. bootstrap page tables [pt_base, CR3 (x86)]
* f. bootstrap stack [register ESP (x86)]
* in case of dom0 this page contains the console info, too
* e. unless dom0: xenstore ring page
* f. unless dom0: console ring page
* g. bootstrap page tables [pt_base and CR3 (x86)]
* h. bootstrap stack [register ESP (x86)]
* 4. Bootstrap elements are packed together, but each is 4kB-aligned.
* 5. The initial ram disk may be omitted.
* 6. The list of page frames forms a contiguous 'pseudo-physical' memory
* 5. The list of page frames forms a contiguous 'pseudo-physical' memory
* layout for the domain. In particular, the bootstrap virtual-memory
* region is a 1:1 mapping to the first section of the pseudo-physical map.
* 7. All bootstrap elements are mapped read-writable for the guest OS. The
* 6. All bootstrap elements are mapped read-writable for the guest OS. The
* only exception is the bootstrap page table, which is mapped read-only.
* 8. There is guaranteed to be at least 512kB padding after the final
* 7. There is guaranteed to be at least 512kB padding after the final
* bootstrap element. If necessary, the bootstrap virtual region is
* extended by an extra 4MB to ensure this.
*
* Note: Prior to 25833:bb85bbccb1c9 ("x86/32-on-64 adjust Dom0 initial page
* table layout") a bug caused the pt_base (3.g above) and cr3 to not point
* to the start of the guest page tables (they were offset by two pages).
* This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU
* or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got
* allocated in the order 'first L1', 'first L2', 'first L3', so the page
* table base was offset by two pages. A 32-bit initial domain running
* under a 64-bit hypervisor should _NOT_ use the two pages preceding
* pt_base and should mark them as reserved/unused.
*/
#define MAX_GUEST_CMDLINE 1024
#ifdef XEN_HAVE_PV_GUEST_ENTRY
struct start_info {
/* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
char magic[32]; /* "xen-<version>-<platform>". */
@ -705,6 +776,7 @@ struct start_info {
/* (PFN of pre-loaded module if */
/* SIF_MOD_START_PFN set in flags). */
unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
#define MAX_GUEST_CMDLINE 1024
int8_t cmd_line[MAX_GUEST_CMDLINE];
/* The pfn range here covers both page table and p->m table frames. */
unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
@ -717,12 +789,15 @@ typedef struct start_info start_info_t;
#define console_mfn console.domU.mfn
#define console_evtchn console.domU.evtchn
#endif
#endif /* XEN_HAVE_PV_GUEST_ENTRY */
/* These flags are passed in the 'flags' field of start_info_t. */
#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
/* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
/*
@ -750,7 +825,14 @@ struct xen_multiboot_mod_list
/* Unused, must be zero */
uint32_t pad;
};
/*
* `incontents 200 startofday_dom0_console Dom0_console
*
* The console structure in start_info.console.dom0
*
* This structure includes a variety of information required to
* have a working VGA/VESA console.
*/
typedef struct dom0_vga_console_info {
uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
#define XEN_VGATYPE_TEXT_MODE_3 0x03
@ -815,6 +897,9 @@ __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
/* Default definitions for macros used by domctl/sysctl. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#ifndef int64_aligned_t
#define int64_aligned_t int64_t
#endif
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
@ -823,9 +908,9 @@ __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
#endif
#ifndef __ASSEMBLY__
struct xenctl_cpumap {
struct xenctl_bitmap {
XEN_GUEST_HANDLE_64(uint8) bitmap;
uint32_t nr_cpus;
uint32_t nr_bits;
};
#endif
@ -836,7 +921,7 @@ struct xenctl_cpumap {
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -144,7 +144,7 @@ DEFINE_XEN_GUEST_HANDLE(xenoprof_ibs_counter_t);
/*
* Local variables:
* mode: C
* c-set-style: "BSD"
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil

View File

@ -25,6 +25,8 @@
#ifndef __FLASK_OP_H__
#define __FLASK_OP_H__
#include "../event_channel.h"
#define XEN_FLASK_INTERFACE_VERSION 1
struct xen_flask_load {
@ -142,6 +144,19 @@ struct xen_flask_peersid {
uint32_t sid;
};
struct xen_flask_relabel {
/* IN */
uint32_t domid;
uint32_t sid;
};
struct xen_flask_devicetree_label {
/* IN */
uint32_t sid;
uint32_t length;
XEN_GUEST_HANDLE(char) path;
};
struct xen_flask_op {
uint32_t cmd;
#define FLASK_LOAD 1
@ -167,6 +182,8 @@ struct xen_flask_op {
#define FLASK_ADD_OCONTEXT 21
#define FLASK_DEL_OCONTEXT 22
#define FLASK_GET_PEER_SID 23
#define FLASK_RELABEL_DOMAIN 24
#define FLASK_DEVICETREE_LABEL 25
uint32_t interface_version; /* XEN_FLASK_INTERFACE_VERSION */
union {
struct xen_flask_load load;
@ -185,6 +202,8 @@ struct xen_flask_op {
/* FLASK_ADD_OCONTEXT, FLASK_DEL_OCONTEXT */
struct xen_flask_ocontext ocontext;
struct xen_flask_peersid peersid;
struct xen_flask_relabel relabel;
struct xen_flask_devicetree_label devicetree_label;
} u;
};
typedef struct xen_flask_op xen_flask_op_t;
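
The new FLASK_RELABEL_DOMAIN command is driven from the toolstack; a minimal sketch, assuming the libxc xc_flask_op() wrapper (which is not part of this header):

#include <string.h>
#include <xenctrl.h>

static int
relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid)
{
        xen_flask_op_t op;

        memset(&op, 0, sizeof(op));
        op.cmd = FLASK_RELABEL_DOMAIN;
        op.interface_version = XEN_FLASK_INTERFACE_VERSION;
        op.u.relabel.domid = domid;
        op.u.relabel.sid = sid;
        return (xc_flask_op(xch, &op));
}
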

View File

@ -33,11 +33,7 @@
#ifndef _XEN_INTR_H_
#define _XEN_INTR_H_
#ifndef __XEN_EVTCHN_PORT_DEFINED__
typedef uint32_t evtchn_port_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
#define __XEN_EVTCHN_PORT_DEFINED__ 1
#endif
#include <xen/interface/event_channel.h>
/** Registered Xen interrupt callback handle. */
typedef void * xen_intr_handle_t;

View File

@ -190,230 +190,109 @@ SUBDIR= alias \
# NB: keep these sorted by MK_* knobs
.if ${MK_AT} != "no"
SUBDIR+= at
.endif
.if ${MK_ATM} != "no"
SUBDIR+= atm
.endif
.if ${MK_BLUETOOTH} != "no"
SUBDIR+= bluetooth
.endif
.if ${MK_BSD_CPIO} != "no"
SUBDIR+= cpio
.endif
.if ${MK_CALENDAR} != "no"
SUBDIR+= calendar
.endif
.if ${MK_CLANG} != "no"
SUBDIR+= clang
.endif
.if ${MK_EE} != "no"
SUBDIR+= ee
.endif
.if ${MK_FILE} != "no"
SUBDIR+= file
.endif
.if ${MK_FINGER} != "no"
SUBDIR+= finger
.endif
.if ${MK_FTP} != "no"
SUBDIR+= ftp
.endif
.if ${MK_GAMES} != "no"
SUBDIR+= caesar
SUBDIR+= factor
SUBDIR+= fortune
SUBDIR+= grdc
SUBDIR+= morse
SUBDIR+= number
SUBDIR+= pom
SUBDIR+= primes
SUBDIR+= random
.endif
.if ${MK_GPL_DTC} != "yes"
SUBDIR+= dtc
.endif
.if ${MK_GROFF} != "no"
SUBDIR+= vgrind
.endif
.if ${MK_HESIOD} != "no"
SUBDIR+= hesinfo
.endif
.if ${MK_ICONV} != "no"
SUBDIR+= iconv
SUBDIR+= mkcsmapper
SUBDIR+= mkesdb
.endif
.if ${MK_ISCSI} != "no"
SUBDIR+= iscsictl
.endif
.if ${MK_KDUMP} != "no"
SUBDIR+= kdump
SUBDIR+= truss
.endif
.if ${MK_KERBEROS_SUPPORT} != "no"
SUBDIR+= compile_et
.endif
.if ${MK_LDNS_UTILS} != "no"
SUBDIR+= drill
SUBDIR+= host
.endif
.if ${MK_LOCATE} != "no"
SUBDIR+= locate
.endif
SUBDIR.${MK_AT}+= at
SUBDIR.${MK_ATM}+= atm
SUBDIR.${MK_BLUETOOTH}+= bluetooth
SUBDIR.${MK_BSD_CPIO}+= cpio
SUBDIR.${MK_CALENDAR}+= calendar
SUBDIR.${MK_CLANG}+= clang
SUBDIR.${MK_EE}+= ee
SUBDIR.${MK_FILE}+= file
SUBDIR.${MK_FINGER}+= finger
SUBDIR.${MK_FTP}+= ftp
SUBDIR.${MK_GAMES}+= caesar
SUBDIR.${MK_GAMES}+= factor
SUBDIR.${MK_GAMES}+= fortune
SUBDIR.${MK_GAMES}+= grdc
SUBDIR.${MK_GAMES}+= morse
SUBDIR.${MK_GAMES}+= number
SUBDIR.${MK_GAMES}+= pom
SUBDIR.${MK_GAMES}+= primes
SUBDIR.${MK_GAMES}+= random
SUBDIR.${MK_GPL_DTC}+= dtc
SUBDIR.${MK_GROFF}+= vgrind
SUBDIR.${MK_HESIOD}+= hesinfo
SUBDIR.${MK_ICONV}+= iconv
SUBDIR.${MK_ICONV}+= mkcsmapper
SUBDIR.${MK_ICONV}+= mkesdb
SUBDIR.${MK_ISCSI}+= iscsictl
SUBDIR.${MK_KDUMP}+= kdump
SUBDIR.${MK_KDUMP}+= truss
SUBDIR.${MK_KERBEROS_SUPPORT}+= compile_et
SUBDIR.${MK_LDNS_UTILS}+= drill
SUBDIR.${MK_LDNS_UTILS}+= host
SUBDIR.${MK_LOCATE}+= locate
# XXX msgs?
.if ${MK_MAIL} != "no"
SUBDIR+= biff
SUBDIR+= from
SUBDIR+= mail
SUBDIR+= msgs
SUBDIR.${MK_MAIL}+= biff
SUBDIR.${MK_MAIL}+= from
SUBDIR.${MK_MAIL}+= mail
SUBDIR.${MK_MAIL}+= msgs
SUBDIR.${MK_MAKE}+= bmake
SUBDIR.${MK_MAN_UTILS}+= catman
.if ${MK_MANDOCDB} == "no" # AND
SUBDIR.${MK_MAN_UTILS}+= makewhatis
.endif
.if ${MK_MAKE} != "no"
SUBDIR+= bmake
.endif
.if ${MK_MAN_UTILS} != "no"
SUBDIR+= catman
.if ${MK_MANDOCDB} == "no"
SUBDIR+= makewhatis
.endif
SUBDIR+= man
.endif
.if ${MK_NETCAT} != "no"
SUBDIR+= nc
.endif
.if ${MK_NIS} != "no"
SUBDIR+= ypcat
SUBDIR+= ypmatch
SUBDIR+= ypwhich
.endif
.if ${MK_OPENSSH} != "no"
SUBDIR+= ssh-copy-id
.endif
.if ${MK_OPENSSL} != "no"
SUBDIR+= bc
SUBDIR+= chkey
SUBDIR+= dc
SUBDIR+= newkey
.endif
.if ${MK_QUOTAS} != "no"
SUBDIR+= quota
.endif
.if ${MK_RCMDS} != "no"
SUBDIR+= rlogin
SUBDIR+= rsh
SUBDIR+= ruptime
SUBDIR+= rwho
.endif
.if ${MK_SENDMAIL} != "no"
SUBDIR+= vacation
.endif
.if ${MK_TALK} != "no"
SUBDIR+= talk
.endif
.if ${MK_TELNET} != "no"
SUBDIR+= telnet
.endif
.if ${MK_TESTS} != "no"
SUBDIR+= tests
.endif
.if ${MK_TEXTPROC} != "no"
SUBDIR+= checknr
SUBDIR+= colcrt
SUBDIR+= ul
.endif
.if ${MK_TFTP} != "no"
SUBDIR+= tftp
.endif
.if ${MK_TOOLCHAIN} != "no"
SUBDIR+= addr2line
SUBDIR+= ar
SUBDIR+= c89
SUBDIR+= c99
SUBDIR+= ctags
SUBDIR+= cxxfilt
SUBDIR+= elfcopy
SUBDIR+= file2c
SUBDIR.${MK_MAN_UTILS}+= man
SUBDIR.${MK_NETCAT}+= nc
SUBDIR.${MK_NIS}+= ypcat
SUBDIR.${MK_NIS}+= ypmatch
SUBDIR.${MK_NIS}+= ypwhich
SUBDIR.${MK_OPENSSH}+= ssh-copy-id
SUBDIR.${MK_OPENSSL}+= bc
SUBDIR.${MK_OPENSSL}+= chkey
SUBDIR.${MK_OPENSSL}+= dc
SUBDIR.${MK_OPENSSL}+= newkey
SUBDIR.${MK_QUOTAS}+= quota
SUBDIR.${MK_RCMDS}+= rlogin
SUBDIR.${MK_RCMDS}+= rsh
SUBDIR.${MK_RCMDS}+= ruptime
SUBDIR.${MK_RCMDS}+= rwho
SUBDIR.${MK_SENDMAIL}+= vacation
SUBDIR.${MK_TALK}+= talk
SUBDIR.${MK_TELNET}+= telnet
SUBDIR.${MK_TESTS}+= tests
SUBDIR.${MK_TEXTPROC}+= checknr
SUBDIR.${MK_TEXTPROC}+= colcrt
SUBDIR.${MK_TEXTPROC}+= ul
SUBDIR.${MK_TFTP}+= tftp
SUBDIR.${MK_TOOLCHAIN}+= addr2line
SUBDIR.${MK_TOOLCHAIN}+= ar
SUBDIR.${MK_TOOLCHAIN}+= c89
SUBDIR.${MK_TOOLCHAIN}+= c99
SUBDIR.${MK_TOOLCHAIN}+= ctags
SUBDIR.${MK_TOOLCHAIN}+= cxxfilt
SUBDIR.${MK_TOOLCHAIN}+= elfcopy
SUBDIR.${MK_TOOLCHAIN}+= file2c
.if ${MACHINE_ARCH} != "aarch64" # ARM64TODO gprof does not build
SUBDIR+= gprof
SUBDIR.${MK_TOOLCHAIN}+= gprof
.endif
SUBDIR+= indent
SUBDIR+= lex
SUBDIR+= mkstr
SUBDIR+= nm
SUBDIR+= readelf
SUBDIR+= rpcgen
SUBDIR+= unifdef
SUBDIR+= size
SUBDIR+= strings
SUBDIR.${MK_TOOLCHAIN}+= indent
SUBDIR.${MK_TOOLCHAIN}+= lex
SUBDIR.${MK_TOOLCHAIN}+= mkstr
SUBDIR.${MK_TOOLCHAIN}+= nm
SUBDIR.${MK_TOOLCHAIN}+= readelf
SUBDIR.${MK_TOOLCHAIN}+= rpcgen
SUBDIR.${MK_TOOLCHAIN}+= unifdef
SUBDIR.${MK_TOOLCHAIN}+= size
SUBDIR.${MK_TOOLCHAIN}+= strings
.if ${MACHINE_ARCH} != "aarch64" # ARM64TODO xlint does not build
SUBDIR+= xlint
.endif
SUBDIR+= xstr
SUBDIR+= yacc
.endif
.if ${MK_VI} != "no"
SUBDIR+= vi
.endif
.if ${MK_VT} != "no"
SUBDIR+= vtfontcvt
.endif
.if ${MK_USB} != "no"
SUBDIR+= usbhidaction
SUBDIR+= usbhidctl
.endif
.if ${MK_UTMPX} != "no"
SUBDIR+= last
SUBDIR+= users
SUBDIR+= who
.endif
.if ${MK_SVN} == "yes" || ${MK_SVNLITE} == "yes"
SUBDIR+= svn
SUBDIR.${MK_TOOLCHAIN}+= xlint
.endif
SUBDIR.${MK_TOOLCHAIN}+= xstr
SUBDIR.${MK_TOOLCHAIN}+= yacc
SUBDIR.${MK_VI}+= vi
SUBDIR.${MK_VT}+= vtfontcvt
SUBDIR.${MK_USB}+= usbhidaction
SUBDIR.${MK_USB}+= usbhidctl
SUBDIR.${MK_UTMPX}+= last
SUBDIR.${MK_UTMPX}+= users
SUBDIR.${MK_UTMPX}+= who
SUBDIR.${MK_SVN}+= svn
SUBDIR.${MK_SVNLITE}+= svn
.include <bsd.arch.inc.mk>
SUBDIR:= ${SUBDIR:O}
SUBDIR:= ${SUBDIR:O:u}
SUBDIR_PARALLEL=

View File

@ -564,15 +564,12 @@ eventloop(struct trussinfo *info)
}
find_thread(info, si.si_pid, pl.pl_lwpid);
if (si.si_status == SIGTRAP) {
if (si.si_status == SIGTRAP &&
(pl.pl_flags & (PL_FLAG_SCE|PL_FLAG_SCX)) != 0) {
if (pl.pl_flags & PL_FLAG_SCE)
enter_syscall(info, &pl);
else if (pl.pl_flags & PL_FLAG_SCX)
exit_syscall(info, &pl);
else
errx(1,
"pl_flags %x contains neither PL_FLAG_SCE nor PL_FLAG_SCX",
pl.pl_flags);
pending_signal = 0;
} else if (pl.pl_flags & PL_FLAG_CHILD) {
if ((info->flags & COUNTONLY) == 0)

View File

@ -1,14 +1,14 @@
# $FreeBSD$
LIBXO= ${.CURDIR:H:H}/contrib/libxo
LIBXOSRC= ${SRCTOP}/contrib/libxo
.PATH: ${LIBXO}/xo
.PATH: ${LIBXOSRC}/xo
PROG= xo
MAN= xo.1
# XXX For xoversion.h
CFLAGS+=-I${LIBXO}/libxo
CFLAGS+=-I${LIBXOSRC}/libxo
LIBADD= xo util

View File

@ -1051,17 +1051,19 @@ netbufcmp(struct netbuf *n1, struct netbuf *n2)
static bool_t
netbuf_copybuf(struct netbuf *dst, const struct netbuf *src)
{
assert(src->len <= src->maxlen);
if (dst->len != src->len || dst->buf == NULL) {
if (dst->maxlen < src->len || dst->buf == NULL) {
if (dst->buf != NULL)
free(dst->buf);
if ((dst->buf = malloc(src->len)) == NULL)
if ((dst->buf = calloc(1, src->maxlen)) == NULL)
return (FALSE);
dst->maxlen = dst->len = src->len;
dst->maxlen = src->maxlen;
}
dst->len = src->len;
memcpy(dst->buf, src->buf, src->len);
return (TRUE);
}