Add in last remaining files to get AMD-SVM operational.

Submitted by:	Anish Gupta (akgupt3@gmail.com)
Committed by:	Peter Grehan, 2013-08-23 00:37:26 +00:00
parent 0bddaa8d25
commit df5e6de3e3
6 changed files with 1570 additions and 136 deletions


@@ -37,137 +37,6 @@ __FBSDID("$FreeBSD$");
 #include <machine/vmm.h>
 #include "io/iommu.h"
 
-static int
-amdv_init(void)
-{
-
-	printf("amdv_init: not implemented\n");
-	return (ENXIO);
-}
-
-static int
-amdv_cleanup(void)
-{
-
-	printf("amdv_cleanup: not implemented\n");
-	return (ENXIO);
-}
-
-static void *
-amdv_vminit(struct vm *vm)
-{
-
-	printf("amdv_vminit: not implemented\n");
-	return (NULL);
-}
-
-static int
-amdv_vmrun(void *arg, int vcpu, register_t rip)
-{
-
-	printf("amdv_vmrun: not implemented\n");
-	return (ENXIO);
-}
-
-static void
-amdv_vmcleanup(void *arg)
-{
-
-	printf("amdv_vmcleanup: not implemented\n");
-	return;
-}
-
-static int
-amdv_vmmmap_set(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, size_t length,
-		vm_memattr_t attr, int prot, boolean_t spok)
-{
-
-	printf("amdv_vmmmap_set: not implemented\n");
-	return (EINVAL);
-}
-
-static vm_paddr_t
-amdv_vmmmap_get(void *arg, vm_paddr_t gpa)
-{
-
-	printf("amdv_vmmmap_get: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_getreg(void *arg, int vcpu, int regnum, uint64_t *retval)
-{
-
-	printf("amdv_getreg: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_setreg(void *arg, int vcpu, int regnum, uint64_t val)
-{
-
-	printf("amdv_setreg: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_getdesc(void *vmi, int vcpu, int num, struct seg_desc *desc)
-{
-
-	printf("amdv_get_desc: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_setdesc(void *vmi, int vcpu, int num, struct seg_desc *desc)
-{
-
-	printf("amdv_get_desc: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_inject_event(void *vmi, int vcpu, int type, int vector,
-		  uint32_t error_code, int error_code_valid)
-{
-
-	printf("amdv_inject_event: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_getcap(void *arg, int vcpu, int type, int *retval)
-{
-
-	printf("amdv_getcap: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_setcap(void *arg, int vcpu, int type, int val)
-{
-
-	printf("amdv_setcap: not implemented\n");
-	return (EINVAL);
-}
-
-struct vmm_ops vmm_ops_amd = {
-	amdv_init,
-	amdv_cleanup,
-	amdv_vminit,
-	amdv_vmrun,
-	amdv_vmcleanup,
-	amdv_vmmmap_set,
-	amdv_vmmmap_get,
-	amdv_getreg,
-	amdv_setreg,
-	amdv_getdesc,
-	amdv_setdesc,
-	amdv_inject_event,
-	amdv_getcap,
-	amdv_setcap
-};
-
 static int
 amd_iommu_init(void)
 {
sys/amd64/vmm/amd/svm.c: new file, 1363 lines. (Diff suppressed because it is too large.)
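Since the svm.c diff is suppressed, a note on why the stub table above could be deleted: vmm.c dispatches all hardware-specific work through a pointer to a struct vmm_ops table, and this commit's svm.c now supplies a real AMD table. A minimal sketch of that dispatch pattern follows; the member names and the VMM_INIT()/VMM_RUN() wrappers are assumptions inferred from the positional initializer above, not code quoted from this commit.

```c
/*
 * Sketch only (not quoted from this commit): vmm.c keeps a pointer
 * to the active hardware-specific ops table and calls through it.
 * Until this commit, every vmm_ops_amd entry was a stub that just
 * printed "not implemented"; svm.c now provides real handlers.
 */
extern struct vmm_ops vmm_ops_amd;	/* AMD SVM (this commit) */
extern struct vmm_ops vmm_ops_intel;	/* Intel VT-x */

static struct vmm_ops *ops;		/* chosen at module load */

/* Assumed member names, based on the table layout shown above. */
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : ENXIO)
#define	VMM_RUN(vmi, vcpu, rip) \
	(ops != NULL ? (*ops->vmrun)((vmi), (vcpu), (rip)) : ENXIO)
```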


@@ -0,0 +1,49 @@
/*-
* Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef _SVM_MSR_H_
#define _SVM_MSR_H_

/*
 * SVM CPUID function, address 0x8000_000A, %edx bit decoding.
 */
#define AMD_CPUID_SVM_NP		BIT(0)	/* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)	/* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)	/* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)	/* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define AMD_CPUID_SVM_ASID_FLUSH	BIT(6)	/* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */

#endif /* _SVM_MSR_H_ */
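To illustrate how these flags are consumed, here is a hedged sketch of probing CPUID leaf 0x8000_000A. The function name is invented for the example; only do_cpuid() from <machine/cpufunc.h> is assumed from the real kernel API, and the suppressed svm.c presumably performs a similar check.

```c
#include <sys/types.h>
#include <machine/cpufunc.h>	/* do_cpuid() */

/*
 * Illustrative sketch: report whether the CPU advertises nested
 * paging (RVI).  %edx of CPUID leaf 0x8000_000A carries the SVM
 * feature bits decoded by the AMD_CPUID_SVM_* macros above.
 */
static int
svm_feature_nested_paging(void)
{
	u_int regs[4];		/* %eax, %ebx, %ecx, %edx */

	do_cpuid(0x8000000A, regs);
	return ((regs[3] & AMD_CPUID_SVM_NP) != 0);
}
```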


@@ -0,0 +1,137 @@
/*-
* Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asmacros.h>
#include "svm_assym.s"
/*
* Macros to save and restore GPRs.
*/
#define SAVE_GPR_STATE(reg)			\
	movq %rbp, SCTX_RBP(reg);		\
	movq %rbx, SCTX_RBX(reg);		\
	movq %rcx, SCTX_RCX(reg);		\
	movq %r8, SCTX_R8(reg);			\
	movq %r9, SCTX_R9(reg);			\
	movq %r10, SCTX_R10(reg);		\
	movq %r11, SCTX_R11(reg);		\
	movq %r12, SCTX_R12(reg);		\
	movq %r13, SCTX_R13(reg);		\
	movq %r14, SCTX_R14(reg);		\
	movq %r15, SCTX_R15(reg);

#define LOAD_GPR_STATE(reg)			\
	movq SCTX_RBP(reg), %rbp;		\
	movq SCTX_RBX(reg), %rbx;		\
	movq SCTX_RCX(reg), %rcx;		\
	movq SCTX_R8(reg), %r8;			\
	movq SCTX_R9(reg), %r9;			\
	movq SCTX_R10(reg), %r10;		\
	movq SCTX_R11(reg), %r11;		\
	movq SCTX_R12(reg), %r12;		\
	movq SCTX_R13(reg), %r13;		\
	movq SCTX_R14(reg), %r14;		\
	movq SCTX_R15(reg), %r15;
/*
 * Macros to save and restore vcpu registers that are not
 * saved and restored by SVM hardware.
 */
#define SAVE_GUEST_STATE(reg)			\
	movq %rdi, SCTX_GUEST_RDI(reg);		\
	movq %rsi, SCTX_GUEST_RSI(reg);		\
	movq %rdx, SCTX_GUEST_RDX(reg);		\
	SAVE_GPR_STATE(reg)

#define LOAD_GUEST_STATE(reg)			\
	movq SCTX_GUEST_RDI(reg), %rdi;		\
	movq SCTX_GUEST_RSI(reg), %rsi;		\
	movq SCTX_GUEST_RDX(reg), %rdx;		\
	LOAD_GPR_STATE(reg)

/*
 * Macros to save and restore host registers that are not
 * saved by SVM hardware.
 */
#define SAVE_HOST_STATE(reg)			\
	mov %fs, SCTX_HOST_FS(reg);		\
	mov %gs, SCTX_HOST_GS(reg);		\
	movq %rsp, SCTX_HOST_RSP(reg);		\
	SAVE_GPR_STATE(reg)

#define LOAD_HOST_STATE(reg)			\
	mov SCTX_HOST_FS(reg), %fs;		\
	mov SCTX_HOST_GS(reg), %gs;		\
	movq SCTX_HOST_RSP(reg), %rsp;		\
	LOAD_GPR_STATE(reg)
/*
 * This is where a virtual machine vcpu starts execution.
 *	int svm_launch(vmcb_pa, gswctx, hswctx)
 *	vmcb_pa - VMCB physical address, in %rdi.
 *	gswctx  - guest software context, in %rsi.
 *	hswctx  - host software context, in %rdx.
 *
 * Note: SVM guarantees that host %rsp and %rax are restored
 * after a guest exit.  %rax must hold the VMCB physical address,
 * so %rsp is the only register left to carry state: it holds the
 * base of the guest software context, which in turn stores the
 * base of the host software context.
 */
ENTRY(svm_launch)

	/* Save host GPRs. */
	SAVE_HOST_STATE(%rdx)

	/*
	 * Move the parameters to their final destinations.
	 * RAX - VMCB physical address.
	 * RSP - guest software context.
	 * SCTX_GUEST_HCTX_BASE(guest) - host software context.
	 */
	movq %rdi, %rax
	movq %rsi, %rsp
	movq %rdx, SCTX_GUEST_HCTX_BASE(%rsp)

	/* Load guest context. */
	LOAD_GUEST_STATE(%rsp)

	vmload %rax
	vmrun %rax
	vmsave %rax

	/* Save guest state. */
	SAVE_GUEST_STATE(%rsp)

	/* Restore host context base in RDX. */
	movq SCTX_GUEST_HCTX_BASE(%rsp), %rdx

	/* Restore host GPRs. */
	LOAD_HOST_STATE(%rdx)

	ret
END(svm_launch)
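Viewed from C, svm_launch() is an ordinary three-argument function, per the comment block above. A hedged sketch of the prototype and a caller follows; struct svm_regctx and the context-passing details are assumptions for illustration, since the real definitions live in the headers and the suppressed svm.c.

```c
/*
 * Sketch only: the C-side view of the assembly entry point above.
 * The context structure name is assumed, not quoted from this
 * commit; its layout is whatever svm_genassym.c turns into the
 * SCTX_* offsets used by the assembly.
 */
struct svm_regctx;		/* GPRs + guest rdi/rsi/rdx + host fs/gs/rsp */

int	svm_launch(uint64_t vmcb_pa, struct svm_regctx *gctx,
	    struct svm_regctx *hctx);

static int
svm_vcpu_enter(uint64_t vmcb_pa, struct svm_regctx *guest,
    struct svm_regctx *host)
{
	/*
	 * Returns after #VMEXIT: guest GPRs have been written back
	 * into *guest by SAVE_GUEST_STATE, and host state has been
	 * reloaded from *host by LOAD_HOST_STATE.
	 */
	return (svm_launch(vmcb_pa, guest, host));
}
```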


@@ -843,7 +843,7 @@ static int
 verify_inst_length(struct vie *vie)
 {
 
-	if (vie->num_processed == vie->num_valid)
+	if (vie->num_processed)
 		return (0);
 	else
 		return (-1);
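A plausible reading of this relaxation, hedged since the suppressed svm.c is not visible here: the SVM exit path cannot always hand the emulator the exact count of valid instruction bytes that the Intel path provides (decode assist, per AMD_CPUID_SVM_DECODE_ASSIST above, is an optional feature), so requiring num_processed == num_valid would spuriously reject otherwise successful decodes. The relaxed test only insists that decoding made progress at all.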


@@ -8,6 +8,7 @@ CFLAGS+= -DVMM_KEEP_STATS -DSMP
 CFLAGS+= -I${.CURDIR}/../../amd64/vmm
 CFLAGS+= -I${.CURDIR}/../../amd64/vmm/io
 CFLAGS+= -I${.CURDIR}/../../amd64/vmm/intel
+CFLAGS+= -I${.CURDIR}/../../amd64/vmm/amd
 
 # generic vmm support
 .PATH: ${.CURDIR}/../../amd64/vmm

@@ -40,11 +41,14 @@ SRCS+= ept.c \
 # amd-specific files
 .PATH: ${.CURDIR}/../../amd64/vmm/amd
-SRCS+= amdv.c
+SRCS+= vmcb.c \
+	svm.c \
+	npt.c \
+	amdv.c
 
-OBJS= vmx_support.o
+OBJS= vmx_support.o svm_support.o
 
-CLEANFILES= vmx_assym.s vmx_genassym.o
+CLEANFILES= vmx_assym.s vmx_genassym.o svm_assym.s svm_genassym.o
 
 vmx_assym.s: vmx_genassym.o
 .if exists(@)

@@ -52,11 +56,23 @@ vmx_assym.s: @/kern/genassym.sh
 .endif
 	sh @/kern/genassym.sh vmx_genassym.o > ${.TARGET}
 
+svm_assym.s: svm_genassym.o
+.if exists(@)
+svm_assym.s: @/kern/genassym.sh
+.endif
+	sh @/kern/genassym.sh svm_genassym.o > ${.TARGET}
+
 vmx_support.o: vmx_support.S vmx_assym.s
 	${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
 	    ${.IMPSRC} -o ${.TARGET}
 
+svm_support.o: svm_support.S svm_assym.s
+	${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
+	    ${.IMPSRC} -o ${.TARGET}
+
 vmx_genassym.o: vmx_genassym.c @ machine x86
 	${CC} -c ${CFLAGS:N-fno-common} ${.IMPSRC}
 
+svm_genassym.o: svm_genassym.c @ machine x86
+	${CC} -c ${CFLAGS:N-fno-common} ${.IMPSRC}
+
 .include <bsd.kmod.mk>
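The svm_genassym.c referenced above is one of this commit's new files not excerpted on this page; it is what produces the SCTX_* offsets that svm_support.S pulls in via the generated svm_assym.s. A minimal sketch of the standard genassym pattern, with an assumed structure name and fields:

```c
/*
 * Sketch of a genassym source (compare vmx_genassym.c).  The struct
 * and field names below are assumptions, not the commit's actual
 * contents; only the ASSYM() mechanism from <sys/assym.h> is the
 * real kernel convention.
 */
#include <sys/param.h>
#include <sys/assym.h>

struct svm_regctx {		/* assumed layout */
	register_t	sctx_rbp;
	register_t	sctx_rbx;
	register_t	sctx_rcx;
	/* ... %r8-%r15, guest %rdi/%rsi/%rdx, host %fs/%gs/%rsp ... */
};

ASSYM(SCTX_RBP, offsetof(struct svm_regctx, sctx_rbp));
ASSYM(SCTX_RBX, offsetof(struct svm_regctx, sctx_rbx));
ASSYM(SCTX_RCX, offsetof(struct svm_regctx, sctx_rcx));
/* ... one ASSYM() per offset the assembly consumes ... */
```

genassym.sh then extracts these constants from the compiled svm_genassym.o into svm_assym.s, which the svm_support.o rule above feeds to the assembler together with svm_support.S.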