Simplify register state save and restore across a VMRUN:

- Host registers are now stored on the stack instead of in a per-cpu host
  context.

- Host %FS and %GS selectors are no longer saved and restored across VMRUN.
  - Restoring the %FS/%GS selectors was futile anyway, since a selector load
    only updates the low 32 bits of the base address in the hidden descriptor
    state.
  - GS.base is properly updated via a write to MSR_GSBASE on return from
    svm_launch(); see the sketch after this list.
  - FS.base is not used while inside the kernel, so it can be safely ignored.

- Add a function prologue/epilogue so svm_launch() can be traced with DTrace's
  FBT entry/return probes. They also serve to save and restore the host %rbp
  across VMRUN.
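
For illustration, a minimal C sketch of the GS.base fix-up described in the
second bullet. Hedging: wrmsr() and MSR_GSBASE are the real FreeBSD interfaces
(<machine/cpufunc.h>, <machine/specialreg.h>); the helper name and shape are
illustrative, not the literal svm_vmrun() code.

	#include <sys/param.h>
	#include <machine/cpufunc.h>	/* wrmsr() */
	#include <machine/pcpu.h>	/* __pcpu[] */
	#include <machine/specialreg.h>	/* MSR_GSBASE */

	/*
	 * 'thiscpu' must be latched before VMRUN because curcpu is itself
	 * read through %gs, which still holds guest state at this point.
	 */
	static void
	restore_host_gsbase(int thiscpu)
	{
		/*
		 * Reloading the %gs selector would fetch a base from the
		 * GDT, which can describe only a 32-bit base; the wrmsr
		 * installs the full 64-bit per-cpu base the kernel needs.
		 */
		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
	}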

Reviewed by:	grehan
Discussed with:	Anish Gupta (akgupt3@gmail.com)
Author:	Neel Natu
Date:	2014-09-27 02:04:58 +00:00
Parent:	af198d882a
Commit:	30571674ce
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/projects/bhyve_svm/; revision=272195
4 changed files with 87 additions and 147 deletions

--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -126,11 +126,6 @@ static struct asid asid[MAXCPU];
  */
 static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
 
-/*
- * S/w saved host context.
- */
-static struct svm_regctx host_ctx[MAXCPU];
-
 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
@@ -679,7 +674,7 @@ svm_inout_str_index(struct svm_regctx *regs, int in)
 {
 	uint64_t val;
 
-	val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi;
+	val = in ? regs->sctx_rdi : regs->sctx_rsi;
 	return (val);
 }
 
@@ -1156,7 +1151,7 @@ emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
 		state = svm_get_vmcb_state(sc, vcpu);
 		ctx = svm_get_guest_regctx(sc, vcpu);
 		state->rax = result & 0xffffffff;
-		ctx->e.g.sctx_rdx = result >> 32;
+		ctx->sctx_rdx = result >> 32;
 	}
 
 	return (error);
@@ -1315,7 +1310,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
 	case VMCB_EXIT_MSR:	/* MSR access. */
 		eax = state->rax;
 		ecx = ctx->sctx_rcx;
-		edx = ctx->e.g.sctx_rdx;
+		edx = ctx->sctx_rdx;
 		retu = false;
 
 		if (info1) {
@@ -1357,7 +1352,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
 		    (uint32_t *)&state->rax,
 		    (uint32_t *)&ctx->sctx_rbx,
 		    (uint32_t *)&ctx->sctx_rcx,
-		    (uint32_t *)&ctx->e.g.sctx_rdx);
+		    (uint32_t *)&ctx->sctx_rdx);
 		break;
 	case VMCB_EXIT_HLT:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
@@ -1775,7 +1770,7 @@ static int
 svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
 	void *rend_cookie, void *suspended_cookie)
 {
-	struct svm_regctx *hctx, *gctx;
+	struct svm_regctx *gctx;
 	struct svm_softc *svm_sc;
 	struct svm_vcpu *vcpustate;
 	struct vmcb_state *state;
@@ -1806,7 +1801,6 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
 	thiscpu = curcpu;
 
 	gctx = svm_get_guest_regctx(svm_sc, vcpu);
-	hctx = &host_ctx[thiscpu];
 	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
 
 	if (vcpustate->lastcpu != thiscpu) {
@@ -1885,7 +1879,7 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
 
 		/* Launch Virtual Machine. */
 		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
-		svm_launch(vmcb_pa, gctx, hctx);
+		svm_launch(vmcb_pa, gctx);
 
 		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
@@ -1950,11 +1944,11 @@ swctx_regptr(struct svm_regctx *regctx, int reg)
 	case VM_REG_GUEST_RCX:
 		return (&regctx->sctx_rcx);
 	case VM_REG_GUEST_RDX:
-		return (&regctx->e.g.sctx_rdx);
+		return (&regctx->sctx_rdx);
 	case VM_REG_GUEST_RDI:
-		return (&regctx->e.g.sctx_rdi);
+		return (&regctx->sctx_rdi);
 	case VM_REG_GUEST_RSI:
-		return (&regctx->e.g.sctx_rsi);
+		return (&regctx->sctx_rsi);
 	case VM_REG_GUEST_RBP:
 		return (&regctx->sctx_rbp);
 	case VM_REG_GUEST_R8:

--- a/sys/amd64/vmm/amd/svm.h
+++ b/sys/amd64/vmm/amd/svm.h
@@ -34,33 +34,15 @@
 	printf("SVM ERROR:%s " fmt "\n", __func__, ##args);
 
 /*
- * Software saved machine state for guest and host.
+ * Guest register state that is saved outside the VMCB.
  */
-
-/* Additional guest register state */
-struct svm_gctx {
+struct svm_regctx {
+	register_t	sctx_rbp;
+	register_t	sctx_rbx;
+	register_t	sctx_rcx;
 	register_t	sctx_rdx;
 	register_t	sctx_rdi;
 	register_t	sctx_rsi;
-	/* Points to host context area. */
-	register_t	sctx_hostctx_base;
-};
-
-/* Additional host register state */
-struct svm_hctx {
-	uint16_t	sctx_fs;
-	uint16_t	sctx_gs;
-
-	register_t	sctx_rsp;
-};
-
-/* Common register context area for guest and host. */
-struct svm_regctx {
-	register_t	sctx_rbp;
-	register_t	sctx_rbx;
-	register_t	sctx_rcx;
 	register_t	sctx_r8;
 	register_t	sctx_r9;
 	register_t	sctx_r10;
@@ -69,14 +51,9 @@ struct svm_regctx {
 	register_t	sctx_r13;
 	register_t	sctx_r14;
 	register_t	sctx_r15;
-
-	union {
-		struct svm_hctx h;	/* host-specific register state */
-		struct svm_gctx g;	/* guest-specific register state */
-	} e;
 };
 
-void svm_launch(uint64_t pa, struct svm_regctx *, struct svm_regctx *);
+void svm_launch(uint64_t pa, struct svm_regctx *);
 
 static __inline void
 disable_gintr(void)
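
For reference, the consolidated context structure and narrowed prototype that
result from applying the two hunks above (end state reconstructed from the
diff, shown in full for readability):

	/*
	 * Guest register state that is saved outside the VMCB.
	 */
	struct svm_regctx {
		register_t	sctx_rbp;
		register_t	sctx_rbx;
		register_t	sctx_rcx;
		register_t	sctx_rdx;
		register_t	sctx_rdi;
		register_t	sctx_rsi;
		register_t	sctx_r8;
		register_t	sctx_r9;
		register_t	sctx_r10;
		register_t	sctx_r11;
		register_t	sctx_r12;
		register_t	sctx_r13;
		register_t	sctx_r14;
		register_t	sctx_r15;
	};

	void svm_launch(uint64_t pa, struct svm_regctx *);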

--- a/sys/amd64/vmm/amd/svm_genassym.c
+++ b/sys/amd64/vmm/amd/svm_genassym.c
@@ -35,7 +35,9 @@ __FBSDID("$FreeBSD$");
 ASSYM(SCTX_RBX, offsetof(struct svm_regctx, sctx_rbx));
 ASSYM(SCTX_RCX, offsetof(struct svm_regctx, sctx_rcx));
 ASSYM(SCTX_RBP, offsetof(struct svm_regctx, sctx_rbp));
+ASSYM(SCTX_RDX, offsetof(struct svm_regctx, sctx_rdx));
+ASSYM(SCTX_RDI, offsetof(struct svm_regctx, sctx_rdi));
+ASSYM(SCTX_RSI, offsetof(struct svm_regctx, sctx_rsi));
 ASSYM(SCTX_R8, offsetof(struct svm_regctx, sctx_r8));
 ASSYM(SCTX_R9, offsetof(struct svm_regctx, sctx_r9));
 ASSYM(SCTX_R10, offsetof(struct svm_regctx, sctx_r10));
@@ -44,14 +46,3 @@ ASSYM(SCTX_R12, offsetof(struct svm_regctx, sctx_r12));
 ASSYM(SCTX_R13, offsetof(struct svm_regctx, sctx_r13));
 ASSYM(SCTX_R14, offsetof(struct svm_regctx, sctx_r14));
 ASSYM(SCTX_R15, offsetof(struct svm_regctx, sctx_r15));
-
-/* Guest only registers. */
-ASSYM(SCTX_GUEST_RDX, offsetof(struct svm_regctx, e.g.sctx_rdx));
-ASSYM(SCTX_GUEST_RDI, offsetof(struct svm_regctx, e.g.sctx_rdi));
-ASSYM(SCTX_GUEST_RSI, offsetof(struct svm_regctx, e.g.sctx_rsi));
-ASSYM(SCTX_GUEST_HCTX_BASE, offsetof(struct svm_regctx, e.g.sctx_hostctx_base));
-
-/* Host only registers. */
-ASSYM(SCTX_HOST_GS, offsetof(struct svm_regctx, e.h.sctx_gs));
-ASSYM(SCTX_HOST_FS, offsetof(struct svm_regctx, e.h.sctx_fs));
-ASSYM(SCTX_HOST_RSP, offsetof(struct svm_regctx, e.h.sctx_rsp));
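
The ASSYM() entries are turned into assembler-visible constants (svm_assym.s)
by the genassym build step. Assuming 8-byte register_t slots laid out in
declaration order, an inference from the struct rather than a captured build
artifact, the generated offsets would be roughly:

	#define	SCTX_RBP	0x0
	#define	SCTX_RBX	0x8
	#define	SCTX_RCX	0x10
	#define	SCTX_RDX	0x18
	#define	SCTX_RDI	0x20
	#define	SCTX_RSI	0x28
	#define	SCTX_R8		0x30
	#define	SCTX_R9		0x38
	#define	SCTX_R10	0x40
	#define	SCTX_R11	0x48
	#define	SCTX_R12	0x50
	#define	SCTX_R13	0x58
	#define	SCTX_R14	0x60
	#define	SCTX_R15	0x68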

--- a/sys/amd64/vmm/amd/svm_support.S
+++ b/sys/amd64/vmm/amd/svm_support.S
@@ -28,110 +28,88 @@
 #include "svm_assym.s"
 
-/*
- * Macros to save and restore GPRs.
- */
-#define SAVE_GPR_STATE(reg); \
-	movq %rbp, SCTX_RBP(reg); \
-	movq %rbx, SCTX_RBX(reg); \
-	movq %rcx, SCTX_RCX(reg); \
-	movq %r8, SCTX_R8(reg); \
-	movq %r9, SCTX_R9(reg); \
-	movq %r10, SCTX_R10(reg); \
-	movq %r11, SCTX_R11(reg); \
-	movq %r12, SCTX_R12(reg); \
-	movq %r13, SCTX_R13(reg); \
-	movq %r14, SCTX_R14(reg); \
-	movq %r15, SCTX_R15(reg); \
-
-#define LOAD_GPR_STATE(reg) \
-	movq SCTX_RBP(reg), %rbp; \
-	movq SCTX_RBX(reg), %rbx; \
-	movq SCTX_RCX(reg), %rcx; \
-	movq SCTX_R8(reg), %r8; \
-	movq SCTX_R9(reg), %r9; \
-	movq SCTX_R10(reg), %r10; \
-	movq SCTX_R11(reg), %r11; \
-	movq SCTX_R12(reg), %r12; \
-	movq SCTX_R13(reg), %r13; \
-	movq SCTX_R14(reg), %r14; \
-	movq SCTX_R15(reg), %r15; \
-
-/*
- * Macros to save and restore vcpu registers which are not
- * done by SVM.
- */
-#define SAVE_GUEST_STATE(reg) \
-	movq %rdi, SCTX_GUEST_RDI(reg); \
-	movq %rsi, SCTX_GUEST_RSI(reg); \
-	movq %rdx, SCTX_GUEST_RDX(reg); \
-	SAVE_GPR_STATE(reg)
-
-#define LOAD_GUEST_STATE(reg) \
-	movq SCTX_GUEST_RDI(reg), %rdi; \
-	movq SCTX_GUEST_RSI(reg), %rsi; \
-	movq SCTX_GUEST_RDX(reg), %rdx; \
-	LOAD_GPR_STATE(reg)
-
-/*
- * Macros to save and restore host registers which are not
- * saved by SVM.
- */
-#define SAVE_HOST_STATE(reg) \
-	mov %fs, SCTX_HOST_FS(reg); \
-	mov %gs, SCTX_HOST_GS(reg); \
-	movq %rsp, SCTX_HOST_RSP(reg); \
-	SAVE_GPR_STATE(reg)
-
-#define LOAD_HOST_STATE(reg) \
-	mov SCTX_HOST_FS(reg), %fs; \
-	mov SCTX_HOST_GS(reg), %gs; \
-	movq SCTX_HOST_RSP(reg), %rsp; \
-	LOAD_GPR_STATE(reg)
-
 /*
- * This is where virtual machine vcpu start execution.
- * int svm_launch(vmcb_pa, gswctx, hswctx)
- *	vmcb_pa - VMCB physical address is in %rdi.
- *	gswctx  - Guest context is in %rsi.
- *	hswctx  - Host context is in %rdx.
+ * Be friendly to DTrace FBT's prologue/epilogue pattern matching.
  *
- * Note: SVM guarantees host RSP and RAX will be restored
- * back after guest exit. RAX is where VMCB Phy addr is so
- * we are left with only RSP. RSP will hold base for guest
- * software context which will have base for host software
- * context.
+ * They are also responsible for saving/restoring the host %rbp across VMRUN.
  */
+#define	VENTER	push %rbp ; mov %rsp,%rbp
+#define	VLEAVE	pop %rbp
+
+/*
+ * svm_launch(uint64_t vmcb, struct svm_regctx *gctx)
+ *	%rdi: physical address of VMCB
+ *	%rsi: pointer to guest context
+ */
 ENTRY(svm_launch)
-	/* Save host GPRs. */
-	SAVE_HOST_STATE(%rdx)
+	VENTER
 
 	/*
-	 * Move the parameters to final destinations.
-	 * RAX - VMCB phy addr.
-	 * RSP - Guest software context.
-	 * SCTX_GUEST_HOST(guest) - Host software context.
+	 * Host register state saved across a VMRUN.
+	 *
+	 * All "callee saved registers" except:
+	 * %rsp: because it is preserved by the processor across VMRUN.
+	 * %rbp: because it is saved/restored by the function prologue/epilogue.
 	 */
-	movq %rdi, %rax
-	movq %rsi, %rsp
-	movq %rdx, SCTX_GUEST_HCTX_BASE(%rsp)
+	push %rbx
+	push %r12
+	push %r13
+	push %r14
+	push %r15
 
-	/* Load guest context. */
-	LOAD_GUEST_STATE(%rsp)
+	/* Save the physical address of the VMCB in %rax */
+	movq %rdi, %rax
+
+	push %rsi		/* push guest context pointer on the stack */
+
+	/*
+	 * Restore guest state.
+	 */
+	movq SCTX_R8(%rsi), %r8
+	movq SCTX_R9(%rsi), %r9
+	movq SCTX_R10(%rsi), %r10
+	movq SCTX_R11(%rsi), %r11
+	movq SCTX_R12(%rsi), %r12
+	movq SCTX_R13(%rsi), %r13
+	movq SCTX_R14(%rsi), %r14
+	movq SCTX_R15(%rsi), %r15
+	movq SCTX_RBP(%rsi), %rbp
+	movq SCTX_RBX(%rsi), %rbx
+	movq SCTX_RCX(%rsi), %rcx
+	movq SCTX_RDX(%rsi), %rdx
+	movq SCTX_RDI(%rsi), %rdi
+	movq SCTX_RSI(%rsi), %rsi	/* %rsi must be restored last */
 
 	vmload %rax
 	vmrun %rax
 	vmsave %rax
 
-	/* Save guest state. */
-	SAVE_GUEST_STATE(%rsp)
+	pop %rax		/* pop guest context pointer from the stack */
 
-	/* Restore host context base in RDX. */
-	movq SCTX_GUEST_HCTX_BASE(%rsp), %rdx
-
-	/* Restore host GPRs. */
-	LOAD_HOST_STATE(%rdx)
+	/*
+	 * Save guest state.
+	 */
+	movq %r8, SCTX_R8(%rax)
+	movq %r9, SCTX_R9(%rax)
+	movq %r10, SCTX_R10(%rax)
+	movq %r11, SCTX_R11(%rax)
+	movq %r12, SCTX_R12(%rax)
+	movq %r13, SCTX_R13(%rax)
+	movq %r14, SCTX_R14(%rax)
+	movq %r15, SCTX_R15(%rax)
+	movq %rbp, SCTX_RBP(%rax)
+	movq %rbx, SCTX_RBX(%rax)
+	movq %rcx, SCTX_RCX(%rax)
+	movq %rdx, SCTX_RDX(%rax)
+	movq %rdi, SCTX_RDI(%rax)
+	movq %rsi, SCTX_RSI(%rax)
+
+	/* Restore host state */
+	pop %r15
+	pop %r14
+	pop %r13
+	pop %r12
+	pop %rbx
 
+	VLEAVE
 	ret
END(svm_launch)
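
With the VENTER/VLEAVE prologue/epilogue in place, svm_launch() matches the
instruction pattern FBT looks for, so its entry and return probes fire. An
illustrative one-liner (not part of the commit) that measures time spent
inside svm_launch(), i.e. roughly guest residency per VMRUN:

	# dtrace -n '
	    fbt::svm_launch:entry  { self->ts = timestamp; }
	    fbt::svm_launch:return /self->ts/ {
		@["svm_launch (ns)"] = quantize(timestamp - self->ts);
		self->ts = 0;
	    }'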