vmm: Add vm_gpa_hold_global wrapper function.
This handles the case where guest pages are being held not on behalf of a virtual CPU but globally. Previously this was handled by passing a vcpuid of -1 to vm_gpa_hold, but that will not work in the future when vm_gpa_hold is changed to accept a struct vcpu pointer.

Reviewed by:	corvink, markj
Differential Revision:	https://reviews.freebsd.org/D37160
commit 28b561ad9d
parent 0f435e6476
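As a rough illustration of the new KPI, the sketch below shows a caller that holds a guest page without any vcpu context, for example from an ioctl path. The helper zero_guest_page() and its surrounding error handling are hypothetical and not part of this change; only vm_gpa_hold_global() and vm_gpa_release() come from the vmm code touched here.

/* Hypothetical caller sketch; assumes the usual vmm kernel headers. */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>

#include <machine/vmm.h>

static int
zero_guest_page(struct vm *vm, vm_paddr_t gpa)
{
	void *cookie, *vp;

	/*
	 * Hold the page globally rather than on behalf of a vcpu; the
	 * caller is expected to have all vcpus frozen, which the
	 * INVARIANTS check in vm_gpa_hold_global() asserts.
	 */
	vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE, VM_PROT_WRITE, &cookie);
	if (vp == NULL)
		return (EFAULT);

	bzero(vp, PAGE_SIZE);

	/* Drop the hold taken above. */
	vm_gpa_release(cookie);
	return (0);
}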
@@ -253,6 +253,8 @@ int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
 vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
 void *vm_gpa_hold(struct vm *, int vcpuid, vm_paddr_t gpa, size_t len,
     int prot, void **cookie);
+void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
+    int prot, void **cookie);
 void vm_gpa_release(void *cookie);
 bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
@@ -965,8 +965,8 @@ vm_iommu_modify(struct vm *vm, bool map)
 
 		gpa = mm->gpa;
 		while (gpa < mm->gpa + mm->len) {
-			vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
-			    &cookie);
+			vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
+			    VM_PROT_WRITE, &cookie);
 			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
 			    vm_name(vm), gpa));
 
@@ -1032,30 +1032,14 @@ vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
 	return (error);
 }
 
-void *
-vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
-    void **cookie)
+static void *
+_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
+    void **cookie)
 {
 	int i, count, pageoff;
 	struct mem_map *mm;
 	vm_page_t m;
-#ifdef INVARIANTS
-	/*
-	 * All vcpus are frozen by ioctls that modify the memory map
-	 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
-	 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
-	 */
-	int state;
-	KASSERT(vcpuid >= -1 && vcpuid < vm->maxcpus, ("%s: invalid vcpuid %d",
-	    __func__, vcpuid));
-	for (i = 0; i < vm->maxcpus; i++) {
-		if (vcpuid != -1 && vcpuid != i)
-			continue;
-		state = vcpu_get_state(vm, i, NULL);
-		KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
-		    __func__, state));
-	}
-#endif
+
 	pageoff = gpa & PAGE_MASK;
 	if (len > PAGE_SIZE - pageoff)
 		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
@@ -1079,6 +1063,42 @@ vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
 	}
 }
 
+void *
+vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
+    void **cookie)
+{
+#ifdef INVARIANTS
+	/*
+	 * The current vcpu should be frozen to ensure 'vm_memmap[]'
+	 * stability.
+	 */
+	int state = vcpu_get_state(vm, vcpuid, NULL);
+	KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
+	    __func__, state));
+#endif
+	return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
+}
+
+void *
+vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
+    void **cookie)
+{
+#ifdef INVARIANTS
+	/*
+	 * All vcpus are frozen by ioctls that modify the memory map
+	 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
+	 * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
+	 */
+	int state;
+	for (int i = 0; i < vm->maxcpus; i++) {
+		state = vcpu_get_state(vm, i, NULL);
+		KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
+		    __func__, state));
+	}
+#endif
+	return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
+}
+
 void
 vm_gpa_release(void *cookie)
 {