Get rid of assumptions in the hypervisor that the host physical memory
associated with guest physical memory is contiguous.

In this case vm_malloc() was using vm_gpa2hpa() to indirectly infer whether
or not the address range had already been allocated.

Replace this with an explicit API 'vm_gpa_available()' that returns
TRUE if a page is available for allocation in guest physical address space.
Neel Natu 2012-09-29 01:15:45 +00:00
parent 70593114cd
commit 341f19c949
3 changed files with 54 additions and 12 deletions
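
The check the message describes is easiest to see in isolation: a page is
"available" only if it falls inside no existing guest memory segment, and
vm_malloc() now rejects a request whose range is partly allocated and partly
free. The following is a minimal user-space mock of that range scan, not the
kernel code itself; the mock_seg structure, the page-size constants, and the
sample segment are invented stand-ins for the kernel's own definitions.

/*
 * User-space mock of the new vm_malloc() availability scan.
 * All names below are placeholders, not kernel definitions.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

#define	MOCK_PAGE_SIZE	4096UL
#define	MOCK_PAGE_MASK	(MOCK_PAGE_SIZE - 1)

struct mock_seg {
	uint64_t gpa;
	uint64_t len;
};

/* One pre-existing 1MB segment at guest physical address 0. */
static struct mock_seg segs[] = { { 0x0, 0x100000 } };
static int num_segs = 1;

/* TRUE if 'gpa' falls outside every existing segment, i.e. is unallocated. */
static bool
gpa_available(uint64_t gpa)
{
	for (int i = 0; i < num_segs; i++) {
		if (gpa >= segs[i].gpa && gpa < segs[i].gpa + segs[i].len)
			return (false);
	}
	return (true);
}

/* Mirror of the new range scan: mixed allocated/available ranges are rejected. */
static int
check_range(uint64_t gpa, uint64_t len)
{
	int available = 0, allocated = 0;

	if ((gpa & MOCK_PAGE_MASK) || (len & MOCK_PAGE_MASK) || len == 0)
		return (EINVAL);

	for (uint64_t g = gpa; g < gpa + len; g += MOCK_PAGE_SIZE) {
		if (gpa_available(g))
			available++;
		else
			allocated++;
	}

	if (allocated && available)
		return (EINVAL);	/* partially allocated range */
	if (allocated)
		return (0);		/* fully allocated: nothing to do */
	return (0);			/* fully available: would create a new segment */
}

int
main(void)
{
	printf("fully allocated : %d\n", check_range(0x0, 0x100000));
	printf("partially mapped: %d\n", check_range(0x80000, 0x100000));
	printf("fully available : %d\n", check_range(0x200000, 0x100000));
	return (0);
}

Compiled with a C99 compiler, the three calls print 0, EINVAL (22 on FreeBSD),
and 0, matching the fully-allocated, mixed, and fully-available cases.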


@@ -89,7 +89,7 @@ extern struct vmm_ops vmm_ops_amd;
 struct vm *vm_create(const char *name);
 void vm_destroy(struct vm *vm);
 const char *vm_name(struct vm *vm);
-int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa);
+int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
 int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
 int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
 vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);


@@ -315,20 +315,63 @@ vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
 		    VM_PROT_NONE, spok));
 }
 
 int
-vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
+/*
+ * Returns TRUE if 'gpa' is available for allocation and FALSE otherwise
+ */
+static boolean_t
+vm_gpa_available(struct vm *vm, vm_paddr_t gpa)
 {
-	int error;
-	vm_paddr_t hpa;
+	int i;
+	vm_paddr_t gpabase, gpalimit;
+
+	if (gpa & PAGE_MASK)
+		panic("vm_gpa_available: gpa (0x%016lx) not page aligned", gpa);
+
+	for (i = 0; i < vm->num_mem_segs; i++) {
+		gpabase = vm->mem_segs[i].gpa;
+		gpalimit = gpabase + vm->mem_segs[i].len;
+		if (gpa >= gpabase && gpa < gpalimit)
+			return (FALSE);
+	}
+
+	return (TRUE);
+}
+
+int
+vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
+{
+	int error, available, allocated;
+	vm_paddr_t g, hpa;
 	const boolean_t spok = TRUE;	/* superpage mappings are ok */
 
+	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
+		return (EINVAL);
+
+	available = allocated = 0;
+	g = gpa;
+	while (g < gpa + len) {
+		if (vm_gpa_available(vm, g))
+			available++;
+		else
+			allocated++;
+
+		g += PAGE_SIZE;
+	}
+
 	/*
-	 * find the hpa if already it was already vm_malloc'd.
+	 * If there are some allocated and some available pages in the address
+	 * range then it is an error.
 	 */
-	hpa = vm_gpa2hpa(vm, gpa, len);
-	if (hpa != ((vm_paddr_t)-1))
-		goto out;
+	if (allocated && available)
+		return (EINVAL);
+
+	/*
+	 * If the entire address range being requested has already been
+	 * allocated then there isn't anything more to do.
+	 */
+	if (allocated && available == 0)
+		return (0);
 
 	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
 		return (E2BIG);
@@ -350,8 +393,7 @@ vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
 	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
 	vm->mem_segs[vm->num_mem_segs].len = len;
 	vm->num_mem_segs++;
-out:
-	*ret_hpa = hpa;
+
 	return (0);
 }


@@ -295,7 +295,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
 		break;
 	case VM_MAP_MEMORY:
 		seg = (struct vm_memory_segment *)data;
-		error = vm_malloc(sc->vm, seg->gpa, seg->len, &seg->hpa);
+		error = vm_malloc(sc->vm, seg->gpa, seg->len);
 		break;
 	case VM_GET_MEMORY_SEG:
 		seg = (struct vm_memory_segment *)data;
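
For userland, the visible change is that vm_malloc() no longer hands back a
host physical address, so a VM_MAP_MEMORY caller only fills in gpa and len.
Below is a hypothetical sketch of such a call; the /dev/vmm/<name> device
path, the included headers, the segment size, and the error handling are
assumptions for illustration, and only the VM_MAP_MEMORY command and the
gpa/len fields of struct vm_memory_segment come from the diff above.

/*
 * Hypothetical userland caller (assumed device path, headers, and sizes).
 * After this commit no hpa is reported back; the caller supplies only the
 * guest physical base and length.
 */
#include <sys/param.h>
#include <sys/ioctl.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct vm_memory_segment seg;
	int error, fd;

	fd = open("/dev/vmm/testvm", O_RDWR);	/* VM created beforehand */
	if (fd < 0) {
		perror("open");
		return (1);
	}

	memset(&seg, 0, sizeof(seg));
	seg.gpa = 0;				/* guest physical base */
	seg.len = 64UL * 1024 * 1024;		/* 64MB, page aligned */

	error = ioctl(fd, VM_MAP_MEMORY, &seg);
	if (error != 0)
		perror("VM_MAP_MEMORY");

	return (error != 0);
}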