vhost: remove zero copy memory region generation logic

Currently the zero copy feature isn't generic, as it is closely coupled with the NIC.
It isn't included in the vhost lib in this version.
The GPA (guest physical address) to HPA (host physical address) mapping region
logic is removed.

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Acked-by: Changchun Ouyang <changchun.ouyang@intel.com>
This commit is contained in:
Huawei Xie 2014-10-09 02:54:39 +08:00 committed by Thomas Monjalon
parent 68e6490476
commit 0a739691fc
2 changed files with 0 additions and 196 deletions

View File

@ -40,7 +40,6 @@
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
#define PAGE_SIZE 4096
/* Enum for virtqueue management. */
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
@ -108,20 +107,6 @@ struct virtio_memory_regions {
uint64_t address_offset; /* Offset of region for address translation. */
};
/*
 * Information relating to memory regions including offsets to
 * addresses in host physical space. For any GPA inside the region,
 * GPA + host_phys_addr_offset gives the corresponding HPA.
 */
struct virtio_memory_regions_hpa {
/* Base guest physical address of region. */
uint64_t guest_phys_address;
/* End guest physical address of region (exclusive). */
uint64_t guest_phys_address_end;
/* Size of region in bytes. */
uint64_t memory_size;
/* Offset added to a GPA for gpa to hpa translation. */
uint64_t host_phys_addr_offset;
};
/*
* Memory structure includes region and mapping information.
@ -131,10 +116,6 @@ struct virtio_memory {
uint64_t mapped_address; /* Mapped address of memory file base in our applications memory space. */
uint64_t mapped_size; /* Total size of memory file. */
uint32_t nregions; /* Number of memory regions. */
/* Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
/* Memory region information for gpa to hpa translation. */
struct virtio_memory_regions_hpa *regions_hpa;
/* Memory region information. */
struct virtio_memory_regions regions[0];
};

View File

@ -344,8 +344,6 @@ cleanup_device(struct virtio_net *dev)
/* Unmap QEMU memory file if mapped. */
if (dev->mem) {
munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
if (dev->mem->regions_hpa)
free(dev->mem->regions_hpa);
free(dev->mem);
}
@ -577,153 +575,6 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
return 0;
}
/*
 * Count the physical discontinuities inside one particular region whose
 * vhost virtual address range is contiguous. The region starts at
 * vva_start and is 'size' bytes long.
 *
 * Returns the number of places where two adjacent pages are not
 * physically contiguous; returns 0 when the start address or the size
 * is not page aligned.
 */
static uint32_t check_hpa_regions(uint64_t vva_start, uint64_t size)
{
	uint32_t nregions = 0, page_size = PAGE_SIZE;
	uint64_t i, cur_phys_addr = 0, next_phys_addr = 0;

	/* Both the start address and the size must be page aligned. */
	if (vva_start % page_size) {
		LOG_DEBUG(VHOST_CONFIG,
			"in check_continuous: vva start(%p) mod page_size(%d) "
			"has remainder\n",
			(void *)(uintptr_t)vva_start, page_size);
		return 0;
	}
	if (size % page_size) {
		LOG_DEBUG(VHOST_CONFIG,
			"in check_continuous: "
			"size((%"PRIu64")) mod page_size(%d) has remainder\n",
			size, page_size);
		return 0;
	}

	/*
	 * Walk the region one page at a time and count every pair of
	 * adjacent pages that is not physically contiguous.
	 * The condition 'i + page_size < size' avoids the unsigned
	 * underflow that 'i < size - page_size' causes when size == 0,
	 * and the 64-bit index avoids wrap-around on regions >= 4GB.
	 */
	for (i = 0; i + page_size < size; i += page_size) {
		cur_phys_addr
			= rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
		next_phys_addr = rte_mem_virt2phy(
			(void *)(uintptr_t)(vva_start + i + page_size));
		if ((cur_phys_addr + page_size) != next_phys_addr) {
			++nregions;
			LOG_DEBUG(VHOST_CONFIG,
				"in check_continuous: hva addr:(%p) is not "
				"continuous with hva addr:(%p), diff:%d\n",
				(void *)(uintptr_t)(vva_start + i),
				(void *)(uintptr_t)(vva_start + i
				+ page_size), page_size);
			LOG_DEBUG(VHOST_CONFIG,
				"in check_continuous: hpa addr:(%p) is not "
				"continuous with hpa addr:(%p), "
				"diff:(%"PRIu64")\n",
				(void *)(uintptr_t)cur_phys_addr,
				(void *)(uintptr_t)next_phys_addr,
				(next_phys_addr-cur_phys_addr));
		}
	}
	return nregions;
}
/*
 * Divide each region whose vhost virtual address range is contiguous into
 * sub-regions, making sure the host physical addresses within each
 * sub-region are contiguous. Fill the GPA range, size and GPA-to-HPA
 * offset of each sub-region into regions_hpa.
 *
 * 'memory' is a struct virtio_memory * passed as void *; its regions_hpa
 * array must already have been allocated by the caller.
 * Returns the number of sub-regions filled in (0 if regions_hpa is NULL).
 */
static uint32_t fill_hpa_memory_regions(void *memory)
{
uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = PAGE_SIZE;
uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
struct virtio_memory *virtio_memory = (struct virtio_memory *)memory;
struct virtio_memory_regions_hpa *mem_region_hpa
= virtio_memory->regions_hpa;
/* Nothing to fill if the destination array was not allocated. */
if (mem_region_hpa == NULL)
return 0;
for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
/* Vhost virtual address of the start of this guest memory region. */
vva_start = virtio_memory->regions[regionidx].guest_phys_address
+ virtio_memory->regions[regionidx].address_offset;
/* Open the first sub-region at the region's base GPA. */
mem_region_hpa[regionidx_hpa].guest_phys_address
= virtio_memory->regions[regionidx].guest_phys_address;
/* Offset such that GPA + offset == HPA within this sub-region. */
mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
rte_mem_virt2phy((void *)(uintptr_t)(vva_start))
- mem_region_hpa[regionidx_hpa].guest_phys_address;
LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address));
LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
/*
 * Walk the region page by page; 'k' accumulates the size of the
 * current physically contiguous run.
 * NOTE(review): 'memory_size - page_size' underflows (unsigned) if
 * memory_size == 0 — this assumes every region is at least one page;
 * TODO confirm with the caller (set_mem_table).
 */
for (i = 0, k = 0;
i < virtio_memory->regions[regionidx].memory_size
- page_size;
i += page_size) {
cur_phys_addr = rte_mem_virt2phy(
(void *)(uintptr_t)(vva_start + i));
next_phys_addr = rte_mem_virt2phy(
(void *)(uintptr_t)(vva_start
+ i + page_size));
if ((cur_phys_addr + page_size) != next_phys_addr) {
/*
 * Physical discontinuity after the current page: close the
 * current sub-region (end/size) and open the next one at the
 * point of discontinuity.
 */
mem_region_hpa[regionidx_hpa].guest_phys_address_end =
mem_region_hpa[regionidx_hpa].guest_phys_address
+ k + page_size;
mem_region_hpa[regionidx_hpa].memory_size
= k + page_size;
LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
"phys addr end [%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: guest phys addr "
"size [%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].memory_size));
/* The next sub-region's GPA starts where this one ended. */
mem_region_hpa[regionidx_hpa + 1].guest_phys_address
= mem_region_hpa[regionidx_hpa].guest_phys_address_end;
++regionidx_hpa;
/* New offset based on the HPA of the page after the gap. */
mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
next_phys_addr
- mem_region_hpa[regionidx_hpa].guest_phys_address;
LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
" phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address));
LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: host phys addr "
"start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
/* Restart the contiguous-run accumulator. */
k = 0;
} else {
k += page_size;
}
}
/* Close the last sub-region of this guest region. */
mem_region_hpa[regionidx_hpa].guest_phys_address_end
= mem_region_hpa[regionidx_hpa].guest_phys_address
+ k + page_size;
mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
"[%d]:(%p)\n", regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
"[%d]:(%p)\n", regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].memory_size));
++regionidx_hpa;
}
return regionidx_hpa;
}
/*
* Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
@ -817,7 +668,6 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
}
}
mem->nregions = valid_regions;
mem->nregions_hpa = mem->nregions;
dev->mem = mem;
/*
@ -828,34 +678,7 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address
+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
dev->mem->nregions_hpa
+= check_hpa_regions(
dev->mem->regions[regionidx].guest_phys_address
+ dev->mem->regions[regionidx].address_offset,
dev->mem->regions[regionidx].memory_size);
}
if (dev->mem->regions_hpa != NULL) {
free(dev->mem->regions_hpa);
dev->mem->regions_hpa = NULL;
}
dev->mem->regions_hpa = (struct virtio_memory_regions_hpa *) calloc(1,
(sizeof(struct virtio_memory_regions_hpa)
* dev->mem->nregions_hpa));
if (dev->mem->regions_hpa == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%"PRIu64") Failed to allocate memory for "
"dev->mem->regions_hpa.\n", dev->device_fh);
return -1;
}
if (fill_hpa_memory_regions(
(void *)dev->mem) != dev->mem->nregions_hpa) {
RTE_LOG(ERR, VHOST_CONFIG,
"in set_mem_table: hpa memory regions number mismatch: "
"[%d]\n", dev->mem->nregions_hpa);
return -1;
}
return 0;
}