vhost: export guest memory regions

Some vhost-user drivers may need this info to set up their own page tables
for GPA (guest physical addr) to HPA (host physical addr) translation.
SPDK (Storage Performance Development Kit) is one example.

Besides, by exporting this memory info, we could also export the
gpa_to_vva() as an inline function, which helps performance.
Otherwise, it has to be referenced indirectly by a "vid".

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
This commit is contained in:
Yuanhan Liu 2017-04-01 15:22:43 +08:00
parent 93433b639d
commit eb32247457
5 changed files with 70 additions and 32 deletions

View File

@ -35,6 +35,7 @@ DPDK_17.05 {
rte_vhost_driver_enable_features;
rte_vhost_driver_get_features;
rte_vhost_driver_set_features;
rte_vhost_get_mem_table;
rte_vhost_get_mtu;
} DPDK_16.07;

View File

@ -58,6 +58,28 @@
/* Enum for virtqueue management. */
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
/**
 * Information relating to memory regions including offsets to
 * addresses in QEMUs memory file.
 *
 * NOTE(review): this is a public ABI structure — field order and sizes
 * must not change without a library version bump.
 */
struct rte_vhost_mem_region {
	uint64_t guest_phys_addr;  /* GPA: region start in guest physical address space */
	uint64_t guest_user_addr;  /* QVA: region start in the frontend (QEMU) process */
	uint64_t host_user_addr;   /* HVA: region start in this (backend) process */
	uint64_t size;             /* usable region size in bytes */
	void *mmap_addr;           /* actual mmap() return address (may precede host_user_addr by mmap_offset) */
	uint64_t mmap_size;        /* total mapped length, to pass to munmap() */
	int fd;                    /* backing memory fd received from the frontend */
};
/**
 * Memory structure includes region and mapping information.
 *
 * Allocate as: sizeof(struct rte_vhost_memory) +
 *              nregions * sizeof(struct rte_vhost_mem_region).
 * regions[0] is the pre-C99 zero-length-array idiom for a trailing
 * flexible array (GNU extension); kept as-is for ABI stability.
 */
struct rte_vhost_memory {
	uint32_t nregions;                        /* number of valid entries in regions[] */
	struct rte_vhost_mem_region regions[0];   /* variable-length region table */
};
/**
* Device and vring operations.
*/
@ -248,4 +270,20 @@ uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
/**
 * Get guest mem table: a list of memory regions.
 *
 * An rte_vhost_memory object will be allocated internally, to hold the
 * guest memory regions. Application should free it at destroy_device()
 * callback.
 *
 * @param vid
 *  vhost device ID
 * @param mem
 *  To store the returned mem regions; written only on success, and the
 *  caller takes ownership of the allocation (free with free()).
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
#endif /* _VIRTIO_NET_H_ */

View File

@ -359,6 +359,29 @@ rte_vhost_get_ifname(int vid, char *buf, size_t len)
return 0;
}
int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
struct virtio_net *dev;
struct rte_vhost_memory *m;
size_t size;
dev = get_device(vid);
if (!dev)
return -1;
size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
m = malloc(size);
if (!m)
return -1;
m->nregions = dev->mem->nregions;
memcpy(m->regions, dev->mem->regions, size);
*mem = m;
return 0;
}
uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{

View File

@ -166,7 +166,7 @@ struct guest_page {
*/
struct virtio_net {
/* Frontend (QEMU) memory and memory region information */
struct virtio_memory *mem;
struct rte_vhost_memory *mem;
uint64_t features;
uint64_t protocol_features;
int vid;
@ -192,30 +192,6 @@ struct virtio_net {
struct guest_page *guest_pages;
} __rte_cache_aligned;
/**
* Information relating to memory regions including offsets to
* addresses in QEMUs memory file.
*/
struct virtio_memory_region {
uint64_t guest_phys_addr;
uint64_t guest_user_addr;
uint64_t host_user_addr;
uint64_t size;
void *mmap_addr;
uint64_t mmap_size;
int fd;
};
/**
* Memory structure includes region and mapping information.
*/
struct virtio_memory {
uint32_t nregions;
struct virtio_memory_region regions[0];
};
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
@ -255,7 +231,7 @@ extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
static inline uint64_t __attribute__((always_inline))
gpa_to_vva(struct virtio_net *dev, uint64_t gpa)
{
struct virtio_memory_region *reg;
struct rte_vhost_mem_region *reg;
uint32_t i;
for (i = 0; i < dev->mem->nregions; i++) {

View File

@ -92,7 +92,7 @@ static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
struct virtio_memory_region *reg;
struct rte_vhost_mem_region *reg;
if (!dev || !dev->mem)
return;
@ -310,7 +310,7 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
struct virtio_memory_region *reg;
struct rte_vhost_mem_region *reg;
uint32_t i;
/* Find the region where the address lives. */
@ -438,7 +438,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
}
static void
add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg,
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
uint64_t page_size)
{
uint64_t reg_size = reg->size;
@ -498,7 +498,7 @@ static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
struct VhostUserMemory memory = pmsg->payload.memory;
struct virtio_memory_region *reg;
struct rte_vhost_mem_region *reg;
void *mmap_addr;
uint64_t mmap_size;
uint64_t mmap_offset;
@ -525,8 +525,8 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
sizeof(struct guest_page));
}
dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct virtio_memory) +
sizeof(struct virtio_memory_region) * memory.nregions, 0);
dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",