vhost: allocate async memory dynamically

Allocate async internal memory buffer by rte_malloc(), replacing array
declaration inside vq structure. Dynamic allocation can help to save
memory footprint when async path is not registered.

Signed-off-by: Patrick Fu <patrick.fu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
This commit is contained in:
Patrick Fu 2020-10-13 09:45:44 +08:00 committed by Ferruh Yigit
parent 6b3c81db8b
commit 9287d3a5a7
2 changed files with 47 additions and 26 deletions

View File

@@ -324,6 +324,24 @@ cleanup_device(struct virtio_net *dev, int destroy)
}
}
/*
 * Free all dynamically allocated async-path buffers of a virtqueue and
 * reset the pointers, so the vq is safe to free again (idempotent) and a
 * later async registration starts from a clean state.
 */
static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
{
	/*
	 * rte_free() is documented as a no-op on NULL (like free(3)),
	 * so the "if (ptr)" guards are redundant and dropped.
	 */
	rte_free(vq->async_pkts_pending);
	rte_free(vq->async_pkts_info);
	rte_free(vq->it_pool);
	rte_free(vq->vec_pool);

	/* Clear pointers to prevent double-free on repeated calls. */
	vq->async_pkts_pending = NULL;
	vq->async_pkts_info = NULL;
	vq->it_pool = NULL;
	vq->vec_pool = NULL;
}
void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
@@ -331,10 +349,7 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
rte_free(vq->shadow_used_packed);
else {
rte_free(vq->shadow_used_split);
if (vq->async_pkts_pending)
rte_free(vq->async_pkts_pending);
if (vq->async_pkts_info)
rte_free(vq->async_pkts_info);
vhost_free_async_mem(vq);
}
rte_free(vq->batch_copy_elems);
rte_mempool_free(vq->iotlb_pool);
@@ -1524,6 +1539,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
struct rte_vhost_async_features f;
int node;
if (dev == NULL || ops == NULL)
return -1;
@@ -1556,19 +1572,32 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
goto reg_out;
}
vq->async_pkts_pending = rte_malloc(NULL,
#ifdef RTE_LIBRTE_VHOST_NUMA
if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
VHOST_LOG_CONFIG(ERR,
"unable to get numa information in async register. "
"allocating async buffer memory on the caller thread node\n");
node = SOCKET_ID_ANY;
}
#else
node = SOCKET_ID_ANY;
#endif
vq->async_pkts_pending = rte_malloc_socket(NULL,
vq->size * sizeof(uintptr_t),
RTE_CACHE_LINE_SIZE);
vq->async_pkts_info = rte_malloc(NULL,
RTE_CACHE_LINE_SIZE, node);
vq->async_pkts_info = rte_malloc_socket(NULL,
vq->size * sizeof(struct async_inflight_info),
RTE_CACHE_LINE_SIZE);
if (!vq->async_pkts_pending || !vq->async_pkts_info) {
if (vq->async_pkts_pending)
rte_free(vq->async_pkts_pending);
if (vq->async_pkts_info)
rte_free(vq->async_pkts_info);
RTE_CACHE_LINE_SIZE, node);
vq->it_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
RTE_CACHE_LINE_SIZE, node);
vq->vec_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
RTE_CACHE_LINE_SIZE, node);
if (!vq->async_pkts_pending || !vq->async_pkts_info ||
!vq->it_pool || !vq->vec_pool) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vq data "
"(vid %d, qid: %d)\n", vid, queue_id);
@@ -1616,15 +1645,7 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
goto out;
}
if (vq->async_pkts_pending) {
rte_free(vq->async_pkts_pending);
vq->async_pkts_pending = NULL;
}
if (vq->async_pkts_info) {
rte_free(vq->async_pkts_info);
vq->async_pkts_info = NULL;
}
vhost_free_async_mem(vq);
vq->async_ops.transfer_data = NULL;
vq->async_ops.check_completed_copies = NULL;

View File

@@ -198,8 +198,8 @@ struct vhost_virtqueue {
/* operation callbacks for async dma */
struct rte_vhost_async_channel_ops async_ops;
struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
struct rte_vhost_iov_iter *it_pool;
struct iovec *vec_pool;
/* async data transfer status */
uintptr_t **async_pkts_pending;