vhost: add thread-unsafe async registration

This patch adds thread-unsafe versions of the async channel register and
unregister functions.

Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
This commit is contained in:
Jiayu Hu 2021-07-06 04:29:34 -04:00 committed by Chenbo Xia
parent acbc38887b
commit fa51f1aa08
4 changed files with 214 additions and 77 deletions

View File

@ -256,6 +256,14 @@ The following is an overview of some key Vhost API functions:
vhost invokes this function to get the copy data completed by async
devices.
* ``rte_vhost_async_channel_register_thread_unsafe(vid, queue_id, config, ops)``
Register an async copy device channel for a vhost queue without
performing any locking.
This function is only safe to call from within vhost callback functions
(i.e., the callbacks declared in struct vhost_device_ops).
* ``rte_vhost_async_channel_unregister(vid, queue_id)``
Unregister the async copy device channel from a vhost queue.
@ -268,6 +276,14 @@ The following is an overview of some key Vhost API functions:
devices for all vhost queues in destroy_device(), when a
virtio device is paused or shut down.
* ``rte_vhost_async_channel_unregister_thread_unsafe(vid, queue_id)``
Unregister the async copy device channel for a vhost queue without
performing any locking.
This function is only safe to call from within vhost callback functions
(i.e., the callbacks declared in struct vhost_device_ops).
* ``rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, count, comp_pkts, comp_count)``
Submit an enqueue request to transmit ``count`` packets from host to guest

View File

@ -140,6 +140,47 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
__rte_experimental
int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
/**
 * Register an async channel for a vhost queue without performing any
 * locking.
 *
 * @note This function does not perform any locking, and is only safe to
 * call from within vhost callback functions (i.e., the callbacks declared
 * in struct vhost_device_ops).
 *
 * @param vid
 * id of the vhost device the async channel will be attached to
 * @param queue_id
 * id of the vhost queue the async channel will be attached to
 * @param config
 * Async channel configuration
 * @param ops
 * Async channel operation callbacks
 * @return
 * 0 on success, -1 on failure
 */
__rte_experimental
int rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
struct rte_vhost_async_config config,
struct rte_vhost_async_channel_ops *ops);
/**
 * Unregister an async channel for a vhost queue without performing any
 * locking.
 *
 * @note This function does not perform any locking, and is only safe to
 * call from within vhost callback functions (i.e., the callbacks declared
 * in struct vhost_device_ops).
 *
 * @param vid
 * id of the vhost device the async channel will be detached from
 * @param queue_id
 * id of the vhost queue the async channel will be detached from
 * @return
 * 0 on success, -1 on failure
 */
__rte_experimental
int rte_vhost_async_channel_unregister_thread_unsafe(int vid,
uint16_t queue_id);
/**
* This function submits enqueue data to async engine. Successfully
* enqueued packets can be transfer completed or being occupied by DMA

View File

@ -82,4 +82,6 @@ EXPERIMENTAL {
# added in 21.08
rte_vhost_async_get_inflight;
rte_vhost_async_channel_register_thread_unsafe;
rte_vhost_async_channel_unregister_thread_unsafe;
};

View File

@ -1619,6 +1619,87 @@ int rte_vhost_extern_callback_register(int vid,
return 0;
}
/*
 * Allocate the per-virtqueue async channel resources and install the
 * application-provided operation callbacks for queue @queue_id of device
 * @vid.
 *
 * This helper performs no locking itself: the locked path
 * (rte_vhost_async_channel_register) calls it under vq->access_lock, while
 * the thread-unsafe variant calls it directly.
 *
 * Returns 0 on success, -1 if the channel is already registered or any
 * allocation fails. On any allocation failure, partially allocated async
 * memory is released via vhost_free_async_mem() before returning.
 *
 * NOTE(review): assumes the caller has already validated vid/queue_id, so
 * dev and vq are non-NULL here — both public entry points do this check.
 */
static __rte_always_inline int
async_channel_register(int vid, uint16_t queue_id,
struct rte_vhost_async_config config,
struct rte_vhost_async_channel_ops *ops)
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
/* Refuse double registration on the same queue. */
if (unlikely(vq->async_registered)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: channel already registered "
"(vid %d, qid: %d)\n", vid, queue_id);
return -1;
}
/* Per-descriptor tracking of in-flight async packets (vq->size entries),
 * allocated on the queue's NUMA node. */
vq->async_pkts_info = rte_malloc_socket(NULL,
vq->size * sizeof(struct async_inflight_info),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->async_pkts_info) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async_pkts_info "
"(vid %d, qid: %d)\n", vid, queue_id);
return -1;
}
/* Pool of iov iterators used to describe async copy transfers. */
vq->it_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->it_pool) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for it_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
return -1;
}
/* Pool of scatter-gather vectors backing the iterators above. */
vq->vec_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->vec_pool) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vec_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
return -1;
}
/* Ring-format-specific shadow used-element storage: packed and split
 * rings track completions with different element types. */
if (vq_is_packed(dev)) {
vq->async_buffers_packed = rte_malloc_socket(NULL,
vq->size * sizeof(struct vring_used_elem_packed),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->async_buffers_packed) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async buffers "
"(vid %d, qid: %d)\n", vid, queue_id);
return -1;
}
} else {
vq->async_descs_split = rte_malloc_socket(NULL,
vq->size * sizeof(struct vring_used_elem),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->async_descs_split) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async descs "
"(vid %d, qid: %d)\n", vid, queue_id);
return -1;
}
}
/* Install the application callbacks and mark the channel live. */
vq->async_ops.check_completed_copies = ops->check_completed_copies;
vq->async_ops.transfer_data = ops->transfer_data;
vq->async_threshold = config.async_threshold;
vq->async_registered = true;
return 0;
}
int
rte_vhost_async_channel_register(int vid, uint16_t queue_id,
struct rte_vhost_async_config config,
@ -1626,6 +1707,44 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
int ret;
if (dev == NULL || ops == NULL)
return -1;
if (queue_id >= VHOST_MAX_VRING)
return -1;
vq = dev->virtqueue[queue_id];
if (unlikely(vq == NULL || !dev->async_copy))
return -1;
if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
VHOST_LOG_CONFIG(ERR,
"async copy is not supported on non-inorder mode "
"(vid %d, qid: %d)\n", vid, queue_id);
return -1;
}
if (unlikely(ops->check_completed_copies == NULL ||
ops->transfer_data == NULL))
return -1;
rte_spinlock_lock(&vq->access_lock);
ret = async_channel_register(vid, queue_id, config, ops);
rte_spinlock_unlock(&vq->access_lock);
return ret;
}
int
rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
struct rte_vhost_async_config config,
struct rte_vhost_async_channel_ops *ops)
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
if (dev == NULL || ops == NULL)
return -1;
@ -1649,82 +1768,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
ops->transfer_data == NULL))
return -1;
rte_spinlock_lock(&vq->access_lock);
if (unlikely(vq->async_registered)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: channel already registered "
"(vid %d, qid: %d)\n", vid, queue_id);
goto reg_out;
}
vq->async_pkts_info = rte_malloc_socket(NULL,
vq->size * sizeof(struct async_inflight_info),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->async_pkts_info) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async_pkts_info "
"(vid %d, qid: %d)\n", vid, queue_id);
goto reg_out;
}
vq->it_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->it_pool) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for it_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
goto reg_out;
}
vq->vec_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->vec_pool) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vec_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
goto reg_out;
}
if (vq_is_packed(dev)) {
vq->async_buffers_packed = rte_malloc_socket(NULL,
vq->size * sizeof(struct vring_used_elem_packed),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->async_buffers_packed) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async buffers "
"(vid %d, qid: %d)\n", vid, queue_id);
goto reg_out;
}
} else {
vq->async_descs_split = rte_malloc_socket(NULL,
vq->size * sizeof(struct vring_used_elem),
RTE_CACHE_LINE_SIZE, vq->numa_node);
if (!vq->async_descs_split) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async descs "
"(vid %d, qid: %d)\n", vid, queue_id);
goto reg_out;
}
}
vq->async_ops.check_completed_copies = ops->check_completed_copies;
vq->async_ops.transfer_data = ops->transfer_data;
vq->async_threshold = config.async_threshold;
vq->async_registered = true;
reg_out:
rte_spinlock_unlock(&vq->access_lock);
return 0;
return async_channel_register(vid, queue_id, config, ops);
}
int
@ -1775,6 +1819,41 @@ out:
return ret;
}
/*
 * Thread-unsafe counterpart of rte_vhost_async_channel_unregister():
 * releases the async channel resources of queue @queue_id on device @vid
 * without taking vq->access_lock, so it is only safe where exclusive
 * access is otherwise guaranteed (e.g. from vhost_device_ops callbacks).
 *
 * Returns 0 on success (including when no channel was registered, making
 * the call idempotent), -1 on invalid arguments or if async copies are
 * still in flight.
 */
int
rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
if (dev == NULL)
return -1;
if (queue_id >= VHOST_MAX_VRING)
return -1;
vq = dev->virtqueue[queue_id];
if (vq == NULL)
return -1;
/* Nothing registered: treat as success. */
if (!vq->async_registered)
return 0;
/* Refuse to tear down while DMA transfers are still outstanding —
 * the async memory freed below is referenced by in-flight copies. */
if (vq->async_pkts_inflight_n) {
VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
"async inflight packets must be completed before unregistration.\n");
return -1;
}
vhost_free_async_mem(vq);
vq->async_ops.transfer_data = NULL;
vq->async_ops.check_completed_copies = NULL;
vq->async_registered = false;
return 0;
}
int rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
{
struct vhost_virtqueue *vq;
@ -1805,7 +1884,6 @@ int rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
rte_spinlock_unlock(&vq->access_lock);
return ret;
}
RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);