vhost: move async data in dedicated structure

This patch moves async-related metadata from vhost_virtqueue
to a dedicated structure. It makes clear which fields are
async-related, and also saves some memory when the async
feature is not in use.
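The idea, as a minimal standalone sketch (hypothetical names, not the actual
DPDK types): feature-only fields move out of the per-queue structure behind a
single pointer, whose NULL/non-NULL state doubles as the "registered" flag
that the bool used to provide.

#include <stdbool.h>
#include <stdint.h>

/* Before: every queue carries the async fields, used or not. */
struct queue_before {
	uint16_t size;
	void *async_bufs;          /* async-only */
	uint16_t async_idx;        /* async-only */
	bool async_registered;
};

/* After: one pointer, allocated only on async registration. */
struct async_state {
	void *bufs;
	uint16_t idx;
};

struct queue_after {
	uint16_t size;
	struct async_state *async; /* NULL means "not registered" */
};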

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Reviewed-by: Jiayu Hu <jiayu.hu@intel.com>
Author: Maxime Coquelin <maxime.coquelin@redhat.com>
Date:   2021-10-26 18:28:50 +02:00
Commit: ee8024b3d4
Parent: 3c3c54cfa6
4 changed files with 142 additions and 159 deletions

lib/vhost/vhost.c

@@ -340,19 +340,18 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
-	rte_free(vq->async_pkts_info);
+	if (!vq->async)
+		return;
 
-	rte_free(vq->async_buffers_packed);
-	vq->async_buffers_packed = NULL;
-	rte_free(vq->async_descs_split);
-	vq->async_descs_split = NULL;
+	rte_free(vq->async->pkts_info);
 
-	rte_free(vq->it_pool);
-	rte_free(vq->vec_pool);
+	rte_free(vq->async->buffers_packed);
+	vq->async->buffers_packed = NULL;
+	rte_free(vq->async->descs_split);
+	vq->async->descs_split = NULL;
 
-	vq->async_pkts_info = NULL;
-	vq->it_pool = NULL;
-	vq->vec_pool = NULL;
+	rte_free(vq->async);
+	vq->async = NULL;
 }
 
 void
@@ -1632,77 +1631,63 @@ async_channel_register(int vid, uint16_t queue_id,
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+	struct vhost_async *async;
+	int node = vq->numa_node;
 
-	if (unlikely(vq->async_registered)) {
+	if (unlikely(vq->async)) {
 		VHOST_LOG_CONFIG(ERR,
-			"async register failed: channel already registered "
-			"(vid %d, qid: %d)\n", vid, queue_id);
+				"async register failed: already registered (vid %d, qid: %d)\n",
+				vid, queue_id);
 		return -1;
 	}
 
-	vq->async_pkts_info = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct async_inflight_info),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-	if (!vq->async_pkts_info) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for async_pkts_info "
-			"(vid %d, qid: %d)\n", vid, queue_id);
+	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
+	if (!async) {
+		VHOST_LOG_CONFIG(ERR, "failed to allocate async metadata (vid %d, qid: %d)\n",
+				vid, queue_id);
 		return -1;
 	}
 
-	vq->it_pool = rte_malloc_socket(NULL,
-			VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-	if (!vq->it_pool) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for it_pool "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		return -1;
-	}
-
-	vq->vec_pool = rte_malloc_socket(NULL,
-			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-	if (!vq->vec_pool) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for vec_pool "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		return -1;
+	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
+			RTE_CACHE_LINE_SIZE, node);
+	if (!async->pkts_info) {
+		VHOST_LOG_CONFIG(ERR, "failed to allocate async_pkts_info (vid %d, qid: %d)\n",
+				vid, queue_id);
+		goto out_free_async;
 	}
 
 	if (vq_is_packed(dev)) {
-		vq->async_buffers_packed = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem_packed),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-		if (!vq->async_buffers_packed) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async buffers "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			return -1;
+		async->buffers_packed = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem_packed),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->buffers_packed) {
+			VHOST_LOG_CONFIG(ERR, "failed to allocate async buffers (vid %d, qid: %d)\n",
+					vid, queue_id);
+			goto out_free_inflight;
 		}
 	} else {
-		vq->async_descs_split = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-		if (!vq->async_descs_split) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async descs "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			return -1;
+		async->descs_split = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->descs_split) {
+			VHOST_LOG_CONFIG(ERR, "failed to allocate async descs (vid %d, qid: %d)\n",
+					vid, queue_id);
+			goto out_free_inflight;
 		}
 	}
 
-	vq->async_ops.check_completed_copies = ops->check_completed_copies;
-	vq->async_ops.transfer_data = ops->transfer_data;
+	async->ops.check_completed_copies = ops->check_completed_copies;
+	async->ops.transfer_data = ops->transfer_data;
 
-	vq->async_registered = true;
+	vq->async = async;
 
 	return 0;
+out_free_inflight:
+	rte_free(async->pkts_info);
+out_free_async:
+	rte_free(async);
+	return -1;
 }
 
 int
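Note the error-handling change above: instead of calling the full
vhost_free_async_mem() after every failed allocation, the new code unwinds
with goto labels, freeing only what was already allocated. A minimal
standalone sketch of the same idiom (hypothetical names):

#include <stdlib.h>

struct ctx { void *a; void *b; };

static struct ctx *ctx_create(size_t n)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (c == NULL)
		return NULL;
	c->a = malloc(n);
	if (c->a == NULL)
		goto out_free_ctx;	/* nothing else to undo yet */
	c->b = malloc(n);
	if (c->b == NULL)
		goto out_free_a;	/* undo in reverse order */
	return c;
out_free_a:
	free(c->a);
out_free_ctx:
	free(c);
	return NULL;
}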
@@ -1796,7 +1781,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 	ret = 0;
 
-	if (!vq->async_registered)
+	if (!vq->async)
 		return ret;
 
 	if (!rte_spinlock_trylock(&vq->access_lock)) {
@@ -1805,7 +1790,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 		return -1;
 	}
 
-	if (vq->async_pkts_inflight_n) {
+	if (vq->async->pkts_inflight_n) {
 		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
 			"async inflight packets must be completed before unregistration.\n");
 		ret = -1;
@@ -1813,11 +1798,6 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 	}
 
 	vhost_free_async_mem(vq);
-
-	vq->async_ops.transfer_data = NULL;
-	vq->async_ops.check_completed_copies = NULL;
-	vq->async_registered = false;
-
 out:
 	rte_spinlock_unlock(&vq->access_lock);
@@ -1841,10 +1821,10 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
 	if (vq == NULL)
 		return -1;
 
-	if (!vq->async_registered)
+	if (!vq->async)
 		return 0;
 
-	if (vq->async_pkts_inflight_n) {
+	if (vq->async->pkts_inflight_n) {
 		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
 			"async inflight packets must be completed before unregistration.\n");
 		return -1;
@@ -1852,10 +1832,6 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
 	vhost_free_async_mem(vq);
 
-	vq->async_ops.transfer_data = NULL;
-	vq->async_ops.check_completed_copies = NULL;
-	vq->async_registered = false;
-
 	return 0;
 }
@@ -1877,7 +1853,7 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 	if (vq == NULL)
 		return ret;
 
-	if (!vq->async_registered)
+	if (!vq->async)
 		return ret;
 
 	if (!rte_spinlock_trylock(&vq->access_lock)) {
@@ -1886,7 +1862,7 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 		return ret;
 	}
 
-	ret = vq->async_pkts_inflight_n;
+	ret = vq->async->pkts_inflight_n;
 	rte_spinlock_unlock(&vq->access_lock);
 
 	return ret;

lib/vhost/vhost.h

@@ -119,6 +119,32 @@ struct vring_used_elem_packed {
 	uint32_t count;
 };
 
+struct vhost_async {
+	/* operation callbacks for DMA */
+	struct rte_vhost_async_channel_ops ops;
+
+	struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
+	struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+
+	/* data transfer status */
+	struct async_inflight_info *pkts_info;
+	uint16_t pkts_idx;
+	uint16_t pkts_inflight_n;
+	uint16_t last_pkts_n;
+	union {
+		struct vring_used_elem *descs_split;
+		struct vring_used_elem_packed *buffers_packed;
+	};
+	union {
+		uint16_t desc_idx_split;
+		uint16_t buffer_idx_packed;
+	};
+	union {
+		uint16_t last_desc_idx_split;
+		uint16_t last_buffer_idx_packed;
+	};
+};
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
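One consequence of this layout: it_pool and vec_pool are now inline arrays
rather than separately allocated pools, so struct vhost_async itself is fairly
large, while a virtqueue that never registers async pays only for the pointer.
A rough standalone illustration (stand-in sizes; the real VHOST_MAX_ASYNC_IT
and VHOST_MAX_ASYNC_VEC values come from the vhost headers):

#include <stdio.h>
#include <sys/uio.h>

/* Stand-ins, for illustration only. */
#define ASYNC_IT_ENTRIES  64
#define ASYNC_VEC_ENTRIES 2048

struct iov_iter_stub { struct iovec *iov; unsigned long nr_segs; };

struct async_stub {
	struct iov_iter_stub it_pool[ASYNC_IT_ENTRIES];
	struct iovec vec_pool[ASYNC_VEC_ENTRIES];
	/* ...bookkeeping fields elided... */
};

int main(void)
{
	printf("embedded in every vq: %zu bytes\n", sizeof(struct async_stub));
	printf("as a pointer:         %zu bytes\n", sizeof(struct async_stub *));
	return 0;
}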
@@ -193,32 +219,7 @@ struct vhost_virtqueue {
 	struct rte_vhost_resubmit_info *resubmit_inflight;
 	uint64_t		global_counter;
 
-	/* operation callbacks for async dma */
-	struct rte_vhost_async_channel_ops	async_ops;
-
-	struct rte_vhost_iov_iter *it_pool;
-	struct iovec *vec_pool;
-
-	/* async data transfer status */
-	struct async_inflight_info *async_pkts_info;
-	uint16_t	async_pkts_idx;
-	uint16_t	async_pkts_inflight_n;
-	uint16_t	async_last_pkts_n;
-	union {
-		struct vring_used_elem *async_descs_split;
-		struct vring_used_elem_packed *async_buffers_packed;
-	};
-	union {
-		uint16_t async_desc_idx_split;
-		uint16_t async_buffer_idx_packed;
-	};
-	union {
-		uint16_t last_async_desc_idx_split;
-		uint16_t last_async_buffer_idx_packed;
-	};
-
-	/* vq async features */
-	bool		async_registered;
+	struct vhost_async	*async;
 
 	int			notif_enable;
 #define VIRTIO_UNINITIALIZED_NOTIF	(-1)

lib/vhost/vhost_user.c

@@ -2208,8 +2208,8 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 		"set queue enable: %d to qp idx: %d\n",
 		enable, index);
 
-	if (enable && dev->virtqueue[index]->async_registered) {
-		if (dev->virtqueue[index]->async_pkts_inflight_n) {
+	if (enable && dev->virtqueue[index]->async) {
+		if (dev->virtqueue[index]->async->pkts_inflight_n) {
 			VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
 				"async inflight packets must be completed first\n");
 			return RTE_VHOST_MSG_RESULT_ERR;

lib/vhost/virtio_net.c

@@ -1510,12 +1510,13 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	uint16_t num_buffers;
 	uint16_t avail_head;
 
-	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
-	struct iovec *vec_pool = vq->vec_pool;
+	struct vhost_async *async = vq->async;
+	struct rte_vhost_iov_iter *it_pool = async->it_pool;
+	struct iovec *vec_pool = async->vec_pool;
 	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
 	struct iovec *src_iovec = vec_pool;
 	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
-	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	int32_t n_xfer;
 	uint16_t segs_await = 0;
@@ -1556,7 +1557,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
 				&it_pool[it_idx + 1]);
 
-		slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
+		slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
 		pkts_info[slot_idx].descs = num_buffers;
 		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
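The wrap arithmetic differs between the two ring layouts: the split path above
masks with vq->size - 1, which is valid because split ring sizes are powers of
two, while the packed path further down uses % vq->size, which works for any
size. A standalone sketch of the equivalence when the size is a power of two:

#include <assert.h>
#include <stdint.h>

static uint16_t wrap_mask(uint16_t idx, uint16_t size)
{
	/* valid only when size is a power of two */
	return idx & (size - 1);
}

static uint16_t wrap_mod(uint16_t idx, uint16_t size)
{
	return idx % size;	/* works for any size, slightly costlier */
}

int main(void)
{
	assert(wrap_mask(300, 256) == wrap_mod(300, 256));	/* both 44 */
	return 0;
}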
@@ -1574,7 +1575,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_xfer = vq->async_ops.transfer_data(dev->vid,
+			n_xfer = async->ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
 			if (likely(n_xfer >= 0)) {
 				n_pkts = n_xfer;
@@ -1606,7 +1607,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
 		if (likely(n_xfer >= 0)) {
 			n_pkts = n_xfer;
 		} else {
@@ -1638,15 +1639,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 
 	/* keep used descriptors */
 	if (likely(vq->shadow_used_idx)) {
-		uint16_t to = vq->async_desc_idx_split & (vq->size - 1);
+		uint16_t to = async->desc_idx_split & (vq->size - 1);
 
 		store_dma_desc_info_split(vq->shadow_used_split,
-				vq->async_descs_split, vq->size, 0, to,
+				async->descs_split, vq->size, 0, to,
 				vq->shadow_used_idx);
 
-		vq->async_desc_idx_split += vq->shadow_used_idx;
-		vq->async_pkts_idx += pkt_idx;
-		vq->async_pkts_inflight_n += pkt_idx;
+		async->desc_idx_split += vq->shadow_used_idx;
+		async->pkts_idx += pkt_idx;
+		async->pkts_inflight_n += pkt_idx;
 		vq->shadow_used_idx = 0;
 	}
@@ -1798,7 +1799,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 {
 	uint16_t descs_err = 0;
 	uint16_t buffers_err = 0;
-	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	struct async_inflight_info *pkts_info = vq->async->pkts_info;
 
 	*pkt_idx -= nr_err;
 	/* calculate the sum of buffers and descs of DMA-error packets. */
@@ -1829,12 +1830,13 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t num_buffers;
 	uint16_t num_descs;
 
-	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
-	struct iovec *vec_pool = vq->vec_pool;
+	struct vhost_async *async = vq->async;
+	struct rte_vhost_iov_iter *it_pool = async->it_pool;
+	struct iovec *vec_pool = async->vec_pool;
 	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
 	struct iovec *src_iovec = vec_pool;
 	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
-	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint16_t slot_idx = 0;
 	uint16_t segs_await = 0;
@@ -1851,7 +1853,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 						&it_pool[it_idx], &it_pool[it_idx + 1]) < 0))
 			break;
 
-		slot_idx = (vq->async_pkts_idx + pkt_idx) % vq->size;
+		slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
 
 		async_fill_desc(&tdes[pkt_burst_idx++], &it_pool[it_idx],
 			&it_pool[it_idx + 1]);
@@ -1873,7 +1875,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_xfer = vq->async_ops.transfer_data(dev->vid,
+			n_xfer = async->ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
 			if (likely(n_xfer >= 0)) {
 				n_pkts = n_xfer;
@@ -1904,7 +1906,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = async->ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
 		if (likely(n_xfer >= 0)) {
 			n_pkts = n_xfer;
 		} else {
@@ -1922,20 +1924,20 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	if (likely(vq->shadow_used_idx)) {
 		/* keep used descriptors. */
-		store_dma_desc_info_packed(vq->shadow_used_packed, vq->async_buffers_packed,
-					vq->size, 0, vq->async_buffer_idx_packed,
+		store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
+					vq->size, 0, async->buffer_idx_packed,
 					vq->shadow_used_idx);
 
-		vq->async_buffer_idx_packed += vq->shadow_used_idx;
-		if (vq->async_buffer_idx_packed >= vq->size)
-			vq->async_buffer_idx_packed -= vq->size;
+		async->buffer_idx_packed += vq->shadow_used_idx;
+		if (async->buffer_idx_packed >= vq->size)
+			async->buffer_idx_packed -= vq->size;
 
-		vq->async_pkts_idx += pkt_idx;
-		if (vq->async_pkts_idx >= vq->size)
-			vq->async_pkts_idx -= vq->size;
+		async->pkts_idx += pkt_idx;
+		if (async->pkts_idx >= vq->size)
+			async->pkts_idx -= vq->size;
 
 		vq->shadow_used_idx = 0;
-		vq->async_pkts_inflight_n += pkt_idx;
+		async->pkts_inflight_n += pkt_idx;
 	}
 
 	return pkt_idx;
@@ -1944,28 +1946,29 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 static __rte_always_inline void
 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
 {
+	struct vhost_async *async = vq->async;
 	uint16_t nr_left = n_descs;
 	uint16_t nr_copy;
 	uint16_t to, from;
 
 	do {
-		from = vq->last_async_desc_idx_split & (vq->size - 1);
+		from = async->last_desc_idx_split & (vq->size - 1);
 		nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
 		to = vq->last_used_idx & (vq->size - 1);
 
 		if (to + nr_copy <= vq->size) {
-			rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+			rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
 					nr_copy * sizeof(struct vring_used_elem));
 		} else {
 			uint16_t size = vq->size - to;
 
-			rte_memcpy(&vq->used->ring[to], &vq->async_descs_split[from],
+			rte_memcpy(&vq->used->ring[to], &async->descs_split[from],
 					size * sizeof(struct vring_used_elem));
-			rte_memcpy(&vq->used->ring[0], &vq->async_descs_split[from + size],
+			rte_memcpy(&vq->used->ring[0], &async->descs_split[from + size],
 					(nr_copy - size) * sizeof(struct vring_used_elem));
 		}
 
-		vq->last_async_desc_idx_split += nr_copy;
+		async->last_desc_idx_split += nr_copy;
 		vq->last_used_idx += nr_copy;
 		nr_left -= nr_copy;
 	} while (nr_left > 0);
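The write-back above handles used-ring wrap-around with at most two copies:
one run up to the end of the ring, then the remainder from slot zero. The
same technique in a standalone sketch, with plain ints standing in for
struct vring_used_elem:

#include <string.h>

/* Copy n items into a ring of ring_size entries starting at 'to',
 * splitting into two memcpy calls when the span wraps past the end. */
static void ring_copy(int *ring, unsigned ring_size, unsigned to,
		const int *src, unsigned n)
{
	if (to + n <= ring_size) {
		memcpy(&ring[to], src, n * sizeof(*ring));
	} else {
		unsigned first = ring_size - to;

		memcpy(&ring[to], src, first * sizeof(*ring));
		memcpy(&ring[0], src + first, (n - first) * sizeof(*ring));
	}
}

int main(void)
{
	int ring[8] = {0}, src[5] = {1, 2, 3, 4, 5};

	ring_copy(ring, 8, 6, src, 5);	/* fills slots 6,7 then 0,1,2 */
	return ring[0] == 3 ? 0 : 1;
}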
@@ -1975,20 +1978,21 @@ static __rte_always_inline void
 write_back_completed_descs_packed(struct vhost_virtqueue *vq,
 				uint16_t n_buffers)
 {
+	struct vhost_async *async = vq->async;
 	uint16_t nr_left = n_buffers;
 	uint16_t from, to;
 
 	do {
-		from = vq->last_async_buffer_idx_packed;
+		from = async->last_buffer_idx_packed;
 		to = (from + nr_left) % vq->size;
 		if (to > from) {
-			vhost_update_used_packed(vq, vq->async_buffers_packed + from, to - from);
-			vq->last_async_buffer_idx_packed += nr_left;
+			vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
+			async->last_buffer_idx_packed += nr_left;
 			nr_left = 0;
 		} else {
-			vhost_update_used_packed(vq, vq->async_buffers_packed + from,
+			vhost_update_used_packed(vq, async->buffers_packed + from,
 				vq->size - from);
-			vq->last_async_buffer_idx_packed = 0;
+			async->last_buffer_idx_packed = 0;
 			nr_left -= vq->size - from;
 		}
 	} while (nr_left > 0);
@@ -1999,6 +2003,7 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
 	struct vhost_virtqueue *vq;
+	struct vhost_async *async;
 	struct async_inflight_info *pkts_info;
 	int32_t n_cpl;
 	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
@@ -2006,15 +2011,16 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 	uint16_t from, i;
 
 	vq = dev->virtqueue[queue_id];
-	pkts_idx = vq->async_pkts_idx % vq->size;
-	pkts_info = vq->async_pkts_info;
+	async = vq->async;
+	pkts_idx = async->pkts_idx % vq->size;
+	pkts_info = async->pkts_info;
 	vq_size = vq->size;
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
-		vq_size, vq->async_pkts_inflight_n);
+		vq_size, async->pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n) {
-		n_cpl = vq->async_ops.check_completed_copies(dev->vid,
-			queue_id, 0, count - vq->async_last_pkts_n);
+	if (count > async->last_pkts_n) {
+		n_cpl = async->ops.check_completed_copies(dev->vid,
+			queue_id, 0, count - async->last_pkts_n);
 		if (likely(n_cpl >= 0)) {
 			n_pkts_cpl = n_cpl;
 		} else {
@@ -2025,10 +2031,10 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 		}
 	}
 
-	n_pkts_cpl += vq->async_last_pkts_n;
+	n_pkts_cpl += async->last_pkts_n;
 
 	n_pkts_put = RTE_MIN(n_pkts_cpl, count);
 	if (unlikely(n_pkts_put == 0)) {
-		vq->async_last_pkts_n = n_pkts_cpl;
+		async->last_pkts_n = n_pkts_cpl;
 		return 0;
 	}
@@ -2045,8 +2051,8 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 			pkts[i] = pkts_info[from].mbuf;
 		}
 	}
-	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
-	vq->async_pkts_inflight_n -= n_pkts_put;
+	async->last_pkts_n = n_pkts_cpl - n_pkts_put;
+	async->pkts_inflight_n -= n_pkts_put;
 
 	if (likely(vq->enabled && vq->access_ok)) {
 		if (vq_is_packed(dev)) {
@@ -2062,11 +2068,11 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 		}
 	} else {
 		if (vq_is_packed(dev)) {
-			vq->last_async_buffer_idx_packed += n_buffers;
-			if (vq->last_async_buffer_idx_packed >= vq->size)
-				vq->last_async_buffer_idx_packed -= vq->size;
+			async->last_buffer_idx_packed += n_buffers;
+			if (async->last_buffer_idx_packed >= vq->size)
+				async->last_buffer_idx_packed -= vq->size;
 		} else {
-			vq->last_async_desc_idx_split += n_descs;
+			async->last_desc_idx_split += n_descs;
 		}
 	}
@@ -2093,7 +2099,7 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	vq = dev->virtqueue[queue_id];
 
-	if (unlikely(!vq->async_registered)) {
+	if (unlikely(!vq->async)) {
 		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
@@ -2128,7 +2134,7 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
 	vq = dev->virtqueue[queue_id];
 
-	if (unlikely(!vq->async_registered)) {
+	if (unlikely(!vq->async)) {
 		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
@@ -2157,7 +2163,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	rte_spinlock_lock(&vq->access_lock);
 
-	if (unlikely(!vq->enabled || !vq->async_registered))
+	if (unlikely(!vq->enabled || !vq->async))
 		goto out_access_unlock;
 
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
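All of the unregister/enable paths in this patch follow the same guard: take
the queue lock, refuse teardown while copies are still in flight, and only
then free the async state (which, post-patch, is also what clears the
"registered" condition). A generic standalone sketch of that discipline
(hypothetical names, pthreads standing in for rte_spinlock):

#include <pthread.h>
#include <stdlib.h>

struct chan { pthread_mutex_t lock; unsigned inflight; void *state; };

/* Returns 0 on success, -1 if transfers are still pending. */
static int chan_teardown(struct chan *c)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	if (c->inflight > 0) {
		ret = -1;	/* caller must drain completions first */
	} else {
		free(c->state);	/* freeing also clears "registered" */
		c->state = NULL;
	}
	pthread_mutex_unlock(&c->lock);
	return ret;
}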