vhost: keep a reference to virtqueue index

Having a back reference to the index of the vq in the dev->virtqueue[]
array makes it possible to unify the internal API by passing only dev
and vq.
It also allows displaying the vq index in log messages.

Remove virtqueue index checks where they are unneeded (such as in
static helpers called from a loop over all available virtqueues).
Move virtqueue index validity checks as early as possible.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Authored by David Marchand on 2022-07-25 22:32:05 +02:00; committed by Thomas Monjalon
parent 5a5a72a875
commit 57e414e3ec
6 changed files with 91 additions and 123 deletions
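
To illustrate the pattern this commit applies, here is a minimal, self-contained C sketch (not DPDK code: the struct and helper names mirror lib/vhost, but the types and bodies are stripped down to the bare pattern). Each virtqueue stores its own index once at init time, so internal helpers can take the vq pointer alone and still log or look up the index, while index validity checks move to the public entry points.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define VHOST_MAX_VRING 256

/* Simplified stand-ins for struct vhost_virtqueue / struct virtio_net. */
struct vhost_virtqueue {
	uint32_t index; /* back reference into dev->virtqueue[] */
};

struct virtio_net {
	struct vhost_virtqueue *virtqueue[VHOST_MAX_VRING];
	uint32_t nr_vring;
};

/* Before this commit, helpers took (dev, vring_idx) and re-validated the
 * index. After it, the index is recorded once here, and helpers take
 * (dev, vq), logging the index via vq->index. */
static void
init_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint32_t vring_idx)
{
	(void)dev;
	vq->index = vring_idx;
}

static void
reset_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* No out-of-bound or NULL checks needed here: callers loop over
	 * dev->nr_vring and pass an already-validated vq. */
	printf("resetting vq %u\n", (unsigned)vq->index);
	init_vring_queue(dev, vq, vq->index);
}

int
main(void)
{
	struct virtio_net dev = { .nr_vring = 2 };
	uint32_t i;

	for (i = 0; i < dev.nr_vring; i++) {
		dev.virtqueue[i] = malloc(sizeof(*dev.virtqueue[i]));
		init_vring_queue(&dev, dev.virtqueue[i], i);
	}
	for (i = 0; i < dev.nr_vring; i++)
		reset_vring_queue(&dev, dev.virtqueue[i]);
	for (i = 0; i < dev.nr_vring; i++)
		free(dev.virtqueue[i]);
	return 0;
}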


@@ -293,10 +293,9 @@ vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)
 }
 
 int
-vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
+vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	char pool_name[RTE_MEMPOOL_NAMESIZE];
-	struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
 	int socket = 0;
 
 	if (vq->iotlb_pool) {
@@ -319,7 +318,7 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
 	TAILQ_INIT(&vq->iotlb_pending_list);
 
 	snprintf(pool_name, sizeof(pool_name), "iotlb_%u_%d_%d",
-			getpid(), dev->vid, vq_index);
+			getpid(), dev->vid, vq->index);
 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "IOTLB cache name: %s\n", pool_name);
 
 	/* If already created, free it and recreate */


@@ -47,6 +47,6 @@ void vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqu
 void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,
 		uint64_t size, uint8_t perm);
 void vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq);
-int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
+int vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 #endif /* _VHOST_IOTLB_H_ */


@@ -575,25 +575,14 @@ vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 }
 
 static void
-init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
+init_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	uint32_t vring_idx)
 {
-	struct vhost_virtqueue *vq;
 	int numa_node = SOCKET_ID_ANY;
 
-	if (vring_idx >= VHOST_MAX_VRING) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to init vring, out of bound (%d)\n",
-			vring_idx);
-		return;
-	}
-
-	vq = dev->virtqueue[vring_idx];
-	if (!vq) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "virtqueue not allocated (%d)\n", vring_idx);
-		return;
-	}
-
 	memset(vq, 0, sizeof(struct vhost_virtqueue));
 
+	vq->index = vring_idx;
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
 	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
@@ -607,31 +596,16 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 #endif
 	vq->numa_node = numa_node;
 
-	vhost_user_iotlb_init(dev, vring_idx);
+	vhost_user_iotlb_init(dev, vq);
 }
 
 static void
-reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
+reset_vring_queue(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-	struct vhost_virtqueue *vq;
 	int callfd;
 
-	if (vring_idx >= VHOST_MAX_VRING) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to reset vring, out of bound (%d)\n",
-			vring_idx);
-		return;
-	}
-
-	vq = dev->virtqueue[vring_idx];
-	if (!vq) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to reset vring, virtqueue not allocated (%d)\n",
-			vring_idx);
-		return;
-	}
-
 	callfd = vq->callfd;
-	init_vring_queue(dev, vring_idx);
+	init_vring_queue(dev, vq, vq->index);
 	vq->callfd = callfd;
 }
@@ -655,7 +629,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 		}
 
 		dev->virtqueue[i] = vq;
-		init_vring_queue(dev, i);
+		init_vring_queue(dev, vq, i);
 		rte_spinlock_init(&vq->access_lock);
 
 		vq->avail_wrap_counter = 1;
 		vq->used_wrap_counter = 1;
@@ -681,8 +655,16 @@ reset_device(struct virtio_net *dev)
 	dev->protocol_features = 0;
 	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 
-	for (i = 0; i < dev->nr_vring; i++)
-		reset_vring_queue(dev, i);
+	for (i = 0; i < dev->nr_vring; i++) {
+		struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+		if (!vq) {
+			VHOST_LOG_CONFIG(dev->ifname, ERR,
+				"failed to reset vring, virtqueue not allocated (%d)\n", i);
+			continue;
+		}
+
+		reset_vring_queue(dev, vq);
+	}
 }
 
 /*
@@ -1661,17 +1643,15 @@ rte_vhost_extern_callback_register(int vid,
 }
 
 static __rte_always_inline int
-async_channel_register(int vid, uint16_t queue_id)
+async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-	struct virtio_net *dev = get_device(vid);
-	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
 	struct vhost_async *async;
 	int node = vq->numa_node;
 
 	if (unlikely(vq->async)) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR,
 			"async register failed: already registered (qid: %d)\n",
-			queue_id);
+			vq->index);
 		return -1;
 	}
@@ -1679,7 +1659,7 @@
 	if (!async) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR,
 			"failed to allocate async metadata (qid: %d)\n",
-			queue_id);
+			vq->index);
 		return -1;
 	}
@@ -1688,7 +1668,7 @@
 	if (!async->pkts_info) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR,
 			"failed to allocate async_pkts_info (qid: %d)\n",
-			queue_id);
+			vq->index);
 		goto out_free_async;
 	}
@@ -1697,7 +1677,7 @@
 	if (!async->pkts_cmpl_flag) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR,
 			"failed to allocate async pkts_cmpl_flag (qid: %d)\n",
-			queue_id);
+			vq->index);
 		goto out_free_async;
 	}
@@ -1708,7 +1688,7 @@
 		if (!async->buffers_packed) {
 			VHOST_LOG_CONFIG(dev->ifname, ERR,
 				"failed to allocate async buffers (qid: %d)\n",
-				queue_id);
+				vq->index);
 			goto out_free_inflight;
 		}
 	} else {
@@ -1718,7 +1698,7 @@
 		if (!async->descs_split) {
 			VHOST_LOG_CONFIG(dev->ifname, ERR,
 				"failed to allocate async descs (qid: %d)\n",
-				queue_id);
+				vq->index);
 			goto out_free_inflight;
 		}
 	}
@@ -1753,7 +1733,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id)
 		return -1;
 
 	rte_spinlock_lock(&vq->access_lock);
-	ret = async_channel_register(vid, queue_id);
+	ret = async_channel_register(dev, vq);
 	rte_spinlock_unlock(&vq->access_lock);
 
 	return ret;
@@ -1782,7 +1762,7 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
 		return -1;
 	}
 
-	return async_channel_register(vid, queue_id);
+	return async_channel_register(dev, vq);
 }
 
 int


@@ -309,6 +309,9 @@ struct vhost_virtqueue {
 	/* Currently unused as polling mode is enabled */
 	int kickfd;
 
+	/* Index of this vq in dev->virtqueue[] */
+	uint32_t index;
+
 	/* inflight share memory info */
 	union {
 		struct rte_vhost_inflight_info_split *inflight_split;


@@ -240,22 +240,20 @@ vhost_backend_cleanup(struct virtio_net *dev)
 }
 
 static void
-vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
-		int enable)
+vhost_user_notify_queue_state(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		int enable)
 {
 	struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
-	struct vhost_virtqueue *vq = dev->virtqueue[index];
 
 	/* Configure guest notifications on enable */
 	if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
 		vhost_enable_guest_notification(dev, vq, vq->notif_enable);
 
 	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
-		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
+		vdpa_dev->ops->set_vring_state(dev->vid, vq->index, enable);
 
 	if (dev->notify_ops->vring_state_changed)
-		dev->notify_ops->vring_state_changed(dev->vid,
-				index, enable);
+		dev->notify_ops->vring_state_changed(dev->vid, vq->index, enable);
 }
 
 /*
@@ -494,7 +492,7 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
  */
 #ifdef RTE_LIBRTE_VHOST_NUMA
 static void
-numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq, int index)
+numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 {
 	int node, dev_node;
 	struct virtio_net *dev;
@@ -519,7 +517,7 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq, int index)
 	if (ret) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR,
 			"unable to get virtqueue %d numa information.\n",
-			index);
+			vq->index);
 		return;
 	}
@@ -530,15 +528,15 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq, int index)
 	if (!vq) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR,
 			"failed to realloc virtqueue %d on node %d\n",
-			index, node);
+			(*pvq)->index, node);
 		return;
 	}
 	*pvq = vq;
 
-	if (vq != dev->virtqueue[index]) {
+	if (vq != dev->virtqueue[vq->index]) {
 		VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated virtqueue on node %d\n", node);
-		dev->virtqueue[index] = vq;
-		vhost_user_iotlb_init(dev, index);
+		dev->virtqueue[vq->index] = vq;
+		vhost_user_iotlb_init(dev, vq);
 	}
 
 	if (vq_is_packed(dev)) {
@@ -666,11 +664,10 @@ out_dev_realloc:
 }
 #else
 static void
-numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq, int index)
+numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 {
 	RTE_SET_USED(pdev);
 	RTE_SET_USED(pvq);
-	RTE_SET_USED(index);
 }
 #endif
@@ -741,8 +738,7 @@ log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
 }
 
 static void
-translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq,
-		int vq_index)
+translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 {
 	struct vhost_virtqueue *vq;
 	struct virtio_net *dev;
@@ -771,7 +767,7 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq,
 			return;
 		}
 
-		numa_realloc(&dev, &vq, vq_index);
+		numa_realloc(&dev, &vq);
 		*pdev = dev;
 		*pvq = vq;
@@ -813,7 +809,7 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq,
 			return;
 		}
 
-		numa_realloc(&dev, &vq, vq_index);
+		numa_realloc(&dev, &vq);
 		*pdev = dev;
 		*pvq = vq;
@@ -891,7 +887,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 	if ((vq->enabled && (dev->features &
 				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
 			access_ok) {
-		translate_ring_addresses(&dev, &vq, ctx->msg.payload.addr.index);
+		translate_ring_addresses(&dev, &vq);
 		*pdev = dev;
 	}
@@ -1397,7 +1393,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 			 */
 			vring_invalidate(dev, vq);
 
-			translate_ring_addresses(&dev, &vq, i);
+			translate_ring_addresses(&dev, &vq);
 			*pdev = dev;
 		}
 	}
@@ -1777,7 +1773,7 @@ vhost_user_set_vring_call(struct virtio_net **pdev,
 	if (vq->ready) {
 		vq->ready = false;
-		vhost_user_notify_queue_state(dev, file.index, 0);
+		vhost_user_notify_queue_state(dev, vq, 0);
 	}
 
 	if (vq->callfd >= 0)
@@ -2026,7 +2022,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev,
 	/* Interpret ring addresses only when ring is started. */
 	vq = dev->virtqueue[file.index];
-	translate_ring_addresses(&dev, &vq, file.index);
+	translate_ring_addresses(&dev, &vq);
 	*pdev = dev;
 
 	/*
@@ -2040,7 +2036,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev,
 	if (vq->ready) {
 		vq->ready = false;
-		vhost_user_notify_queue_state(dev, vq, 0);
+		vhost_user_notify_queue_state(dev, vq, 0);
 	}
 
 	if (vq->kickfd >= 0)
@@ -2583,7 +2579,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
 		if (is_vring_iotlb(dev, vq, imsg)) {
 			rte_spinlock_lock(&vq->access_lock);
-			translate_ring_addresses(&dev, &vq, i);
+			translate_ring_addresses(&dev, &vq);
 			*pdev = dev;
 			rte_spinlock_unlock(&vq->access_lock);
 		}
@@ -3148,7 +3144,7 @@ skip_to_post_handle:
 		if (cur_ready != (vq && vq->ready)) {
 			vq->ready = cur_ready;
-			vhost_user_notify_queue_state(dev, i, cur_ready);
+			vhost_user_notify_queue_state(dev, vq, cur_ready);
 		}
 	}


@@ -1555,22 +1555,12 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 }
 
 static __rte_always_inline uint32_t
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
 {
-	struct vhost_virtqueue *vq;
 	uint32_t nb_tx = 0;
 
 	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid virtqueue idx %d.\n",
-			__func__, queue_id);
-		return 0;
-	}
-
-	vq = dev->virtqueue[queue_id];
 
 	rte_spinlock_lock(&vq->access_lock);
 
 	if (unlikely(!vq->enabled))
@@ -1620,7 +1610,14 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 		return 0;
 	}
 
-	return virtio_dev_rx(dev, queue_id, pkts, count);
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+		VHOST_LOG_DATA(dev->ifname, ERR,
+			"%s: invalid virtqueue idx %d.\n",
+			__func__, queue_id);
+		return 0;
+	}
+
+	return virtio_dev_rx(dev, dev->virtqueue[queue_id], pkts, count);
 }
 
 static __rte_always_inline uint16_t
@@ -1669,8 +1666,7 @@ store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
-	uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
-	int16_t dma_id, uint16_t vchan_id)
+	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
 {
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint32_t pkt_idx = 0;
@@ -1732,7 +1728,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		VHOST_LOG_DATA(dev->ifname, DEBUG,
 			"%s: failed to transfer %u packets for queue %u.\n",
-			__func__, pkt_err, queue_id);
+			__func__, pkt_err, vq->index);
 
 		/* update number of completed packets */
 		pkt_idx = n_xfer;
@@ -1878,8 +1874,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
-	uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
-	int16_t dma_id, uint16_t vchan_id)
+	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
 {
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
@@ -1924,7 +1919,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
 	if (unlikely(pkt_err)) {
 		VHOST_LOG_DATA(dev->ifname, DEBUG,
 			"%s: failed to transfer %u packets for queue %u.\n",
-			__func__, pkt_err, queue_id);
+			__func__, pkt_err, vq->index);
 		dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
 	}
@@ -2045,11 +2040,9 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline uint16_t
-vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
-	struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
-	uint16_t vchan_id)
+vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	struct rte_mbuf **pkts, uint16_t count, int16_t dma_id, uint16_t vchan_id)
 {
-	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
 	struct vhost_async *async = vq->async;
 	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint16_t nr_cpl_pkts = 0;
@@ -2156,7 +2149,7 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		goto out;
 	}
 
-	n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+	n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count, dma_id, vchan_id);
 
 	vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
 	vq->stats.inflight_completed += n_pkts_cpl;
@@ -2216,12 +2209,11 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
 	}
 
 	if ((queue_id & 1) == 0)
-		n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
-			pkts, count, dma_id, vchan_id);
-	else {
+		n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count,
+			dma_id, vchan_id);
+	else
 		n_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,
-				dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
-	}
+			dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
 
 	vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
 	vq->stats.inflight_completed += n_pkts_cpl;
@@ -2275,12 +2267,11 @@ rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
 	}
 
 	if ((queue_id & 1) == 0)
-		n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
-			pkts, count, dma_id, vchan_id);
-	else {
+		n_pkts_cpl = vhost_poll_enqueue_completed(dev, vq, pkts, count,
+			dma_id, vchan_id);
+	else
 		n_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,
-				dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
-	}
+			dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
 
 	vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
 	vq->stats.inflight_completed += n_pkts_cpl;
@@ -2292,19 +2283,12 @@ out_access_unlock:
 }
 
 static __rte_always_inline uint32_t
-virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
+virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
 {
-	struct vhost_virtqueue *vq;
 	uint32_t nb_tx = 0;
 
 	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid virtqueue idx %d.\n",
-			__func__, queue_id);
-		return 0;
-	}
 
 	if (unlikely(!dma_copy_track[dma_id].vchans ||
 			!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
@@ -2314,8 +2298,6 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 		return 0;
 	}
 
-	vq = dev->virtqueue[queue_id];
-
 	rte_spinlock_lock(&vq->access_lock);
 
 	if (unlikely(!vq->enabled || !vq->async))
@@ -2333,11 +2315,11 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 		goto out;
 
 	if (vq_is_packed(dev))
-		nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
-			pkts, count, dma_id, vchan_id);
+		nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, pkts, count,
+			dma_id, vchan_id);
 	else
-		nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
-			pkts, count, dma_id, vchan_id);
+		nb_tx = virtio_dev_rx_async_submit_split(dev, vq, pkts, count,
+			dma_id, vchan_id);
 
 	vq->stats.inflight_submitted += nb_tx;
@@ -2368,7 +2350,15 @@ rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
 		return 0;
 	}
 
-	return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, dma_id, vchan_id);
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+		VHOST_LOG_DATA(dev->ifname, ERR,
+			"%s: invalid virtqueue idx %d.\n",
+			__func__, queue_id);
+		return 0;
+	}
+
+	return virtio_dev_rx_async_submit(dev, dev->virtqueue[queue_id], pkts, count,
+		dma_id, vchan_id);
 }
 
 static inline bool