vhost: support async packed ring dequeue

This patch implements the packed ring dequeue data path
for asynchronous vhost.

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Cheng Jiang 2022-06-13 08:21:59 +00:00 committed by Maxime Coquelin
parent 4a8ca6590d
commit fe8477ebbd

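For context, a caller-side sketch of the async dequeue API that this patch extends to packed rings. It is not part of the patch: the prototype follows the DPDK 22.07 experimental rte_vhost_async.h as I understand it, and poll_guest_tx and DEQ_BURST are illustrative names only.

/*
 * Hypothetical application polling helper for async dequeue. Assumes the
 * queue was registered with rte_vhost_async_channel_register() and a DMA
 * vchannel (dma_id/vchan_id) was configured beforehand.
 */
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

#define DEQ_BURST 32

static uint16_t
poll_guest_tx(int vid, uint16_t queue_id, struct rte_mempool *mp,
		struct rte_mbuf **pkts, int16_t dma_id, uint16_t vchan_id)
{
	int nr_inflight = 0;

	/* With this patch the call below also works on packed virtqueues;
	 * completed packets are returned in pkts[], copies still in flight
	 * stay tracked inside the vhost library. */
	return rte_vhost_async_try_dequeue_burst(vid, queue_id, mp, pkts,
			DEQ_BURST, &nr_inflight, dma_id, vchan_id);
}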

@@ -27,7 +27,7 @@
#define MAX_BATCH_LEN 256
static __rte_always_inline uint16_t
async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id, bool legacy_ol_flags);
@@ -2207,12 +2207,7 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
pkts, count, dma_id, vchan_id);
else {
if (unlikely(vq_is_packed(dev)))
VHOST_LOG_DATA(ERR,
"(%s) %s: async dequeue does not support packed ring.\n",
dev->ifname, __func__);
else
n_pkts_cpl = async_poll_dequeue_completed_split(dev, vq, pkts, count,
n_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,
dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
}
@@ -2271,12 +2266,7 @@ rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
pkts, count, dma_id, vchan_id);
else {
if (unlikely(vq_is_packed(dev)))
VHOST_LOG_DATA(ERR,
"(%s) %s: async dequeue does not support packed ring.\n",
dev->ifname, __func__);
else
n_pkts_cpl = async_poll_dequeue_completed_split(dev, vq, pkts, count,
n_pkts_cpl = async_poll_dequeue_completed(dev, vq, pkts, count,
dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
}
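With the packed-ring special casing above removed, rte_vhost_clear_queue() and rte_vhost_clear_queue_thread_unsafe() can now also poll async dequeue completions on packed virtqueues. A hedged caller-side sketch follows; drain_vhost_queue is an illustrative name, and the prototype should be verified against the release's vhost async headers.

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

/* Drain completed async copies for one virtqueue, e.g. before tearing
 * down the DMA vchannel; works for split and packed rings alike. */
static void
drain_vhost_queue(int vid, uint16_t queue_id, int16_t dma_id, uint16_t vchan_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t n;

	while ((n = rte_vhost_clear_queue(vid, queue_id, pkts,
			RTE_DIM(pkts), dma_id, vchan_id)) > 0)
		rte_pktmbuf_free_bulk(pkts, n);	/* or forward them */
}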
@@ -3339,7 +3329,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
}
static __rte_always_inline uint16_t
async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id, bool legacy_ol_flags)
{
@@ -3354,7 +3344,7 @@ async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue
from = start_idx;
while (vq->async->pkts_cmpl_flag[from] && count--) {
vq->async->pkts_cmpl_flag[from] = false;
from = (from + 1) & (vq->size - 1);
from = (from + 1) % vq->size;
nr_cpl_pkts++;
}
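The wrap arithmetic above switches from the power-of-two mask to a plain modulo, presumably because this helper now also serves packed virtqueues, whose size (unlike split rings) is not required to be a power of two. A standalone illustration of the difference, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t size = 768;		/* a ring size that is not a power of two */
	uint16_t idx = size - 1;	/* last slot, about to wrap */

	/* the mask form assumes size is a power of two and wraps to 512 here */
	printf("mask: %u\n", (unsigned)((idx + 1) & (size - 1)));
	/* modulo wraps correctly to 0 for any size */
	printf("mod:  %u\n", (unsigned)((idx + 1) % size));
	return 0;
}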
@@ -3362,7 +3352,7 @@ async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue
return 0;
for (i = 0; i < nr_cpl_pkts; i++) {
from = (start_idx + i) & (vq->size - 1);
from = (start_idx + i) % vq->size;
pkts[i] = pkts_info[from].mbuf;
if (virtio_net_with_host_offload(dev))
@@ -3371,10 +3361,14 @@ async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue
}
/* write back completed descs to used ring and update used idx */
write_back_completed_descs_split(vq, nr_cpl_pkts);
__atomic_add_fetch(&vq->used->idx, nr_cpl_pkts, __ATOMIC_RELEASE);
vhost_vring_call_split(dev, vq);
if (vq_is_packed(dev)) {
write_back_completed_descs_packed(vq, nr_cpl_pkts);
vhost_vring_call_packed(dev, vq);
} else {
write_back_completed_descs_split(vq, nr_cpl_pkts);
__atomic_add_fetch(&vq->used->idx, nr_cpl_pkts, __ATOMIC_RELEASE);
vhost_vring_call_split(dev, vq);
}
vq->async->pkts_inflight_n -= nr_cpl_pkts;
return nr_cpl_pkts;
@@ -3511,8 +3505,8 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
out:
/* DMA device may serve other queues, unconditionally check completed. */
nr_done_pkts = async_poll_dequeue_completed_split(dev, vq, pkts, pkts_size,
dma_id, vchan_id, legacy_ol_flags);
nr_done_pkts = async_poll_dequeue_completed(dev, vq, pkts, pkts_size,
dma_id, vchan_id, legacy_ol_flags);
return nr_done_pkts;
}
@@ -3539,6 +3533,171 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
pkts, count, dma_id, vchan_id, false);
}
static __rte_always_inline void
vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq, uint16_t buf_id)
{
struct vhost_async *async = vq->async;
uint16_t idx = async->buffer_idx_packed;
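/* Stage the completed buffer id in the async shadow ring; these entries are
 * flushed to the packed ring by write_back_completed_descs_packed() once the
 * DMA copies have completed.
 */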
async->buffers_packed[idx].id = buf_id;
async->buffers_packed[idx].len = 0;
async->buffers_packed[idx].count = 1;
async->buffer_idx_packed++;
if (async->buffer_idx_packed >= vq->size)
async->buffer_idx_packed -= vq->size;
}
static __rte_always_inline int
virtio_dev_tx_async_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool,
struct rte_mbuf *pkts,
uint16_t slot_idx,
bool legacy_ol_flags)
{
int err;
uint16_t buf_id, desc_count = 0;
uint16_t nr_vec = 0;
uint32_t buf_len;
struct buf_vector buf_vec[BUF_VECTOR_MAX];
static bool allocerr_warned;
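/* Map the descriptor chain at last_avail_idx into buf_vec; on success,
 * buf_id, desc_count and buf_len describe the chain to dequeue.
 */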
if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, &desc_count,
buf_vec, &nr_vec, &buf_id, &buf_len,
VHOST_ACCESS_RO) < 0))
return -1;
if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
if (!allocerr_warned) {
VHOST_LOG_DATA(ERR, "(%s) Failed mbuf alloc of size %d from %s.\n",
dev->ifname, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
return -1;
}
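/* Queue the guest-to-mbuf copies for this packet on the async I/O vector
 * iterator; with the async flag set, the payload copies are not done here
 * but later by the DMA engine via vhost_async_dma_transfer().
 */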
err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts, mbuf_pool,
legacy_ol_flags, slot_idx, true);
if (unlikely(err)) {
rte_pktmbuf_free(pkts);
if (!allocerr_warned) {
VHOST_LOG_DATA(ERR, "(%s) Failed to copy desc to mbuf on.\n", dev->ifname);
allocerr_warned = true;
}
return -1;
}
/* update async shadow packed ring */
vhost_async_shadow_dequeue_single_packed(vq, buf_id);
return err;
}
static __rte_always_inline uint16_t
virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
{
uint16_t pkt_idx;
uint16_t slot_idx = 0;
uint16_t nr_done_pkts = 0;
uint16_t pkt_err = 0;
uint32_t n_xfer;
struct vhost_async *async = vq->async;
struct async_inflight_info *pkts_info = async->pkts_info;
struct rte_mbuf *pkts_prealloc[MAX_PKT_BURST];
VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n", dev->vid, count);
async_iter_reset(async);
if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count))
goto out;
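/* Prepare up to 'count' packets: map one available descriptor chain per
 * pre-allocated mbuf and queue its copies on the async iterator.
 */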
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
struct rte_mbuf *pkt = pkts_prealloc[pkt_idx];
rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
if (unlikely(virtio_dev_tx_async_single_packed(dev, vq, mbuf_pool, pkt,
slot_idx, legacy_ol_flags))) {
rte_pktmbuf_free_bulk(&pkts_prealloc[pkt_idx], count - pkt_idx);
break;
}
pkts_info[slot_idx].mbuf = pkt;
vq_inc_last_avail_packed(vq, 1);
}
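/* Submit all queued copy jobs to the DMA vchannel; n_xfer is the number of
 * packets whose copies were actually accepted.
 */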
n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
async->iov_iter, pkt_idx);
async->pkts_inflight_n += n_xfer;
pkt_err = pkt_idx - n_xfer;
if (unlikely(pkt_err)) {
pkt_idx -= pkt_err;
/**
* recover DMA-copy related structures and free pktmbuf for DMA-error pkts.
*/
if (async->buffer_idx_packed >= pkt_err)
async->buffer_idx_packed -= pkt_err;
else
async->buffer_idx_packed += vq->size - pkt_err;
/* recover available ring first, while pkt_err still holds the count */
if (vq->last_avail_idx >= pkt_err) {
vq->last_avail_idx -= pkt_err;
} else {
vq->last_avail_idx += vq->size - pkt_err;
vq->avail_wrap_counter ^= 1;
}
/* free pktmbufs of the DMA-error pkts; this loop consumes pkt_err.
 * Wrap slot_idx explicitly: packed ring sizes need not be a power of two.
 */
while (pkt_err-- > 0) {
rte_pktmbuf_free(pkts_info[slot_idx].mbuf);
slot_idx = slot_idx ? slot_idx - 1 : vq->size - 1;
}
}
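/* Advance the async slot index by the packets actually submitted to DMA. */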
async->pkts_idx += pkt_idx;
if (async->pkts_idx >= vq->size)
async->pkts_idx -= vq->size;
out:
nr_done_pkts = async_poll_dequeue_completed(dev, vq, pkts, count,
dma_id, vchan_id, legacy_ol_flags);
return nr_done_pkts;
}
__rte_noinline
static uint16_t
virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id)
{
return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id, true);
}
__rte_noinline
static uint16_t
virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id)
{
return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id, false);
}
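The two __rte_noinline wrappers above mirror the existing split-ring pattern: the always-inline worker takes legacy_ol_flags as a constant, so each wrapper compiles to a specialized body without a per-packet branch on the offload-flag mode. A generic sketch of that pattern (names are illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>

/* Always-inline worker parameterized by a compile-time-constant flag. */
static inline uint32_t
worker(uint32_t x, bool legacy)
{
	return legacy ? x * 2 : x + 2;
}

/* One out-of-line wrapper per flag value: the constant propagates into the
 * inlined worker and the branch disappears in each specialization. */
static uint32_t worker_legacy(uint32_t x)    { return worker(x, true); }
static uint32_t worker_compliant(uint32_t x) { return worker(x, false); }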
uint16_t
rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
@@ -3641,25 +3800,22 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
count -= 1;
}
if (unlikely(vq_is_packed(dev))) {
static bool not_support_pack_log;
if (!not_support_pack_log) {
VHOST_LOG_DATA(ERR,
"(%s) %s: async dequeue does not support packed ring.\n",
dev->ifname, __func__);
not_support_pack_log = true;
}
count = 0;
goto out;
if (vq_is_packed(dev)) {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
count = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
else
count = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
} else {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
count = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
else
count = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
}
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
count = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool, pkts,
count, dma_id, vchan_id);
else
count = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool, pkts,
count, dma_id, vchan_id);
*nr_inflight = vq->async->pkts_inflight_n;
out: