vhost: allocate and free packets in bulk in Tx packed
Move allocation out further and perform all allocation in bulk. The same
goes for freeing packets. In the process, also introduce
virtio_dev_pktmbuf_prep and make virtio_dev_pktmbuf_alloc use it.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
parent 56fa279124
commit a287ac2891
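The gist of the patch is in virtio_dev_tx_packed() below: the whole burst of
mbufs is taken from the pool in one rte_pktmbuf_alloc_bulk() call before the
dequeue loop, and any unconsumed tail is returned in one
rte_pktmbuf_free_bulk() call after it. A minimal sketch of that pattern,
using only the public mbuf API; the helper name and the elided
descriptor-copy step are illustrative, not code from the patch:

#include <rte_mbuf.h>

/* Sketch: bulk-allocate a burst, fill what we can, bulk-free the rest. */
static uint16_t
bulk_dequeue_sketch(struct rte_mempool *mp, struct rte_mbuf **pkts,
		    uint16_t count)
{
	uint16_t done = 0;

	/* one bulk get instead of count single allocations; all-or-nothing */
	if (rte_pktmbuf_alloc_bulk(mp, pkts, count))
		return 0;

	/* ... copy descriptors into pkts[done], bumping done, until an
	 * error occurs or done == count ... */

	/* return the unused tail to the pool in a single call */
	if (done != count)
		rte_pktmbuf_free_bulk(&pkts[done], count - done);

	return done;
}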
@@ -2134,6 +2134,24 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
 	return 0;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+
+	return -1;
+}
+
 /*
  * Allocate a host supported pktmbuf.
  */
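virtio_dev_pktmbuf_prep() is the old body of virtio_dev_pktmbuf_alloc() minus
the allocation and the failure-path free: it only checks that an
already-allocated mbuf can hold data_len bytes, via tailroom, an external
buffer, or chaining. Roughly (a sketch of the relationship, not code from the
patch):

	/* alloc == pool get + prep; only the single-alloc path frees on
	 * failure, prep itself never frees, so a bulk caller keeps
	 * ownership of its mbufs */
	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
	if (pkt != NULL && virtio_dev_pktmbuf_prep(dev, pkt, data_len)) {
		rte_pktmbuf_free(pkt);
		pkt = NULL;
	}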
@@ -2149,23 +2167,15 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 		return NULL;
 	}
 
-	if (rte_pktmbuf_tailroom(pkt) >= data_len)
-		return pkt;
+	if (virtio_dev_pktmbuf_prep(dev, pkt, data_len)) {
+		/* Data doesn't fit into the buffer and the host supports
+		 * only linear buffers
+		 */
+		rte_pktmbuf_free(pkt);
+		return NULL;
+	}
 
-	/* attach an external buffer if supported */
-	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
-		return pkt;
-
-	/* check if chained buffers are allowed */
-	if (!dev->linearbuf)
-		return pkt;
-
-	/* Data doesn't fit into the buffer and the host supports
-	 * only linear buffers
-	 */
-	rte_pktmbuf_free(pkt);
-
-	return NULL;
+	return pkt;
 }
 
 static __rte_noinline uint16_t
@@ -2261,7 +2271,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2306,9 +2315,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
-			goto free_buf;
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2316,7 +2324,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
-			goto free_buf;
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
@@ -2327,17 +2335,13 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	return 0;
 
-free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
+err:
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
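Since the batch path no longer allocates, its free_buf label, which freed all
PACKED_BATCH_SIZE mbufs, collapses to a bare err return. The resulting
ownership contract, as a comment-style sketch (the comment is mine, the call
mirrors the patched caller):

	/* on -1, pkts[0..PACKED_BATCH_SIZE-1] are unfilled but still
	 * allocated; virtio_dev_tx_packed() reclaims them with
	 * rte_pktmbuf_free_bulk(), so no per-mbuf free happens here */
	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
					     desc_addrs, ids))
		return -1;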
@@ -2347,8 +2351,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2381,7 +2385,7 @@ static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2398,8 +2402,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2409,7 +2412,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2418,7 +2421,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2429,7 +2431,7 @@ static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2461,11 +2463,14 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 {
 	uint32_t pkt_idx = 0;
 
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
+		return 0;
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (count - pkt_idx >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				continue;
@@ -2473,11 +2478,14 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 			}
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						pkts[pkt_idx]))
 			break;
 		pkt_idx++;
 	} while (pkt_idx < count);
 
+	if (pkt_idx != count)
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
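Seen from the API boundary nothing changes: rte_pktmbuf_alloc_bulk() is
all-or-nothing, so an empty pool simply yields zero dequeued packets with
nothing to clean up, and a partial dequeue returns exactly count - pkt_idx
mbufs to the pool in one call. A usage sketch from the application side
(burst size and variable names are illustrative):

	struct rte_mbuf *burst[32];
	uint16_t n, i;

	/* internally this may hit the bulk Tx packed path; only the
	 * returned count matters to the caller */
	n = rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool, burst, 32);
	for (i = 0; i < n; i++)
		rte_pktmbuf_free(burst[i]);	/* or hand off to a TX path */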