vhost: return packets to upper layer
This patch makes virtio_dev_merge_tx return the received packets to the application layer. Previously, virtio_tx_route was called to route these packets and then free them. Signed-off-by: Huawei Xie <huawei.xie@intel.com> Acked-by: Changchun Ouyang <changchun.ouyang@intel.com>
This commit is contained in:
parent
7f456f6d61
commit
20f16ce646
@ -514,8 +514,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, struct rte_mbuf **pkts,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* This function works for TX packets with mergeable feature enabled. */
|
/* This function works for TX packets with mergeable feature enabled. */
|
||||||
static inline void __attribute__((always_inline))
|
static inline uint16_t __attribute__((always_inline))
|
||||||
virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
|
virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
|
||||||
{
|
{
|
||||||
struct rte_mbuf *m, *prev;
|
struct rte_mbuf *m, *prev;
|
||||||
struct vhost_virtqueue *vq;
|
struct vhost_virtqueue *vq;
|
||||||
@ -534,7 +534,7 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
|
|||||||
|
|
||||||
/* If there are no available buffers then return. */
|
/* If there are no available buffers then return. */
|
||||||
if (vq->last_used_idx == avail_idx)
|
if (vq->last_used_idx == avail_idx)
|
||||||
return;
|
return 0;
|
||||||
|
|
||||||
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_tx()\n",
|
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_tx()\n",
|
||||||
dev->device_fh);
|
dev->device_fh);
|
||||||
@ -545,6 +545,7 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
|
|||||||
/*get the number of free entries in the ring*/
|
/*get the number of free entries in the ring*/
|
||||||
free_entries = (avail_idx - vq->last_used_idx);
|
free_entries = (avail_idx - vq->last_used_idx);
|
||||||
|
|
||||||
|
free_entries = RTE_MIN(free_entries, count);
|
||||||
/* Limit to MAX_PKT_BURST. */
|
/* Limit to MAX_PKT_BURST. */
|
||||||
free_entries = RTE_MIN(free_entries, MAX_PKT_BURST);
|
free_entries = RTE_MIN(free_entries, MAX_PKT_BURST);
|
||||||
|
|
||||||
@ -601,7 +602,7 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
|
|||||||
if (unlikely(m == NULL)) {
|
if (unlikely(m == NULL)) {
|
||||||
RTE_LOG(ERR, VHOST_DATA,
|
RTE_LOG(ERR, VHOST_DATA,
|
||||||
"Failed to allocate memory for mbuf.\n");
|
"Failed to allocate memory for mbuf.\n");
|
||||||
return;
|
return entry_success;
|
||||||
}
|
}
|
||||||
|
|
||||||
seg_num++;
|
seg_num++;
|
||||||
@ -703,9 +704,9 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
|
|||||||
|
|
||||||
m->nb_segs = seg_num;
|
m->nb_segs = seg_num;
|
||||||
|
|
||||||
|
pkts[entry_success] = m;
|
||||||
vq->last_used_idx++;
|
vq->last_used_idx++;
|
||||||
entry_success++;
|
entry_success++;
|
||||||
rte_pktmbuf_free(m);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rte_compiler_barrier();
|
rte_compiler_barrier();
|
||||||
@ -713,5 +714,6 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
|
|||||||
/* Kick guest if required. */
|
/* Kick guest if required. */
|
||||||
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
|
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
|
||||||
eventfd_write((int)vq->kickfd, 1);
|
eventfd_write((int)vq->kickfd, 1);
|
||||||
|
return entry_success;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
Loading…
x
Reference in New Issue
Block a user