net/virtio: add Tx preparation

Virtio requires the pseudo-header checksum to be filled into the
TCP/UDP checksum field before checksum offload, but this was lost
when Tx prepare was introduced. Also, rte_validate_tx_offload()
should be used to validate Tx offloads.
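
As an illustration only (not part of this patch), the sketch below shows
what seeding the pseudo-header checksum means for an IPv4/TCP mbuf whose
headers start at the beginning of the packet data. seed_tcp_phdr_cksum()
is a made-up helper; the struct names assume DPDK releases that use the
rte_-prefixed protocol header structures.

#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

static inline void
seed_tcp_phdr_cksum(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
			struct rte_ipv4_hdr *, m->l2_len);
	struct rte_tcp_hdr *tcp = rte_pktmbuf_mtod_offset(m,
			struct rte_tcp_hdr *, m->l2_len + m->l3_len);

	/* The device finishes the TCP checksum only if the checksum field
	 * already holds the checksum of the pseudo-header. */
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}

rte_net_intel_cksum_prepare(), used in the new prepare callback below,
does the same job generically for IPv4/IPv6 and TCP/UDP, including the
TSO case.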

It is also incorrect to call virtio_tso_fix_cksum() after the
virtio-net header has been prepended to the mbuf without taking the
prepended size into account, since the layer 2/3/4 lengths then give
incorrect offsets.
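
A simplified sketch (again not part of the patch; offsets_after_prepend()
and its vtnet_hdr_size parameter are made-up names) of why the fix-up
must run before the virtio-net header is prepended:

#include <rte_common.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static void
offsets_after_prepend(struct rte_mbuf *m, uint16_t vtnet_hdr_size)
{
	struct rte_ipv4_hdr *ip_before;
	struct rte_ipv4_hdr *ip_after;

	/* Before the prepend, m->l2_len is the offset of the IP header
	 * from the start of the packet data. */
	ip_before = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
			m->l2_len);

	/* The virtio Tx path prepends the virtio-net header to the frame. */
	rte_pktmbuf_prepend(m, vtnet_hdr_size);

	/* The same expression now lands vtnet_hdr_size bytes too early,
	 * inside the virtio-net header, so a checksum fix-up driven by
	 * l2_len/l3_len/l4_len would patch the wrong bytes. Running
	 * virtio_tso_fix_cksum() in Tx prepare, before any prepend,
	 * avoids this. */
	ip_after = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
			m->l2_len);

	RTE_SET_USED(ip_before);
	RTE_SET_USED(ip_after);
}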

Fixes: 4fb7e803eb1a ("ethdev: add Tx preparation")
Cc: stable@dpdk.org

Signed-off-by: Dilshod Urazov <dilshod.urazov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>

@@ -1485,6 +1485,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
 
+	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
 	if (vtpci_packed_queue(hw)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using packed ring %s Tx path on port %u",

@@ -89,6 +89,9 @@ uint16_t virtio_recv_mergeable_pkts_packed(void *rx_queue,
 uint16_t virtio_recv_pkts_inorder(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
+uint16_t virtio_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,


@@ -559,7 +559,6 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
 		/* TCP Segmentation Offload */
 		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-			virtio_tso_fix_cksum(cookie);
 			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
 				VIRTIO_NET_HDR_GSO_TCPV6 :
 				VIRTIO_NET_HDR_GSO_TCPV4;
@@ -1949,6 +1948,37 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 	return nb_rx;
 }
 
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	uint16_t nb_tx;
+	int error;
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		error = rte_validate_tx_offload(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+#endif
+
+		error = rte_net_intel_cksum_prepare(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+
+		if (m->ol_flags & PKT_TX_TCP_SEG)
+			virtio_tso_fix_cksum(m);
+	}
+
+	return nb_tx;
+}
+
 uint16_t
 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)