net/virtio: refactor Tx offload helper

Purely cosmetic, but it is rather odd to have an "offload" helper that
first checks whether it actually has anything to do.
We already have the same check in most callers, so move this branch
into them.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Flavio Leitner <fbl@sysclose.org>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Authored by David Marchand on 2021-05-03 18:43:43 +02:00, committed by Maxime Coquelin
parent 004d8e854a
commit 85a4fa2f5a
4 changed files with 40 additions and 46 deletions
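
In short: virtqueue_xmit_offload() loses its third parameter, and each Tx path only calls it when Tx offloads were negotiated (vq->hw->has_tx_offload). The sketch below is illustration only, showing the refactor pattern with simplified, hypothetical stand-in types (net_hdr, pkt, tx_path are not driver names); the actual driver changes follow in the diff.

/*
 * Illustration only -- minimal sketch of the refactor pattern with
 * simplified stand-in types (not the driver's structures).
 */
#include <stdbool.h>
#include <stdint.h>

struct net_hdr { uint8_t flags; };     /* stand-in for struct virtio_net_hdr */
struct pkt     { uint64_t ol_flags; }; /* stand-in for struct rte_mbuf */

/* Before: the helper receives a flag and checks it itself. */
static inline void xmit_offload_before(struct net_hdr *hdr, const struct pkt *p, bool offload)
{
	if (!offload)
		return;
	hdr->flags = p->ol_flags ? 1 : 0; /* fill the header from the packet */
}

/* After: the helper always fills the header... */
static inline void xmit_offload_after(struct net_hdr *hdr, const struct pkt *p)
{
	hdr->flags = p->ol_flags ? 1 : 0;
}

/* ...and the caller keeps the single branch it already had. */
static inline void tx_path(struct net_hdr *hdr, const struct pkt *p, bool has_tx_offload)
{
	if (!has_tx_offload)
		hdr->flags = 0;           /* virtqueue_clear_net_hdr() in the driver */
	else
		xmit_offload_after(hdr, p);
}

The hunks below apply exactly this move in the split, packed, and vectorized Tx paths.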


@@ -448,7 +448,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		if (!vq->hw->has_tx_offload)
 			virtqueue_clear_net_hdr(hdr);
 		else
-			virtqueue_xmit_offload(hdr, cookies[i], true);
+			virtqueue_xmit_offload(hdr, cookies[i]);

 		start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
 		start_dp[idx].len = cookies[i]->data_len + head_size;
@@ -495,7 +495,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 	if (!vq->hw->has_tx_offload)
 		virtqueue_clear_net_hdr(hdr);
 	else
-		virtqueue_xmit_offload(hdr, cookie, true);
+		virtqueue_xmit_offload(hdr, cookie);

 	dp->addr = rte_mbuf_data_iova(cookie) - head_size;
 	dp->len = cookie->data_len + head_size;
@@ -581,7 +581,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		idx = start_dp[idx].next;
 	}

-	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+	if (vq->hw->has_tx_offload)
+		virtqueue_xmit_offload(hdr, cookie);

 	do {
 		start_dp[idx].addr = rte_mbuf_data_iova(cookie);


@@ -115,7 +115,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
 					struct virtio_net_hdr *, -head_size);
-			virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+			virtqueue_xmit_offload(hdr, tx_pkts[i]);
 		}
 	}


@@ -134,7 +134,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
 					struct virtio_net_hdr *, -head_size);
-			virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+			virtqueue_xmit_offload(hdr, tx_pkts[i]);
 		}
 	}


@@ -617,52 +617,44 @@ virtqueue_notify(struct virtqueue *vq)
 } while (0)

 static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-			struct rte_mbuf *cookie,
-			uint8_t offload)
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
 {
-	if (offload) {
-		uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+	uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;

-		if (cookie->ol_flags & PKT_TX_TCP_SEG)
-			csum_l4 |= PKT_TX_TCP_CKSUM;
+	if (cookie->ol_flags & PKT_TX_TCP_SEG)
+		csum_l4 |= PKT_TX_TCP_CKSUM;

-		switch (csum_l4) {
-		case PKT_TX_UDP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_udp_hdr,
-				dgram_cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
+	switch (csum_l4) {
+	case PKT_TX_UDP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;

-		case PKT_TX_TCP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
+	case PKT_TX_TCP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;

-		default:
-			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-			break;
-		}
+	default:
+		ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+		break;
+	}

-		/* TCP Segmentation Offload */
-		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-				VIRTIO_NET_HDR_GSO_TCPV6 :
-				VIRTIO_NET_HDR_GSO_TCPV4;
-			hdr->gso_size = cookie->tso_segsz;
-			hdr->hdr_len =
-				cookie->l2_len +
-				cookie->l3_len +
-				cookie->l4_len;
-		} else {
-			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-		}
-	}
+	/* TCP Segmentation Offload */
+	if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+		hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+			VIRTIO_NET_HDR_GSO_TCPV6 :
+			VIRTIO_NET_HDR_GSO_TCPV4;
+		hdr->gso_size = cookie->tso_segsz;
+		hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+	} else {
+		ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+	}
 }
@@ -741,7 +733,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		}
 	}

-	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+	if (vq->hw->has_tx_offload)
+		virtqueue_xmit_offload(hdr, cookie);

 	do {
 		uint16_t flags;
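
As a side note on what the helper in the virtqueue.h hunk writes: for a TSO'd TCP/IPv4 mbuf it sets csum_start = l2_len + l3_len, csum_offset = offsetof(struct rte_tcp_hdr, cksum), flags = VIRTIO_NET_HDR_F_NEEDS_CSUM, gso_type = VIRTIO_NET_HDR_GSO_TCPV4, gso_size = tso_segsz and hdr_len = l2_len + l3_len + l4_len. Below is a standalone worked example, assuming plain Ethernet/IPv4/TCP framing (14/20/20 byte headers) and a 1448-byte MSS; tcp_hdr_layout is a local stand-in mirroring rte_tcp_hdr's field order, used only so offsetof() yields the same value.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in mirroring the field order of rte_tcp_hdr, so that
 * offsetof(..., cksum) gives the same byte offset. */
struct tcp_hdr_layout {
	uint16_t src_port;
	uint16_t dst_port;
	uint32_t sent_seq;
	uint32_t recv_ack;
	uint8_t  data_off;
	uint8_t  tcp_flags;
	uint16_t rx_win;
	uint16_t cksum;
	uint16_t tcp_urp;
};

int main(void)
{
	const unsigned l2_len = 14, l3_len = 20, l4_len = 20; /* Ether/IPv4/TCP */
	const unsigned mss = 1448;                            /* tso_segsz */

	printf("csum_start  = %u\n", l2_len + l3_len);                         /* 34 */
	printf("csum_offset = %zu\n", offsetof(struct tcp_hdr_layout, cksum)); /* 16 */
	printf("gso_size    = %u\n", mss);                                     /* 1448 */
	printf("hdr_len     = %u\n", l2_len + l3_len + l4_len);                /* 54 */
	return 0;
}

None of this behaviour changes with the refactor; only the location of the has_tx_offload branch does.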