net/enic: support TSO

The enic TSO implementation requires that the length of the Eth/IP/TCP
headers be passed to the NIC. Other than that, it's just a matter of
setting the mss and offload mode on a per-packet basis.

In TSO mode, IP and TCP checksums are offloaded even if not requested
with mb->ol_flags.

Signed-off-by: John Daley <johndale@cisco.com>
John Daley authored on 2017-01-09 15:04:28 -08:00; committed by Ferruh Yigit
parent 0bc9f022a1
commit 026afc76b0
3 changed files with 82 additions and 10 deletions
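
Before the diffs, a note on usage: an application asks for TSO on a per-packet basis through mbuf fields. Below is a minimal sketch, assuming the DPDK 17.02-era mbuf API; the helper name request_tso is hypothetical. Note that this patch keys off tx_pkt->tso_segsz and parses the headers itself, so the l2/l3/l4 lengths are set here only for portability to other PMDs.

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/* Hypothetical helper: mark an untagged IPv4/TCP mbuf for TSO. */
static void request_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4;
	m->tso_segsz = mss;                   /* TCP payload bytes per segment */
	m->l2_len = sizeof(struct ether_hdr); /* 14; no VLAN tag assumed */
	m->l3_len = sizeof(struct ipv4_hdr);  /* 20; no IP options assumed */
	m->l4_len = sizeof(struct tcp_hdr);   /* 20; no TCP options assumed */
}

As the commit message notes, IP and TCP checksums are offloaded implicitly in TSO mode, so PKT_TX_IP_CKSUM and PKT_TX_TCP_CKSUM need not be set for segmented packets.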

doc/guides/nics/features/enic.ini

@@ -10,6 +10,7 @@ Queue start/stop = Y
 MTU update = Y
 Jumbo frame = Y
 Scattered Rx = Y
+TSO = Y
 Promiscuous mode = Y
 Unicast MAC filter = Y
 Multicast MAC filter = Y

drivers/net/enic/enic_ethdev.c

@@ -475,7 +475,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
 		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_TSO;
 	device_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
 	};
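
With the capability now advertised, an application can probe for it through the standard ethdev info call before enabling TSO. A small sketch (helper name hypothetical; 17.02-era API, where port ids are uint8_t):

#include <rte_ethdev.h>

/* Hypothetical helper: does this port advertise TCP TSO? */
static int port_supports_tso(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0;
}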

drivers/net/enic/enic_rxtx.c

@@ -37,6 +37,9 @@
 #include "enic_compat.h"
 #include "rq_enet_desc.h"
 #include "enic.h"
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
 
 #define RTE_PMD_USE_PREFETCH
@@ -129,6 +132,60 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
 		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
 }
 
+/* Find the offset to L5. This is needed by enic TSO implementation.
+ * Return 0 if not a TCP packet or can't figure out the length.
+ */
+static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
+{
+	struct ether_hdr *eh;
+	struct vlan_hdr *vh;
+	struct ipv4_hdr *ip4;
+	struct ipv6_hdr *ip6;
+	struct tcp_hdr *th;
+	uint8_t hdr_len;
+	uint16_t ether_type;
+
+	/* offset past Ethernet header */
+	eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+	ether_type = eh->ether_type;
+	hdr_len = sizeof(struct ether_hdr);
+	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+		vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
+		ether_type = vh->eth_proto;
+		hdr_len += sizeof(struct vlan_hdr);
+	}
+
+	/* offset past IP header */
+	switch (rte_be_to_cpu_16(ether_type)) {
+	case ETHER_TYPE_IPv4:
+		ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
+		if (ip4->next_proto_id != IPPROTO_TCP)
+			return 0;
+		hdr_len += (ip4->version_ihl & 0xf) * 4;
+		break;
+	case ETHER_TYPE_IPv6:
+		ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
+		if (ip6->proto != IPPROTO_TCP)
+			return 0;
+		hdr_len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		return 0;
+	}
+
+	if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
+		return 0;
+
+	/* offset past TCP header */
+	th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
+	hdr_len += (th->data_off >> 4) * 4;
+
+	if (hdr_len > mbuf->pkt_len)
+		return 0;
+
+	return hdr_len;
+}
+
 static inline uint8_t
 enic_cq_rx_check_err(struct cq_desc *cqd)
 {
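
To make the header arithmetic concrete: an untagged IPv4/TCP packet with no options yields 14 + 20 + 20 = 54 bytes; a VLAN tag plus 12 bytes of TCP options would yield 14 + 4 + 20 + 32 = 70. A hypothetical check of the function above (illustration only, not part of the patch; assumes an initialized mempool):

#include <string.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_debug.h>

/* Hypothetical test helper: craft minimal Eth/IPv4/TCP headers and check
 * that the parse above returns 54. */
static void check_tso_header_len(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
	struct ether_hdr *eh;
	struct ipv4_hdr *ip;
	struct tcp_hdr *th;

	if (m == NULL)
		return;
	eh = (struct ether_hdr *)rte_pktmbuf_append(m, 54);
	memset(eh, 0, 54);
	eh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	ip = (struct ipv4_hdr *)(eh + 1);
	ip->version_ihl = 0x45;           /* IPv4, IHL 5 -> 20-byte header */
	ip->next_proto_id = IPPROTO_TCP;
	th = (struct tcp_hdr *)(ip + 1);
	th->data_off = 0x50;              /* data offset 5 -> 20-byte header */
	RTE_ASSERT(tso_header_len(m) == 54); /* 14 + 20 + 20 */
	rte_pktmbuf_free(m);
}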
@@ -466,6 +523,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint8_t vlan_tag_insert;
 	uint8_t eop;
 	uint64_t bus_addr;
+	uint8_t offload_mode;
+	uint16_t header_len;
 
 	enic_cleanup_wq(enic, wq);
 	wq_desc_avail = vnic_wq_desc_avail(wq);
@@ -497,13 +556,17 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		desc_p = descs + head_idx;
 
 		eop = (data_len == pkt_len);
-
-		if (ol_flags & ol_flags_mask) {
-			if (ol_flags & PKT_TX_VLAN_PKT) {
-				vlan_tag_insert = 1;
-				vlan_id = tx_pkt->vlan_tci;
+		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
+		header_len = 0;
+
+		if (tx_pkt->tso_segsz) {
+			header_len = tso_header_len(tx_pkt);
+			if (header_len) {
+				offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+				mss = tx_pkt->tso_segsz;
 			}
+		}
 
+		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
 			if (ol_flags & PKT_TX_IP_CKSUM)
 				mss |= ENIC_CALC_IP_CKSUM;
@@ -516,8 +579,14 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 		}
 
-		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
-				 eop, 0, vlan_tag_insert, vlan_id, 0);
+		if (ol_flags & PKT_TX_VLAN_PKT) {
+			vlan_tag_insert = 1;
+			vlan_id = tx_pkt->vlan_tci;
+		}
+
+		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
+				 offload_mode, eop, eop, 0, vlan_tag_insert,
+				 vlan_id, 0);
 
 		*desc_p = desc_tmp;
 		buf = &wq->bufs[head_idx];
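
As a reading aid for the encode calls: per the descriptor encoder in the enic base code (wq_enet_desc.h), the parameters line up as below, so the TSO path now supplies header_length and offload_mode where zeros were passed before.

/* Parameter order of the enic base-code descriptor encoder. */
void wq_enet_desc_enc(struct wq_enet_desc *desc,
		      u64 address, u16 length, u16 mss, u16 header_length,
		      u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
		      u8 vlan_tag_insert, u16 vlan_tag, u8 loopback);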
@@ -537,8 +606,9 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 					   + tx_pkt->data_off);
 				wq_enet_desc_enc((struct wq_enet_desc *)
 						 &desc_tmp, bus_addr, data_len,
-						 mss, 0, 0, eop, eop, 0,
-						 vlan_tag_insert, vlan_id, 0);
+						 mss, 0, offload_mode, eop, eop,
+						 0, vlan_tag_insert, vlan_id,
+						 0);
 
 				*desc_p = desc_tmp;
 				buf = &wq->bufs[head_idx];