net/thunderx: support Rx VLAN offload

This feature offloads stripping of the VLAN header from received
packets and updates the vlan_tci field in the mbuf when the
DEV_RX_OFFLOAD_VLAN_STRIP offload is enabled and the
ETH_VLAN_STRIP_MASK flag is set.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Rakesh Kudurumalla authored 2018-07-18 20:35:02 +05:30; committed by Ferruh Yigit
parent 5e64c8120c
commit d3bf25644b
5 changed files with 101 additions and 11 deletions
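
For context, enabling this offload from an application goes through the
standard ethdev configuration path. A minimal sketch against the DPDK
18.08-era API (not part of this commit; port_id and mb_pool are assumed
to be set up elsewhere by the application):

        /* Request Rx VLAN stripping before configuring the port. */
        struct rte_eth_conf conf = { 0 };

        conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
        rte_eth_dev_configure(port_id, 1, 1, &conf);
        rte_eth_rx_queue_setup(port_id, 0, 512,
                        rte_eth_dev_socket_id(port_id), NULL, mb_pool);
        rte_eth_tx_queue_setup(port_id, 0, 512,
                        rte_eth_dev_socket_id(port_id), NULL);
        rte_eth_dev_start(port_id);

On ThunderX this request lands in the new nicvf_vlan_offload_config()
below, and the stripped tag is delivered in each mbuf's vlan_tci field.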

drivers/net/thunderx/base/nicvf_hw.c

@@ -699,6 +699,7 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
 	else
 		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
 
+	nic->vlan_strip = enable;
 	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
 }

drivers/net/thunderx/nicvf_ethdev.c

@@ -52,6 +52,8 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
 			  bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
 RTE_INIT(nicvf_init_log)
 {
@@ -916,16 +918,21 @@ nicvf_set_rx_function(struct rte_eth_dev *dev)
 {
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 
-	const eth_rx_burst_t rx_burst_func[2][2] = {
-		/* [NORMAL/SCATTER] [NO_CKSUM/CKSUM] */
-		[0][0] = nicvf_recv_pkts_no_offload,
-		[0][1] = nicvf_recv_pkts_cksum,
-		[1][0] = nicvf_recv_pkts_multiseg_no_offload,
-		[1][1] = nicvf_recv_pkts_multiseg_cksum,
+	const eth_rx_burst_t rx_burst_func[2][2][2] = {
+		/* [NORMAL/SCATTER] [NO_CKSUM/CKSUM] [NO_VLAN_STRIP/VLAN_STRIP] */
+		[0][0][0] = nicvf_recv_pkts_no_offload,
+		[0][0][1] = nicvf_recv_pkts_vlan_strip,
+		[0][1][0] = nicvf_recv_pkts_cksum,
+		[0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
+		[1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
+		[1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+		[1][1][0] = nicvf_recv_pkts_multiseg_cksum,
+		[1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
 	};
 
 	dev->rx_pkt_burst =
-		rx_burst_func[dev->data->scattered_rx][nic->offload_cksum];
+		rx_burst_func[dev->data->scattered_rx]
+			[nic->offload_cksum][nic->vlan_strip];
 }
 
 static int
@@ -1473,7 +1480,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	struct rte_mbuf *mbuf;
 	uint16_t rx_start, rx_end;
 	uint16_t tx_start, tx_end;
-	bool vlan_strip;
+	int mask;
 
 	PMD_INIT_FUNC_TRACE();
@@ -1594,9 +1601,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 			nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_STRIP);
-	nicvf_vlan_hw_strip(nic, vlan_strip);
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK;
+	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
 	 * to the 64bit memory address.
@@ -1990,6 +1997,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.dev_infos_get            = nicvf_dev_info_get,
 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
 	.mtu_set                  = nicvf_dev_set_mtu,
+	.vlan_offload_set         = nicvf_vlan_offload_set,
 	.reta_update              = nicvf_dev_reta_update,
 	.reta_query               = nicvf_dev_reta_query,
 	.rss_hash_update          = nicvf_dev_rss_hash_update,
@@ -2006,6 +2014,30 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.get_reg                  = nicvf_dev_get_regs,
 };
 
+static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+	struct rte_eth_rxmode *rxmode;
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+
+	rxmode = &dev->data->dev_conf.rxmode;
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			nicvf_vlan_hw_strip(nic, true);
+		else
+			nicvf_vlan_hw_strip(nic, false);
+	}
+
+	return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	nicvf_vlan_offload_config(dev, mask);
+	return 0;
+}
+
 static inline int
 nicvf_set_first_skip(struct rte_eth_dev *dev)
 {

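With the .vlan_offload_set hook registered above, stripping can also be
toggled on a running port through the generic ethdev API; the ethdev
layer updates dev_conf.rxmode.offloads and then invokes the PMD callback
with ETH_VLAN_STRIP_MASK set. A minimal sketch of the usual
read-modify-write idiom (not part of this commit; port_id is assumed):

        /* Toggle Rx VLAN stripping at runtime; the target state is taken
         * from the flags passed to rte_eth_dev_set_vlan_offload(). */
        int flags = rte_eth_dev_get_vlan_offload(port_id);

        flags |= ETH_VLAN_STRIP_OFFLOAD; /* &= ~ETH_VLAN_STRIP_OFFLOAD to disable */
        rte_eth_dev_set_vlan_offload(port_id, flags);
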
drivers/net/thunderx/nicvf_rxtx.c

@@ -445,6 +445,14 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 		pkt->ol_flags = 0;
 		if (flag & NICVF_RX_OFFLOAD_CKSUM)
 			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+					| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -485,6 +493,22 @@ nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_CKSUM);
 }
 
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 static __rte_always_inline uint16_t __hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			    uint64_t mbuf_phys_off,
@@ -516,6 +540,13 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	pkt->ol_flags = 0;
 	if (flag & NICVF_RX_OFFLOAD_CKSUM)
 		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+				| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 	*rx_pkt = pkt;
@@ -594,6 +625,22 @@ nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_CKSUM);
 }
 
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {

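On the application side, the flags set in the receive paths above
surface the stripped tag through the mbuf. A minimal consumption sketch
(not part of this commit; rx_pkts[] and nb_rx are assumed to come from
rte_eth_rx_burst()):

        /* Read the stripped VLAN tag from a received burst. */
        uint16_t i;

        for (i = 0; i < nb_rx; i++) {
                struct rte_mbuf *m = rx_pkts[i];

                if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
                        printf("VLAN id %u\n",
                               (unsigned int)(m->vlan_tci & 0xFFF));
        }
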
drivers/net/thunderx/nicvf_rxtx.h

@@ -10,6 +10,7 @@
 
 #define NICVF_RX_OFFLOAD_NONE           0x1
 #define NICVF_RX_OFFLOAD_CKSUM          0x2
+#define NICVF_RX_OFFLOAD_VLAN_STRIP     0x4
 
 #define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
@@ -93,11 +94,19 @@ uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t pkts);
 uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
 uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,

drivers/net/thunderx/nicvf_struct.h

@@ -88,6 +88,7 @@ struct nicvf {
 	bool pf_acked:1;
 	bool pf_nacked:1;
 	bool offload_cksum:1;
+	bool vlan_strip:1;
 	uint64_t hwcap;
 	uint8_t link_up;
 	uint8_t duplex;