net/octeontx2: support packet type

The fields from the CQE need to be converted to the
packet type and Rx offload flags in the mbuf. This
patch creates the lookup memory for those items so
that it can be used in the fast path.
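
The lookup memory is a single shared memzone holding two 16-bit
ptype tables followed by a 32-bit Rx ol_flags table. A rough view
of that layout, using a hypothetical struct purely for illustration
(the driver indexes the raw memzone by byte offset, not through a
struct):

	#include <stdint.h>

	/* Illustrative layout of the shared lookup memzone */
	struct nix_lookup_mem_view {
		uint16_t ptype_non_tunnel[1 << 12]; /* indexed by LB..LE layer types */
		uint16_t ptype_tunnel[1 << 12];     /* indexed by LF..LH layer types */
		uint32_t rx_ol_flags[1 << 12];      /* indexed by ERRLEV + ERRCODE */
	};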

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
Signed-off-by: Harman Kalra <hkalra@marvell.com>
commit 6e892eabce (parent 0b16e2abd6)
Author: Jerin Jacob, 2019-05-30 12:24:11 +05:30
Committed by: Ferruh Yigit
10 changed files with 336 additions and 0 deletions

@@ -20,6 +20,7 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
Inner RSS = Y
Packet type parsing = Y
Basic stats = Y
Stats per queue = Y
Extended stats = Y

@@ -20,6 +20,7 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
Inner RSS = Y
Packet type parsing = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y

@@ -16,6 +16,7 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
Inner RSS = Y
Packet type parsing = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y

@@ -16,6 +16,7 @@ Features
Features of the OCTEON TX2 Ethdev PMD are:
- Packet type information
- Promiscuous mode
- SR-IOV VF
- Lock-free Tx queue

@@ -36,6 +36,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_PMD) += \
otx2_mac.c \
otx2_link.c \
otx2_stats.c \
otx2_lookup.c \
otx2_ethdev.c \
otx2_ethdev_irq.c \
otx2_ethdev_ops.c \

@@ -8,6 +8,7 @@ sources = files(
'otx2_mac.c',
'otx2_link.c',
'otx2_stats.c',
'otx2_lookup.c',
'otx2_ethdev.c',
'otx2_ethdev_irq.c',
'otx2_ethdev_ops.c',

@@ -441,6 +441,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
rxq->pool = mp;
rxq->qlen = nix_qsize_to_val(qsize);
rxq->qsize = qsize;
rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
/* Alloc completion queue */
rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
@@ -1290,6 +1291,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
.tx_queue_stop = otx2_nix_tx_queue_stop,
.rx_queue_start = otx2_nix_rx_queue_start,
.rx_queue_stop = otx2_nix_rx_queue_stop,
.dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
.stats_get = otx2_nix_dev_stats_get,
.stats_reset = otx2_nix_dev_stats_reset,
.get_reg = otx2_nix_dev_get_reg,

@@ -355,6 +355,12 @@ int otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev);
int otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev,
struct rte_ether_addr *addr);
/* Lookup configuration */
void *otx2_nix_fastpath_lookup_mem_get(void);
/* PTYPES */
const uint32_t *otx2_nix_supported_ptypes_get(struct rte_eth_dev *dev);
/* Mac address handling */
int otx2_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
struct rte_ether_addr *addr);

@@ -0,0 +1,315 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include <rte_common.h>
#include <rte_memzone.h>
#include "otx2_ethdev.h"
/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
#define ERRCODE_ERRLEN_WIDTH 12
#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) *\
sizeof(uint32_t))
#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ)
const uint32_t *
otx2_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER_QINQ, /* LB */
RTE_PTYPE_L2_ETHER_VLAN, /* LB */
RTE_PTYPE_L2_ETHER_TIMESYNC, /* LB */
RTE_PTYPE_L2_ETHER_ARP, /* LC */
RTE_PTYPE_L2_ETHER_NSH, /* LC */
RTE_PTYPE_L2_ETHER_FCOE, /* LC */
RTE_PTYPE_L2_ETHER_MPLS, /* LC */
RTE_PTYPE_L3_IPV4, /* LC */
RTE_PTYPE_L3_IPV4_EXT, /* LC */
RTE_PTYPE_L3_IPV6, /* LC */
RTE_PTYPE_L3_IPV6_EXT, /* LC */
RTE_PTYPE_L4_TCP, /* LD */
RTE_PTYPE_L4_UDP, /* LD */
RTE_PTYPE_L4_SCTP, /* LD */
RTE_PTYPE_L4_ICMP, /* LD */
RTE_PTYPE_L4_IGMP, /* LD */
RTE_PTYPE_TUNNEL_GRE, /* LD */
RTE_PTYPE_TUNNEL_ESP, /* LD */
RTE_PTYPE_TUNNEL_NVGRE, /* LD */
RTE_PTYPE_TUNNEL_VXLAN, /* LE */
RTE_PTYPE_TUNNEL_GENEVE, /* LE */
RTE_PTYPE_TUNNEL_GTPC, /* LE */
RTE_PTYPE_TUNNEL_GTPU, /* LE */
RTE_PTYPE_TUNNEL_VXLAN_GPE, /* LE */
RTE_PTYPE_TUNNEL_MPLS_IN_GRE, /* LE */
RTE_PTYPE_TUNNEL_MPLS_IN_UDP, /* LE */
RTE_PTYPE_INNER_L2_ETHER,/* LF */
RTE_PTYPE_INNER_L3_IPV4, /* LG */
RTE_PTYPE_INNER_L3_IPV6, /* LG */
RTE_PTYPE_INNER_L4_TCP, /* LH */
RTE_PTYPE_INNER_L4_UDP, /* LH */
RTE_PTYPE_INNER_L4_SCTP, /* LH */
RTE_PTYPE_INNER_L4_ICMP, /* LH */
};
if (dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)
return ptypes;
else
return NULL;
}
/*
 * +-------------------+-------------------+
 * |  | IL4 | IL3 | IL2 | TU | L4 | L3 | L2 |
 * +-------------------+-------------------+
 *
 * +-------------------+-------------------+
 * |  | LH  | LG  | LF  | LE | LD | LC | LB |
 * +-------------------+-------------------+
 *
 * ptype       [LE - LD - LC - LB] = TU  - L4  - L3  - L2
 * ptype_tunnel[LH - LG - LF]      = IL4 - IL3 - IL2 - TU
 *
 */
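/*
 * Worked example (illustrative only) for the non-tunnel ptype array built
 * below: an outer IPv4/UDP packet with no VLAN and no tunnel has lb = 0,
 * lc = NPC_LT_LC_IP, ld = NPC_LT_LD_UDP, le = 0, so
 * idx = (le << 12) | (ld << 8) | (lc << 4) | lb and the generated entry is
 * ptype[idx] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP.
 */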
static void
nix_create_non_tunnel_ptype_array(uint16_t *ptype)
{
uint8_t lb, lc, ld, le;
uint16_t idx, val;
for (idx = 0; idx < PTYPE_NON_TUNNEL_ARRAY_SZ; idx++) {
lb = idx & 0xF;
lc = (idx & 0xF0) >> 4;
ld = (idx & 0xF00) >> 8;
le = (idx & 0xF000) >> 12;
val = RTE_PTYPE_UNKNOWN;
switch (lb) {
case NPC_LT_LB_QINQ:
val |= RTE_PTYPE_L2_ETHER_QINQ;
break;
case NPC_LT_LB_CTAG:
val |= RTE_PTYPE_L2_ETHER_VLAN;
break;
}
switch (lc) {
case NPC_LT_LC_ARP:
val |= RTE_PTYPE_L2_ETHER_ARP;
break;
case NPC_LT_LC_NSH:
val |= RTE_PTYPE_L2_ETHER_NSH;
break;
case NPC_LT_LC_FCOE:
val |= RTE_PTYPE_L2_ETHER_FCOE;
break;
case NPC_LT_LC_MPLS:
val |= RTE_PTYPE_L2_ETHER_MPLS;
break;
case NPC_LT_LC_IP:
val |= RTE_PTYPE_L3_IPV4;
break;
case NPC_LT_LC_IP_OPT:
val |= RTE_PTYPE_L3_IPV4_EXT;
break;
case NPC_LT_LC_IP6:
val |= RTE_PTYPE_L3_IPV6;
break;
case NPC_LT_LC_IP6_EXT:
val |= RTE_PTYPE_L3_IPV6_EXT;
break;
case NPC_LT_LC_PTP:
val |= RTE_PTYPE_L2_ETHER_TIMESYNC;
break;
}
switch (ld) {
case NPC_LT_LD_TCP:
val |= RTE_PTYPE_L4_TCP;
break;
case NPC_LT_LD_UDP:
val |= RTE_PTYPE_L4_UDP;
break;
case NPC_LT_LD_SCTP:
val |= RTE_PTYPE_L4_SCTP;
break;
case NPC_LT_LD_ICMP:
val |= RTE_PTYPE_L4_ICMP;
break;
case NPC_LT_LD_IGMP:
val |= RTE_PTYPE_L4_IGMP;
break;
case NPC_LT_LD_GRE:
val |= RTE_PTYPE_TUNNEL_GRE;
break;
case NPC_LT_LD_NVGRE:
val |= RTE_PTYPE_TUNNEL_NVGRE;
break;
case NPC_LT_LD_ESP:
val |= RTE_PTYPE_TUNNEL_ESP;
break;
}
switch (le) {
case NPC_LT_LE_VXLAN:
val |= RTE_PTYPE_TUNNEL_VXLAN;
break;
case NPC_LT_LE_VXLANGPE:
val |= RTE_PTYPE_TUNNEL_VXLAN_GPE;
break;
case NPC_LT_LE_GENEVE:
val |= RTE_PTYPE_TUNNEL_GENEVE;
break;
case NPC_LT_LE_GTPC:
val |= RTE_PTYPE_TUNNEL_GTPC;
break;
case NPC_LT_LE_GTPU:
val |= RTE_PTYPE_TUNNEL_GTPU;
break;
case NPC_LT_LE_TU_MPLS_IN_GRE:
val |= RTE_PTYPE_TUNNEL_MPLS_IN_GRE;
break;
case NPC_LT_LE_TU_MPLS_IN_UDP:
val |= RTE_PTYPE_TUNNEL_MPLS_IN_UDP;
break;
}
ptype[idx] = val;
}
}
#define TU_SHIFT(x) ((x) >> PTYPE_WIDTH)
static void
nix_create_tunnel_ptype_array(uint16_t *ptype)
{
uint8_t lf, lg, lh;
uint16_t idx, val;
/* Skip non tunnel ptype array memory */
ptype = ptype + PTYPE_NON_TUNNEL_ARRAY_SZ;
for (idx = 0; idx < PTYPE_TUNNEL_ARRAY_SZ; idx++) {
lf = idx & 0xF;
lg = (idx & 0xF0) >> 4;
lh = (idx & 0xF00) >> 8;
val = RTE_PTYPE_UNKNOWN;
switch (lf) {
case NPC_LT_LF_TU_ETHER:
val |= TU_SHIFT(RTE_PTYPE_INNER_L2_ETHER);
break;
}
switch (lg) {
case NPC_LT_LG_TU_IP:
val |= TU_SHIFT(RTE_PTYPE_INNER_L3_IPV4);
break;
case NPC_LT_LG_TU_IP6:
val |= TU_SHIFT(RTE_PTYPE_INNER_L3_IPV6);
break;
}
switch (lh) {
case NPC_LT_LH_TU_TCP:
val |= TU_SHIFT(RTE_PTYPE_INNER_L4_TCP);
break;
case NPC_LT_LH_TU_UDP:
val |= TU_SHIFT(RTE_PTYPE_INNER_L4_UDP);
break;
case NPC_LT_LH_TU_SCTP:
val |= TU_SHIFT(RTE_PTYPE_INNER_L4_SCTP);
break;
case NPC_LT_LH_TU_ICMP:
val |= TU_SHIFT(RTE_PTYPE_INNER_L4_ICMP);
break;
}
ptype[idx] = val;
}
}
static void
nix_create_rx_ol_flags_array(void *mem)
{
uint16_t idx, errcode, errlev;
uint32_t val, *ol_flags;
/* Skip ptype array memory */
ol_flags = (uint32_t *)((uint8_t *)mem + PTYPE_ARRAY_SZ);
for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
errlev = idx & 0xf;
errcode = (idx & 0xff0) >> 4;
val = PKT_RX_IP_CKSUM_UNKNOWN;
val |= PKT_RX_L4_CKSUM_UNKNOWN;
val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
switch (errlev) {
case NPC_ERRLEV_RE:
/* Mark all errors as BAD checksum errors */
if (errcode) {
val |= PKT_RX_IP_CKSUM_BAD;
val |= PKT_RX_L4_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= PKT_RX_L4_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LC:
if (errcode == NPC_EC_OIP4_CSUM ||
errcode == NPC_EC_IP_FRAG_OFFSET_1) {
val |= PKT_RX_IP_CKSUM_BAD;
val |= PKT_RX_EIP_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LG:
if (errcode == NPC_EC_IIP4_CSUM)
val |= PKT_RX_IP_CKSUM_BAD;
else
val |= PKT_RX_IP_CKSUM_GOOD;
break;
case NPC_ERRLEV_NIX:
if (errcode == NIX_RX_PERRCODE_OL4_CHK) {
val |= PKT_RX_OUTER_L4_CKSUM_BAD;
val |= PKT_RX_L4_CKSUM_BAD;
} else if (errcode == NIX_RX_PERRCODE_IL4_CHK) {
val |= PKT_RX_L4_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= PKT_RX_L4_CKSUM_GOOD;
}
break;
}
ol_flags[idx] = val;
}
}
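/*
 * Worked example (illustrative only) for the ol_flags array built above:
 * errlev = NPC_ERRLEV_RE with errcode = 0 yields
 * PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD, while any non-zero RE
 * errcode marks both the IP and L4 checksums as bad.
 */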
void *
otx2_nix_fastpath_lookup_mem_get(void)
{
const char name[] = "otx2_nix_fastpath_lookup_mem";
const struct rte_memzone *mz;
void *mem;
mz = rte_memzone_lookup(name);
if (mz != NULL)
return mz->addr;
/* Request for the first time */
mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
SOCKET_ID_ANY, 0, OTX2_ALIGN);
if (mz != NULL) {
mem = mz->addr;
/* Form the ptype array lookup memory */
nix_create_non_tunnel_ptype_array(mem);
nix_create_tunnel_ptype_array(mem);
/* Form the rx ol_flags based on errcode */
nix_create_rx_ol_flags_array(mem);
return mem;
}
return NULL;
}
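
With 4096 entries per ptype table (two bytes each) and 4096 error entries
(four bytes each), the memzone reserved above works out to 32 KB. A minimal
standalone check of that arithmetic, mirroring the driver's constants under
the assumption that BIT(x) expands to (1UL << (x)):

	#include <assert.h>
	#include <stdint.h>

	#define BIT(x)                    (1UL << (x))
	#define PTYPE_WIDTH               12
	#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_WIDTH)
	#define PTYPE_TUNNEL_ARRAY_SZ     BIT(PTYPE_WIDTH)
	#define PTYPE_ARRAY_SZ            ((PTYPE_NON_TUNNEL_ARRAY_SZ + \
					    PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
	#define ERRCODE_ERRLEN_WIDTH      12
	#define ERR_ARRAY_SZ              (BIT(ERRCODE_ERRLEN_WIDTH) * sizeof(uint32_t))
	#define LOOKUP_ARRAY_SZ           (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ)

	int main(void)
	{
		assert(PTYPE_ARRAY_SZ == 16 * 1024);  /* 2 * 4096 * 2 bytes */
		assert(ERR_ARRAY_SZ == 16 * 1024);    /* 4096 * 4 bytes */
		assert(LOOKUP_ARRAY_SZ == 32 * 1024); /* one 32 KB memzone */
		return 0;
	}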

@@ -5,6 +5,13 @@
#ifndef __OTX2_RX_H__
#define __OTX2_RX_H__
#define PTYPE_WIDTH 12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ BIT(PTYPE_WIDTH)
#define PTYPE_ARRAY_SZ ((PTYPE_NON_TUNNEL_ARRAY_SZ +\
PTYPE_TUNNEL_ARRAY_SZ) *\
sizeof(uint16_t))
#define NIX_RX_OFFLOAD_PTYPE_F BIT(1)
#endif /* __OTX2_RX_H__ */
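
The Rx burst code that consumes this lookup memory is not part of this
patch. A minimal sketch of how a later fast-path routine could use it,
assuming the packed outer (LB..LE) and inner (LF..LH) layer-type nibbles
and the 12-bit ERRLEV/ERRCODE value have already been extracted from the
CQE; the function name and parameters below are hypothetical:

	#include <rte_mbuf.h>
	#include "otx2_rx.h"

	/* Hypothetical helper, not part of this patch */
	static inline void
	nix_cqe_to_mbuf_sketch(const void *lookup_mem, uint16_t outer_lt,
			       uint16_t inner_lt, uint16_t errlev_errcode,
			       struct rte_mbuf *mbuf)
	{
		const uint16_t *ptype = lookup_mem;
		const uint32_t *ol_flags = (const uint32_t *)
			((const uint8_t *)lookup_mem + PTYPE_ARRAY_SZ);

		/* Outer layers index the non-tunnel table; inner layers index
		 * the tunnel table, whose entries were stored right-shifted by
		 * PTYPE_WIDTH and are restored here.
		 */
		mbuf->packet_type = ptype[outer_lt] |
			((uint32_t)ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + inner_lt]
			 << PTYPE_WIDTH);

		/* ERRLEV + ERRCODE (12 bits) index the Rx ol_flags table */
		mbuf->ol_flags = ol_flags[errlev_errcode];
	}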