net/octeontx: support multi segment

Add multi segment support to the octeontx PMD. Also add the
logic to share Rx/Tx offloads with the eventdev code.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
Harman Kalra 2020-03-16 15:03:37 +05:30 committed by Ferruh Yigit
parent 41fe7a3a11
commit 85221a0c7c
9 changed files with 250 additions and 28 deletions
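
For context, a minimal sketch (not part of this commit) of how an application requests the offloads this patch wires up; PORT_ID, NB_RXQ and NB_TXQ are illustrative values:

#include <rte_ethdev.h>

/* Illustrative port/queue parameters. */
#define PORT_ID 0
#define NB_RXQ  1
#define NB_TXQ  1

static int
configure_mseg_port(void)
{
	struct rte_eth_conf conf = {0};

	/* Scattered Rx and multi-seg Tx, matching the capabilities the PMD
	 * now advertises via OCTEONTX_RX_OFFLOADS/OCTEONTX_TX_OFFLOADS.
	 */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_SCATTER;
	conf.txmode.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;

	return rte_eth_dev_configure(PORT_ID, NB_RXQ, NB_TXQ, &conf);
}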


@@ -9,6 +9,7 @@ Link status = Y
Lock-free Tx queue = Y
Queue start/stop = P
Jumbo frame = Y
Scattered Rx = Y
Promiscuous mode = Y
Unicast MAC filter = Y
CRC offload = Y


@@ -20,6 +20,7 @@ Features of the OCTEON TX Ethdev PMD are:
- Promiscuous mode
- Port hardware statistics
- Jumbo frames
- Scatter-Gather IO support
- Link state information
- SR-IOV VF
- Multiple queues for TX


@@ -300,7 +300,7 @@ sso_event_tx_adapter_enqueue(void *port,
dq = &txq->dq;
if (__octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va, dq->fc_status_va,
m) < 0)
m, OCCTX_TX_OFFLOAD_NONE) < 0)
return 0;
return 1;


@@ -215,4 +215,36 @@ enum lf_type_e {
LF_UDP_VXLAN = OCCTX_PKI_LTYPE_UDP_VXLAN,
LF_NVGRE = OCCTX_PKI_LTYPE_NVGRE,
};
/* Word 0 of HW segment buflink structure */
typedef union octtx_pki_buflink_w0_u {
uint64_t v;
struct {
uint64_t size:16;
uint64_t rsvd1:15;
uint64_t invfree:1;
/** Aura number of the next segment */
uint64_t aura:16;
uint64_t sw:9;
uint64_t later_invfree:1;
uint64_t rsvd2:5;
/** 1 if aura number is set */
uint64_t has_aura:1;
} s;
} octtx_pki_buflink_w0_t;
/* Word 1 of HW segment buflink structure */
typedef union octtx_pki_buflink_w1_u {
uint64_t v;
struct {
uint64_t addr;
} s;
} octtx_pki_buflink_w1_t;
/* HW structure linking packet segments into a singly linked list */
typedef struct octtx_pki_buflink_s {
octtx_pki_buflink_w0_t w0; /* Word 0 of the buflink */
octtx_pki_buflink_w1_t w1; /* Word 1 of the buflink */
} octtx_pki_buflink_t;
#endif /* __OCTEONTX_PKI_VAR_H__ */
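
As a usage note (not part of the commit), the v/s unions above let software access each hardware word either as a raw 64-bit value or field by field; a small sketch:

/* Sketch: decode word 0 of a buflink via the union above. */
static inline uint16_t
buflink_next_seg_size(uint64_t raw_w0)
{
	octtx_pki_buflink_w0_t w0;

	w0.v = raw_w0;		/* load the raw 64-bit word */
	return w0.s.size;	/* read a single bit-field out of it */
}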


@@ -36,7 +36,10 @@
/* pko_send_hdr_s + pko_send_link */
#define PKO_CMD_SZ (2 << 1)
#define PKO_SEND_GATHER_SUBDC (0x0ull << 60)
#define PKO_SEND_BUFLINK_SUBDC (0x0ull << 60)
#define PKO_SEND_BUFLINK_LDTYPE(x) ((x) << 58)
#define PKO_SEND_BUFLINK_GAUAR(x) ((x) << 24)
#define PKO_SEND_GATHER_SUBDC (0x2ull << 60)
#define PKO_SEND_GATHER_LDTYPE(x) ((x) << 58)
#define PKO_SEND_GATHER_GAUAR(x) ((x) << 24)
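
To make the bit layout concrete, here is a sketch (mirroring the gather setup in __octeontx_xmit_pkts() further down; pko_fill_gather() and its arguments are hypothetical) of composing one PKO_SEND_GATHER_S subdescriptor pair from these macros:

#include <stdint.h>

/* Sketch: word 0 packs the subdescriptor code, load type, guest aura and
 * segment length; word 1 carries the segment's DMA address.
 */
static inline void
pko_fill_gather(uint64_t w[2], uint16_t gaura_id, uint16_t seg_len,
		uint64_t seg_iova)
{
	w[0] = PKO_SEND_GATHER_SUBDC |			/* code in bits [63:60] */
	       PKO_SEND_GATHER_LDTYPE(0x1ull) |		/* load type at bit 58 */
	       PKO_SEND_GATHER_GAUAR((long)gaura_id) |	/* guest aura at bit 24 */
	       seg_len;					/* segment size, low bits */
	w[1] = seg_iova;				/* segment DMA address */
}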


@@ -24,6 +24,10 @@
#include "octeontx_rxtx.h"
#include "octeontx_logs.h"
struct evdev_priv_data {
OFFLOAD_FLAGS; /* Sequence should not be changed */
} __rte_cache_aligned;
struct octeontx_vdev_init_params {
uint8_t nr_port;
};
@@ -257,6 +261,43 @@ devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
info->max_num_events;
}
static uint16_t
octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
uint16_t flags = 0;
/* Created function for supporting future offloads */
if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
flags |= OCCTX_TX_MULTI_SEG_F;
return flags;
}
static uint16_t
octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
struct rte_eth_dev_data *data = eth_dev->data;
struct rte_eth_conf *conf = &data->dev_conf;
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
if (rxmode->mq_mode == ETH_MQ_RX_RSS)
flags |= OCCTX_RX_OFFLOAD_RSS_F;
if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
flags |= OCCTX_RX_MULTI_SEG_F;
eth_dev->data->scattered_rx = 1;
/* If scatter mode is enabled, TX should also be in multi-seg
* mode, otherwise a memory leak will occur
*/
nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
}
return flags;
}
static int
octeontx_dev_configure(struct rte_eth_dev *dev)
{
@@ -321,6 +362,11 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
nic->pki.hash_enable = true;
nic->pki.initialized = false;
nic->rx_offloads |= rxmode->offloads;
nic->tx_offloads |= txmode->offloads;
nic->rx_offload_flags |= octeontx_rx_offload_flags(dev);
nic->tx_offload_flags |= octeontx_tx_offload_flags(dev);
return 0;
}
@@ -359,6 +405,51 @@ octeontx_dev_close(struct rte_eth_dev *dev)
dev->rx_pkt_burst = NULL;
}
static int
octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
{
struct rte_eth_dev *eth_dev = rxq->eth_dev;
struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
struct rte_eth_dev_data *data = eth_dev->data;
struct rte_pktmbuf_pool_private *mbp_priv;
struct evdev_priv_data *evdev_priv;
struct rte_eventdev *dev;
uint32_t buffsz;
/* Get rx buffer size */
mbp_priv = rte_mempool_get_priv(rxq->pool);
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
/* Setup scatter mode if needed by jumbo */
if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
}
/* Sharing offload flags via eventdev priv region */
dev = &rte_eventdevs[rxq->evdev];
evdev_priv = dev->data->dev_private;
evdev_priv->rx_offload_flags = nic->rx_offload_flags;
evdev_priv->tx_offload_flags = nic->tx_offload_flags;
return 0;
}
static void
octeontx_set_tx_function(struct rte_eth_dev *dev)
{
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
const eth_tx_burst_t tx_burst_func[2] = {
[0] = octeontx_xmit_pkts,
[1] = octeontx_xmit_pkts_mseg,
};
dev->tx_pkt_burst =
tx_burst_func[!!(nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)];
}
static int
octeontx_dev_start(struct rte_eth_dev *dev)
{
@@ -371,7 +462,7 @@ octeontx_dev_start(struct rte_eth_dev *dev)
/*
* Tx start
*/
dev->tx_pkt_burst = octeontx_xmit_pkts;
octeontx_set_tx_function(dev);
ret = octeontx_pko_channel_start(nic->base_ochan);
if (ret < 0) {
octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
@@ -599,10 +690,8 @@ octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
struct rte_ether_addr *addr)
{
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
uint8_t prom_mode = dev->data->promiscuous;
int ret;
dev->data->promiscuous = 0;
ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
if (ret == 0) {
/* Update same mac address to BGX CAM table */
@@ -610,7 +699,6 @@ octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
0);
}
if (ret < 0) {
dev->data->promiscuous = prom_mode;
octeontx_log_err("failed to set MAC address on port %d",
nic->port_id);
}
@@ -977,7 +1065,9 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
rxq->evdev = nic->evdev;
rxq->ev_queues = ev_queues;
rxq->ev_ports = ev_ports;
rxq->pool = mb_pool;
octeontx_recheck_rx_offloads(rxq);
dev->data->rx_queues[qidx] = rxq;
dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
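
The scatter decision in octeontx_recheck_rx_offloads() above reduces to simple arithmetic; a standalone sketch (needs_scatter() is a hypothetical helper):

#include <rte_mbuf.h>

/* Sketch: scattered Rx is required once the configured max frame length
 * no longer fits the data room of a single mbuf from the Rx pool.
 */
static inline int
needs_scatter(struct rte_mempool *pool, uint32_t max_rx_pkt_len)
{
	uint32_t buffsz = rte_pktmbuf_data_room_size(pool) -
			  RTE_PKTMBUF_HEADROOM;

	return max_rx_pkt_len > buffsz;
}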


@@ -29,8 +29,12 @@
#define OCTEONTX_MAX_BGX_PORTS 4
#define OCTEONTX_MAX_LMAC_PER_BGX 4
#define OCTEONTX_RX_OFFLOADS DEV_RX_OFFLOAD_CHECKSUM
#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE
#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME)
#define OCTEONTX_TX_OFFLOADS (DEV_TX_OFFLOAD_MT_LOCKFREE | \
DEV_TX_OFFLOAD_MULTI_SEGS)
static inline struct octeontx_nic *
octeontx_pmd_priv(struct rte_eth_dev *dev)
@@ -73,6 +77,10 @@ struct octeontx_nic {
uint16_t ev_queues;
uint16_t ev_ports;
uint64_t rx_offloads;
uint16_t rx_offload_flags;
uint64_t tx_offloads;
uint16_t tx_offload_flags;
} __rte_cache_aligned;
struct octeontx_txq {
@@ -88,6 +96,7 @@ struct octeontx_rxq {
struct rte_eth_dev *eth_dev;
uint16_t ev_queues;
uint16_t ev_ports;
struct rte_mempool *pool;
} __rte_cache_aligned;
#endif /* __OCTEONTX_ETHDEV_H__ */


@@ -32,8 +32,34 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_cio_wmb();
while (count < nb_pkts) {
res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
dq->fc_status_va,
tx_pkts[count]);
dq->fc_status_va, tx_pkts[count],
OCCTX_TX_OFFLOAD_NONE);
if (res < 0)
break;
count++;
}
return count; /* return number of pkts transmitted */
}
uint16_t __hot
octeontx_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
int count;
struct octeontx_txq *txq = tx_queue;
octeontx_dq_t *dq = &txq->dq;
int res;
count = 0;
rte_cio_wmb();
while (count < nb_pkts) {
res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
dq->fc_status_va, tx_pkts[count],
OCCTX_TX_OFFLOAD_NONE |
OCCTX_TX_MULTI_SEG_F);
if (res < 0)
break;


@@ -7,6 +7,19 @@
#include <rte_ethdev_driver.h>
#define OFFLOAD_FLAGS \
uint16_t rx_offload_flags; \
uint16_t tx_offload_flags
#define BIT(nr) (1UL << (nr))
#define OCCTX_RX_OFFLOAD_NONE (0)
#define OCCTX_RX_OFFLOAD_RSS_F BIT(0)
#define OCCTX_RX_MULTI_SEG_F BIT(15)
#define OCCTX_TX_OFFLOAD_NONE (0)
#define OCCTX_TX_MULTI_SEG_F BIT(15)
/* Packet type table */
#define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST
@@ -98,33 +111,76 @@ ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
static __rte_always_inline int
__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
struct rte_mbuf *tx_pkt)
struct rte_mbuf *tx_pkt, const uint16_t flag)
{
uint64_t cmd_buf[4] __rte_cache_aligned;
uint16_t gaura_id;
uint8_t sz = (4 + (!!(flag & OCCTX_TX_MULTI_SEG_F) * 10));
/* Max size of PKO SEND desc is 112 bytes */
uint64_t cmd_buf[sz] __rte_cache_aligned;
uint8_t nb_segs, nb_desc = 0;
uint16_t gaura_id, len = 0;
struct rte_mbuf *m_next = NULL;
if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
return -ENOSPC;
/* Get the gaura Id */
gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
if (flag & OCCTX_TX_MULTI_SEG_F) {
nb_segs = tx_pkt->nb_segs;
/* Setup PKO_SEND_HDR_S */
cmd_buf[0] = tx_pkt->data_len & 0xffff;
cmd_buf[1] = 0x0;
cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
cmd_buf[nb_desc++] = 0x0;
/* Set don't free bit if reference count > 1 */
if (rte_mbuf_refcnt_read(tx_pkt) > 1)
cmd_buf[0] |= (1ULL << 58); /* SET DF */
do {
m_next = tx_pkt->next;
/* To handle the case where mbufs belong to different
* pools, e.g. after fragmentation
*/
gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
tx_pkt->pool->pool_id);
/* Setup PKO_SEND_GATHER_S */
cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC |
PKO_SEND_GATHER_LDTYPE(0x1ull) |
PKO_SEND_GATHER_GAUAR((long)gaura_id) |
PKO_SEND_GATHER_GAUAR((long)
gaura_id) |
tx_pkt->data_len;
/* Mark mempool object as "put" since it is freed by
* PKO.
*/
if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
tx_pkt->next = NULL;
__mempool_check_cookies(tx_pkt->pool,
(void **)&tx_pkt, 1, 0);
}
nb_desc++;
octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
nb_segs--;
len += tx_pkt->data_len;
tx_pkt = m_next;
} while (nb_segs);
} else {
/* Setup PKO_SEND_HDR_S */
cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
cmd_buf[nb_desc++] = 0x0;
/* Mark mempool object as "put" since it is freed by PKO */
if (!(cmd_buf[0] & (1ULL << 58)))
__mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
1, 0);
/* Get the gaura Id */
gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
tx_pkt->pool->pool_id);
/* Setup PKO_SEND_BUFLINK_S */
cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
tx_pkt->data_len;
cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
}
octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, nb_desc);
return 0;
}
@@ -132,6 +188,10 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
uint16_t
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
uint16_t
octeontx_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
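
Finally, a hedged sketch of the input octeontx_xmit_pkts_mseg() consumes: a chained mbuf whose every segment becomes one PKO_SEND_GATHER_S pair. make_two_seg_pkt() and 'pool' are illustrative:

#include <rte_mbuf.h>

static struct rte_mbuf *
make_two_seg_pkt(struct rte_mempool *pool)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(pool);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(pool);

	if (head == NULL || tail == NULL) {
		rte_pktmbuf_free(head);	/* free(NULL) is a no-op */
		rte_pktmbuf_free(tail);
		return NULL;
	}

	rte_pktmbuf_append(head, 1024);	/* data_len of segment 1 */
	rte_pktmbuf_append(tail, 512);	/* data_len of segment 2 */
	rte_pktmbuf_chain(head, tail);	/* updates nb_segs and pkt_len */

	return head;
}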