net/cnxk: add Tx queue setup and release

Add Tx queue setup and release for CN9K and CN10K.
Release is common while setup is platform dependent due
to differences in fast path Tx queue structures.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
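
The split works because each Tx queue is one allocation: a common slow-path header (struct cnxk_eth_txq_sp) immediately followed by the platform-specific fast-path structure, with only the fast-path pointer exposed through eth_dev->data->tx_queues[]. A sketch of the layout implied by the diff below:

/*
 * One allocation per Tx queue; only the fast path area is
 * stored in eth_dev->data->tx_queues[qid]:
 *
 *   +------------------------+--------------------------------+
 *   | struct cnxk_eth_txq_sp | cn9k/cn10k_eth_txq (fast path) |
 *   +------------------------+--------------------------------+
 *   ^                        ^
 *   txq_sp                   txq_sp + 1 == tx_queues[qid]
 *
 * The common release path steps back from the fast path pointer
 * to this header, which is why a single cnxk_nix_tx_queue_release()
 * serves both CN9K and CN10K.
 */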


@@ -11,6 +11,7 @@ Multiprocess aware = Y
Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
RSS hash = Y
Inner RSS = Y
Linux = Y


@@ -11,6 +11,7 @@ Multiprocess aware = Y
Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
RSS hash = Y
Inner RSS = Y
Linux = Y


@@ -10,6 +10,7 @@ Multiprocess aware = Y
Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
RSS hash = Y
Inner RSS = Y
Linux = Y
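
"Runtime Tx queue setup = Y" means an application may create and start a Tx queue after rte_eth_dev_start(). A minimal usage sketch with standard ethdev API calls; the port id, queue id and descriptor count are illustrative:

#include <rte_ethdev.h>

/* Hypothetical: add Tx queue 1 to an already started port 0. */
static int
add_txq_at_runtime(void)
{
	int rc;

	rc = rte_eth_tx_queue_setup(0 /* port */, 1 /* queue */,
				    512 /* nb_desc */, SOCKET_ID_ANY,
				    NULL /* default tx_conf */);
	if (rc)
		return rc;

	/* A queue set up at runtime is left in
	 * RTE_ETH_QUEUE_STATE_STOPPED, so start it explicitly.
	 */
	return rte_eth_dev_tx_queue_start(0, 1);
}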


@@ -2,6 +2,77 @@
* Copyright(C) 2021 Marvell.
*/
#include "cn10k_ethdev.h"
#include "cn10k_tx.h"

static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn10k_eth_txq *txq,
uint16_t qid)
{
struct nix_send_ext_s *send_hdr_ext;
union nix_send_hdr_w0_u send_hdr_w0;
union nix_send_sg_s sg_w0;
RTE_SET_USED(dev);
/* Initialize the fields based on basic single segment packet */
memset(&txq->cmd, 0, sizeof(txq->cmd));
send_hdr_w0.u = 0;
sg_w0.u = 0;
if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
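/* sizem1 is the SQE size in 16B (two 64-bit word) units, minus one */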
/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
send_hdr_w0.sizem1 = 2;
send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[0];
send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
} else {
/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
send_hdr_w0.sizem1 = 1;
}
send_hdr_w0.sq = qid;
sg_w0.subdc = NIX_SUBDC_SG;
sg_w0.segs = 1;
sg_w0.ld_type = NIX_SENDLDTYPE_LDD;
txq->send_hdr_w0 = send_hdr_w0.u;
txq->sg_w0 = sg_w0.u;
rte_wmb();
}

static int
cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, unsigned int socket,
const struct rte_eth_txconf *tx_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct cn10k_eth_txq *txq;
struct roc_nix_sq *sq;
int rc;
RTE_SET_USED(socket);
/* Common Tx queue setup */
rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
sizeof(struct cn10k_eth_txq), tx_conf);
if (rc)
return rc;
sq = &dev->sqs[qid];
/* Update fast path queue */
txq = eth_dev->data->tx_queues[qid];
txq->fc_mem = sq->fc;
/* Store lmt base in tx queue for easy access */
txq->lmt_base = dev->nix.lmt_base;
txq->io_addr = sq->io_addr;
txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
nix_form_default_desc(dev, txq, qid);
txq->lso_tun_fmt = dev->lso_tun_fmt;
return 0;
}

static int
cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
@@ -76,6 +147,7 @@ nix_eth_dev_ops_override(void)
/* Update platform specific ops */
cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
}
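
The point of precomputing send_hdr_w0 and sg_w0 at queue setup is that a Tx burst function (added in later commits) only has to patch per-packet fields. A rough, illustrative sketch of how the words might be consumed, assuming the usual NIX SQE field names (nix_send_hdr_w0_u.total, nix_send_sg_s.seg1_size); none of this code is part of this series:

#include <rte_mbuf.h>

/* Illustrative only: fill a 4-word SQE (no extended header). */
static inline void
sketch_fill_sqe(struct cn10k_eth_txq *txq, struct rte_mbuf *m,
		uint64_t *sqe)
{
	union nix_send_hdr_w0_u w0;
	union nix_send_sg_s sg;

	w0.u = txq->send_hdr_w0;	/* sq/sizem1 preset at setup */
	w0.total = m->pkt_len;		/* per-packet total length */

	sg.u = txq->sg_w0;		/* subdc/ld_type/segs preset */
	sg.seg1_size = m->data_len;	/* single segment size */

	sqe[0] = w0.u;			/* 2(HDR): word 0 */
	sqe[1] = 0;			/* 2(HDR): word 1, no offloads */
	sqe[2] = sg.u;			/* 1(SG) */
	sqe[3] = rte_mbuf_data_iova(m);	/* 1(IOVA) */
}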


@@ -6,6 +6,19 @@
#include <cnxk_ethdev.h>

struct cn10k_eth_txq {
uint64_t send_hdr_w0;
uint64_t sg_w0;
int64_t fc_cache_pkts;
uint64_t *fc_mem;
uintptr_t lmt_base;
rte_iova_t io_addr;
uint16_t sqes_per_sqb_log2;
int16_t nb_sqb_bufs_adj;
uint64_t cmd[4];
uint64_t lso_tun_fmt;
} __plt_cache_aligned;

struct cn10k_eth_rxq {
uint64_t mbuf_initializer;
uintptr_t desc;


@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#ifndef __CN10K_TX_H__
#define __CN10K_TX_H__
#define NIX_TX_OFFLOAD_VLAN_QINQ_F BIT(2)
#define NIX_TX_OFFLOAD_TSO_F BIT(4)
#define NIX_TX_NEED_EXT_HDR \
(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
#endif /* __CN10K_TX_H__ */


@@ -2,6 +2,75 @@
* Copyright(C) 2021 Marvell.
*/
#include "cn9k_ethdev.h"
#include "cn9k_tx.h"

static void
nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
uint16_t qid)
{
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
union nix_send_sg_s *sg;
RTE_SET_USED(dev);
/* Initialize the fields based on basic single segment packet */
memset(&txq->cmd, 0, sizeof(txq->cmd));
if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
send_hdr->w0.sizem1 = 2;
send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
sg = (union nix_send_sg_s *)&txq->cmd[4];
} else {
send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
send_hdr->w0.sizem1 = 1;
sg = (union nix_send_sg_s *)&txq->cmd[2];
}
send_hdr->w0.sq = qid;
sg->subdc = NIX_SUBDC_SG;
sg->segs = 1;
sg->ld_type = NIX_SENDLDTYPE_LDD;
rte_wmb();
}

static int
cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, unsigned int socket,
const struct rte_eth_txconf *tx_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct cn9k_eth_txq *txq;
struct roc_nix_sq *sq;
int rc;
RTE_SET_USED(socket);
/* Common Tx queue setup */
rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
sizeof(struct cn9k_eth_txq), tx_conf);
if (rc)
return rc;
sq = &dev->sqs[qid];
/* Update fast path queue */
txq = eth_dev->data->tx_queues[qid];
txq->fc_mem = sq->fc;
txq->lmt_addr = sq->lmt_addr;
txq->io_addr = sq->io_addr;
txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
nix_form_default_desc(dev, txq, qid);
txq->lso_tun_fmt = dev->lso_tun_fmt;
return 0;
}

static int
cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
@@ -87,6 +156,7 @@ nix_eth_dev_ops_override(void)
/* Update platform specific ops */
cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
}


@@ -6,6 +6,17 @@
#include <cnxk_ethdev.h>

struct cn9k_eth_txq {
uint64_t cmd[8];
int64_t fc_cache_pkts;
uint64_t *fc_mem;
void *lmt_addr;
rte_iova_t io_addr;
uint64_t lso_tun_fmt;
uint16_t sqes_per_sqb_log2;
int16_t nb_sqb_bufs_adj;
} __plt_cache_aligned;

struct cn9k_eth_rxq {
uint64_t mbuf_initializer;
uint64_t data_off;


@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#ifndef __CN9K_TX_H__
#define __CN9K_TX_H__
#define NIX_TX_OFFLOAD_VLAN_QINQ_F BIT(2)
#define NIX_TX_OFFLOAD_TSO_F BIT(4)
#define NIX_TX_NEED_EXT_HDR \
(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
#endif /* __CN9K_TX_H__ */


@@ -66,6 +66,103 @@ cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
return *tmp;
}

static inline uint8_t
nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
{
/*
* Maximum three segments can be supported with W8; choose
* NIX_MAXSQESZ_W16 for multi-segment offload.
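* (With the extended header, 2(HDR) + 2(EXT_HDR) + 1(SG) + 3(IOVA)
* = 8 words fills W8 exactly, hence the three-segment limit.)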
*/
if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
return NIX_MAXSQESZ_W16;
else
return NIX_MAXSQESZ_W8;
}

int
cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_tx_q_sz,
const struct rte_eth_txconf *tx_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
struct cnxk_eth_txq_sp *txq_sp;
struct roc_nix_sq *sq;
size_t txq_sz;
int rc;
/* Free memory prior to re-allocation if needed. */
if (eth_dev->data->tx_queues[qid] != NULL) {
plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
eth_dev->data->tx_queues[qid] = NULL;
}
/* Setup ROC SQ */
sq = &dev->sqs[qid];
sq->qid = qid;
sq->nb_desc = nb_desc;
sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
rc = roc_nix_sq_init(&dev->nix, sq);
if (rc) {
plt_err("Failed to init sq=%d, rc=%d", qid, rc);
return rc;
}
rc = -ENOMEM;
txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
if (!txq_sp) {
plt_err("Failed to alloc tx queue mem");
rc |= roc_nix_sq_fini(sq);
return rc;
}
txq_sp->dev = dev;
txq_sp->qid = qid;
txq_sp->qconf.conf.tx = *tx_conf;
txq_sp->qconf.nb_desc = nb_desc;
plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
" nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
/* Store start of fast path area */
eth_dev->data->tx_queues[qid] = txq_sp + 1;
eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}

static void
cnxk_nix_tx_queue_release(void *txq)
{
struct cnxk_eth_txq_sp *txq_sp;
struct cnxk_eth_dev *dev;
struct roc_nix_sq *sq;
uint16_t qid;
int rc;
if (!txq)
return;
txq_sp = cnxk_eth_txq_to_sp(txq);
dev = txq_sp->dev;
qid = txq_sp->qid;
plt_nix_dbg("Releasing txq %u", qid);
/* Cleanup ROC SQ */
sq = &dev->sqs[qid];
rc = roc_nix_sq_fini(sq);
if (rc)
plt_err("Failed to cleanup sq, rc=%d", rc);
/* Finally free */
plt_free(txq_sp);
}

int
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_rx_q_sz,
@@ -773,6 +870,7 @@ fail_configure:
struct eth_dev_ops cnxk_eth_dev_ops = {
.dev_infos_get = cnxk_nix_info_get,
.link_update = cnxk_nix_link_update,
.tx_queue_release = cnxk_nix_tx_queue_release,
.rx_queue_release = cnxk_nix_rx_queue_release,
};
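
cnxk_eth_txq_to_sp() is used by the release path above but is not part of this hunk; given the txq_sp + 1 store in cnxk_nix_tx_queue_setup(), it is presumably the inverse pointer subtraction, along these lines:

/* Assumed helper (lives in cnxk_ethdev.h): step back from the fast
 * path area to the slow path header allocated in front of it.
 */
static inline struct cnxk_eth_txq_sp *
cnxk_eth_txq_to_sp(void *__txq)
{
	return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}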


@@ -197,6 +197,9 @@ int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_tx_q_sz,
const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_rx_q_sz,
const struct rte_eth_rxconf *rx_conf,