net/cnxk: support inline security setup for cn9k

Add support for inline inbound and outbound IPSec for SA create,
destroy and other NIX / CPT LF configurations.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Authored by Nithin Dabilpuram on 2021-10-01 19:10:10 +05:30; committed by Jerin Jacob.
parent 57f7b98283
commit 7eabd6c637
12 changed files with 1162 additions and 11 deletions

@ -36,6 +36,9 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
}
@ -101,6 +104,9 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
return flags;
}
@ -179,8 +185,10 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
const struct rte_eth_txconf *tx_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_cpt_lf *inl_lf;
struct cn9k_eth_txq *txq;
struct roc_nix_sq *sq;
uint16_t crypto_qid;
int rc;
RTE_SET_USED(socket);
@ -200,6 +208,19 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
/* Fetch CPT LF info for outbound if present */
if (dev->outb.lf_base) {
crypto_qid = qid % dev->outb.nb_crypto_qs;
inl_lf = dev->outb.lf_base + crypto_qid;
txq->cpt_io_addr = inl_lf->io_addr;
txq->cpt_fc = inl_lf->fc_addr;
txq->cpt_desc = inl_lf->nb_desc * 0.7;
txq->sa_base = (uint64_t)dev->outb.sa_base;
txq->sa_base |= eth_dev->data->port_id;
PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
}
nix_form_default_desc(dev, txq, qid);
txq->lso_tun_fmt = dev->lso_tun_fmt;
return 0;
@ -508,6 +529,8 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
nix_eth_dev_ops_override();
npc_flow_ops_override();
cn9k_eth_sec_ops_override();
/* Common probe */
rc = cnxk_nix_probe(pci_drv, pci_dev);
if (rc)

@ -5,6 +5,7 @@
#define __CN9K_ETHDEV_H__
#include <cnxk_ethdev.h>
#include <cnxk_security.h>
struct cn9k_eth_txq {
uint64_t cmd[8];
@ -15,6 +16,10 @@ struct cn9k_eth_txq {
uint64_t lso_tun_fmt;
uint16_t sqes_per_sqb_log2;
int16_t nb_sqb_bufs_adj;
rte_iova_t cpt_io_addr;
uint64_t sa_base;
uint64_t *cpt_fc;
uint16_t cpt_desc;
} __plt_cache_aligned;
struct cn9k_eth_rxq {
@ -32,8 +37,64 @@ struct cn9k_eth_rxq {
struct cnxk_timesync_info *tstamp;
} __plt_cache_aligned;
/* Private data in sw rsvd area of struct roc_onf_ipsec_inb_sa */
struct cn9k_inb_priv_data {
	/* Application cookie from session conf (conf->userdata) */
	void *userdata;
	/* Back pointer to the owning driver session */
	struct cnxk_eth_sec_sess *eth_sec;
};
/* Private data in sw rsvd area of struct roc_onf_ipsec_outb_sa */
struct cn9k_outb_priv_data {
	union {
		/* Full 64-bit extended sequence number */
		uint64_t esn;
		struct {
			/* Lower 32 bits of the sequence number
			 * (initialized to 1 at session create)
			 */
			uint32_t seq;
			/* Upper 32 bits of the ESN */
			uint32_t esn_hi;
		};
	};

	/* Rlen computation data */
	struct cnxk_ipsec_outb_rlens rlens;

	/* IP identifier */
	uint16_t ip_id;

	/* SA index */
	uint32_t sa_idx;

	/* Flags */
	uint16_t copy_salt : 1;

	/* Salt (copied from the SA nonce when enc type is AES-GCM) */
	uint32_t nonce;

	/* User data pointer */
	void *userdata;

	/* Back pointer to eth sec session */
	struct cnxk_eth_sec_sess *eth_sec;
};
/* 64-bit fast path session descriptor. The whole union is stored as the
 * rte_security session private data (see set_sec_session_private_data()
 * in cn9k_eth_sec_session_create()), so it must stay exactly 8 bytes.
 */
struct cn9k_sec_sess_priv {
	union {
		struct {
			/* SA index for outbound, SPI for inbound sessions */
			uint32_t sa_idx;
			/* Set when this is an inbound session */
			uint8_t inb_sa : 1;
			uint8_t rsvd1 : 2;
			/* Rlen roundup data mirrored from cnxk_ipsec_outb_rlens */
			uint8_t roundup_byte : 5;
			uint8_t roundup_len;
			uint16_t partial_len;
		};

		uint64_t u64;
	};
} __rte_packed;
/* Rx and Tx routines */
void cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev);
void cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
/* Security context setup */
void cn9k_eth_sec_ops_override(void);
#endif /* __CN9K_ETHDEV_H__ */

@ -0,0 +1,313 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <cn9k_ethdev.h>
#include <cnxk_security.h>
/* Crypto capabilities advertised for cn9k inline IPsec:
 * AES-GCM AEAD only, with 128/192/256-bit keys.
 */
static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
/* Security capabilities: inline-protocol ESP tunnel mode only,
 * both directions, terminated by the mandatory ACTION_TYPE_NONE entry.
 */
static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		/* NOTE(review): a TX offload flag on the ingress capability
		 * looks suspicious -- confirm this is intentional.
		 */
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};
/**
 * rte_security .session_create handler for cn9k inline IPsec.
 *
 * Allocates a driver session object from @mempool, then programs either
 * an inbound SA (looked up by SPI in the NIX inbound SA table) or an
 * outbound SA (index allocated from the outbound SA bitmap). A 64-bit
 * fast path descriptor (struct cn9k_sec_sess_priv) is stored as the
 * session private data.
 *
 * @param device   rte_eth_dev back-pointer supplied by the security ctx.
 * @param conf     Session config; only INLINE_PROTOCOL + IPSEC accepted.
 * @param sess     rte_security session to attach fast path data to.
 * @param mempool  Pool of cnxk_eth_sec_sess objects.
 * @return 0 on success, negative errno on failure.
 */
static int
cn9k_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess,
			    struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn9k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	bool inbound;
	int rc = 0;

	/* Only inline-protocol IPsec is supported by this driver */
	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	/* mbuf dynfield used to return per-packet security metadata */
	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	if (inbound) {
		struct cn9k_inb_priv_data *inb_priv;
		struct roc_onf_ipsec_inb_sa *inb_sa;

		/* Private data must fit in the SA's sw reserved area */
		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
		 * device always for CN9K.
		 */
		inb_sa = (struct roc_onf_ipsec_inb_sa *)
			roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
		if (!inb_sa) {
			plt_err("Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		/* Check if SA is already in use */
		if (inb_sa->ctl.valid) {
			plt_err("Inbound SA with SPI %u already in use",
				ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;

		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		sess_priv.inb_sa = 1;
		/* Inbound SAs are addressed directly by SPI */
		sess_priv.sa_idx = ipsec->spi;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi;
		eth_sec->spi = ipsec->spi;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
	} else {
		struct cn9k_outb_priv_data *outb_priv;
		struct roc_onf_ipsec_outb_sa *outb_sa;
		uintptr_t sa_base = dev->outb.sa_base;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint32_t sa_idx;

		/* Private data must fit in the SA's sw reserved area */
		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init outbound sa, rc=%d", rc);
			/* NOTE(review): OR-ing two negative errnos can yield a
			 * garbled code; consider reporting the original rc.
			 */
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;
		/* Start sequence number with 1 */
		outb_priv->seq = 1;

		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
			outb_priv->copy_salt = 1;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);
	/*
	 * Update fast path info in priv area.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	rte_mempool_put(mempool, eth_sec);
	return rc;
}
/**
 * rte_security .session_destroy handler for cn9k inline IPsec.
 *
 * Invalidates the HW SA, unlinks the driver session from the per-direction
 * list and returns the session object to its originating mempool.
 *
 * @return 0 on success, -ENOENT if @sess is unknown to this port.
 */
static int
cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_onf_ipsec_outb_sa *outb_sa;
	struct roc_onf_ipsec_inb_sa *inb_sa;
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	if (eth_sec->inb) {
		inb_sa = eth_sec->sa;
		/* Disable SA */
		inb_sa->ctl.valid = 0;

		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		outb_sa = eth_sec->sa;
		/* Disable SA */
		outb_sa->ctl.valid = 0;

		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);

	/* Put eth_sec object back to pool; pool is recovered from the
	 * object itself since create/destroy may use different contexts.
	 */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);
	return 0;
}
/* rte_security .capabilities_get: the capability table is static and
 * identical for every cn9k port.
 */
static const struct rte_security_capability *
cn9k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return &cn9k_eth_sec_capabilities[0];
}
/**
 * Plug the cn9k-specific inline IPsec handlers into the common cnxk
 * security ops table. Idempotent: only the first call has an effect.
 * Called from cn9k_nix_probe() before the common probe runs.
 */
void
cn9k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
}

@ -17,6 +17,7 @@
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F BIT(5)
#define NIX_RX_OFFLOAD_SECURITY_F BIT(6)
/* Flags to control cqe_to_mbuf conversion function.
* Defining it from backwards to denote its been

@ -13,6 +13,7 @@
#define NIX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
#define NIX_TX_OFFLOAD_TSO_F BIT(4)
#define NIX_TX_OFFLOAD_TSTAMP_F BIT(5)
#define NIX_TX_OFFLOAD_SECURITY_F BIT(6)
/* Flags to control xmit_prepare function.
* Defining it from backwards to denote its been

@ -38,6 +38,162 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
return speed_capa;
}
/**
 * Switch inbound inline IPsec processing between the port's own NIX LF
 * and the shared inline device, then refresh the fastpath SA-base
 * lookup memory to match.
 *
 * @param dev          cnxk ethdev private data.
 * @param use_inl_dev  true to use the inline device for inbound.
 * @return 0 on success (including no-op), error from lookup mem setup.
 */
int
cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
{
	struct roc_nix *nix = &dev->nix;

	/* Nothing to do when already in the requested mode */
	if (dev->inb.inl_dev == use_inl_dev)
		return 0;

	/* NOTE(review): logged unconditionally, even when nb_sess == 0 --
	 * confirm whether this should be guarded by an active-session check.
	 */
	plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
		    dev->inb.nb_sess, !!dev->inb.inl_dev);

	/* Change the mode */
	dev->inb.inl_dev = use_inl_dev;

	/* Update RoC for NPC rule insertion */
	roc_nix_inb_mode_set(nix, use_inl_dev);

	/* Setup lookup mem */
	return cnxk_nix_lookup_mem_sa_base_set(dev);
}
/**
 * Configure inline IPsec resources for the port during dev_configure.
 *
 * - RX security offload: initialize inline inbound on the NIX LF and
 *   default to the inline device for poll mode.
 * - TX (or RX) security offload: initialize inline outbound CPT LFs;
 *   additionally, when TX security is enabled, build the outbound SA
 *   index bitmap (all indexes initially free/set).
 *
 * @return 0 on success, negative errno on failure (inbound state is
 *         rolled back on outbound failure via the cleanup path).
 */
static int
nix_security_setup(struct cnxk_eth_dev *dev)
{
	struct roc_nix *nix = &dev->nix;
	int i, rc = 0;

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		/* Setup Inline Inbound */
		rc = roc_nix_inl_inb_init(nix);
		if (rc) {
			plt_err("Failed to initialize nix inline inb, rc=%d",
				rc);
			return rc;
		}

		/* By default pick using inline device for poll mode.
		 * Will be overridden when event mode rq's are setup.
		 */
		/* NOTE(review): return value ignored -- a lookup-mem setup
		 * failure here would go unnoticed; confirm intentional.
		 */
		cnxk_nix_inb_mode_set(dev, true);
	}

	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		struct plt_bitmap *bmap;
		size_t bmap_sz;
		void *mem;

		/* Setup enough descriptors for all tx queues */
		nix->outb_nb_desc = dev->outb.nb_desc;
		nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;

		/* Setup Inline Outbound */
		rc = roc_nix_inl_outb_init(nix);
		if (rc) {
			plt_err("Failed to initialize nix inline outb, rc=%d",
				rc);
			goto cleanup;
		}

		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);

		/* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
		if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
			goto done;

		rc = -ENOMEM;
		/* Allocate a bitmap to alloc and free sa indexes */
		bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
		mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
		if (mem == NULL) {
			plt_err("Outbound SA bmap alloc failed");

			/* NOTE(review): OR-ing errnos can garble the code */
			rc |= roc_nix_inl_outb_fini(nix);
			goto cleanup;
		}

		rc = -EIO;
		bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
		if (!bmap) {
			plt_err("Outbound SA bmap init failed");

			rc |= roc_nix_inl_outb_fini(nix);
			plt_free(mem);
			goto cleanup;
		}

		/* Mark every SA index free (set bit == available) */
		for (i = 0; i < dev->outb.max_sa; i++)
			plt_bitmap_set(bmap, i);

		dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
		dev->outb.sa_bmap_mem = mem;
		dev->outb.sa_bmap = bmap;
	}

done:
	return 0;
cleanup:
	/* Undo inbound init done above before reporting the failure */
	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
		rc |= roc_nix_inl_inb_fini(nix);
	return rc;
}
/**
 * Tear down inline IPsec state: destroy all remaining inbound/outbound
 * sessions, release NIX/CPT inline resources and free the SA bitmap.
 * Counterpart of nix_security_setup(); also called on reconfigure and
 * uninit.
 *
 * @return 0 on success, accumulated (OR-ed) non-zero status otherwise.
 */
static int
nix_security_release(struct cnxk_eth_dev *dev)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	struct cnxk_eth_sec_sess *eth_sec, *tvar;
	struct roc_nix *nix = &dev->nix;
	int rc, ret = 0;

	/* Cleanup Inline inbound */
	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		/* Destroy inbound sessions */
		tvar = NULL;
		/* Safe iteration: session_destroy unlinks the entry */
		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
			cnxk_eth_sec_ops.session_destroy(eth_dev,
							 eth_sec->sess);

		/* Clear lookup mem */
		cnxk_nix_lookup_mem_sa_base_clear(dev);

		rc = roc_nix_inl_inb_fini(nix);
		if (rc)
			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
		ret |= rc;
	}

	/* Cleanup Inline outbound */
	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		/* Destroy outbound sessions */
		tvar = NULL;
		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
			cnxk_eth_sec_ops.session_destroy(eth_dev,
							 eth_sec->sess);

		rc = roc_nix_inl_outb_fini(nix);
		if (rc)
			plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
		ret |= rc;

		plt_bitmap_free(dev->outb.sa_bmap);
		plt_free(dev->outb.sa_bmap_mem);
		dev->outb.sa_bmap = NULL;
		dev->outb.sa_bmap_mem = NULL;
	}

	/* Reset inbound mode back to NIX LF and clear counters */
	dev->inb.inl_dev = false;
	roc_nix_inb_mode_set(nix, false);
	dev->nb_rxq_sso = 0;
	dev->inb.nb_sess = 0;
	dev->outb.nb_sess = 0;
	return ret;
}
static void
nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
{
@ -194,6 +350,12 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
eth_dev->data->tx_queues[qid] = NULL;
}
/* When Tx Security offload is enabled, increase tx desc count by
* max possible outbound desc count.
*/
if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
nb_desc += dev->outb.nb_desc;
/* Setup ROC SQ */
sq = &dev->sqs[qid];
sq->qid = qid;
@ -266,6 +428,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
struct rte_mempool *mp)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *nix = &dev->nix;
struct cnxk_eth_rxq_sp *rxq_sp;
struct rte_mempool_ops *ops;
const char *platform_ops;
@ -303,6 +466,19 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
eth_dev->data->rx_queues[qid] = NULL;
}
/* Clamp cq limit up to the size of the packet pool aura for LBK
 * to avoid meta packet drop as LBK does not currently support
 * backpressure.
 */
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
/* Use current RQ's aura limit if inl rq is not available */
if (!pkt_pool_limit)
pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
}
/* Setup ROC CQ */
cq = &dev->cqs[qid];
cq->qid = qid;
@ -328,6 +504,10 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rq->later_skip = sizeof(struct rte_mbuf);
rq->lpb_size = mp->elt_size;
/* Enable Inline IPSec on RQ, will not be used for Poll mode */
if (roc_nix_inl_inb_is_enabled(nix))
rq->ipsech_ena = true;
rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
if (rc) {
plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
@ -350,6 +530,13 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rxq_sp->qconf.nb_desc = nb_desc;
rxq_sp->qconf.mp = mp;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
/* Setup rq reference for inline dev if present */
rc = roc_nix_inl_dev_rq_get(rq);
if (rc)
goto free_mem;
}
plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
cq->nb_desc);
@ -370,6 +557,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
}
return 0;
free_mem:
plt_free(rxq_sp);
rq_fini:
rc |= roc_nix_rq_fini(rq);
cq_fini:
@ -394,11 +583,15 @@ cnxk_nix_rx_queue_release(void *rxq)
rxq_sp = cnxk_eth_rxq_to_sp(rxq);
dev = rxq_sp->dev;
qid = rxq_sp->qid;
rq = &dev->rqs[qid];
plt_nix_dbg("Releasing rxq %u", qid);
/* Release rq reference for inline dev if present */
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
roc_nix_inl_dev_rq_put(rq);
/* Cleanup ROC RQ */
rq = &dev->rqs[qid];
rc = roc_nix_rq_fini(rq);
if (rc)
plt_err("Failed to cleanup rq, rc=%d", rc);
@ -804,6 +997,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
rc = nix_store_queue_cfg_and_then_release(eth_dev);
if (rc)
goto fail_configure;
/* Cleanup security support */
rc = nix_security_release(dev);
if (rc)
goto fail_configure;
roc_nix_tm_fini(nix);
roc_nix_lf_free(nix);
}
@ -958,6 +1157,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
plt_err("Failed to initialize flow control rc=%d", rc);
goto cq_fini;
}
/* Setup Inline security support */
rc = nix_security_setup(dev);
if (rc)
goto cq_fini;
/*
* Restore queue config when reconfigure followed by
* reconfigure and no queue configure invoked from application case.
@ -965,7 +1170,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
if (dev->configured == 1) {
rc = nix_restore_queue_cfg(eth_dev);
if (rc)
goto cq_fini;
goto sec_release;
}
/* Update the mac address */
@ -987,6 +1192,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
dev->nb_txq = data->nb_tx_queues;
return 0;
sec_release:
rc |= nix_security_release(dev);
cq_fini:
roc_nix_unregister_cq_irqs(nix);
q_irq_fini:
@ -1284,12 +1491,25 @@ static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct rte_security_ctx *sec_ctx;
struct roc_nix *nix = &dev->nix;
struct rte_pci_device *pci_dev;
int rc, max_entries;
eth_dev->dev_ops = &cnxk_eth_dev_ops;
/* Alloc security context */
sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
if (!sec_ctx)
return -ENOMEM;
sec_ctx->device = eth_dev;
sec_ctx->ops = &cnxk_eth_sec_ops;
sec_ctx->flags =
(RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
eth_dev->security_ctx = sec_ctx;
TAILQ_INIT(&dev->inb.list);
TAILQ_INIT(&dev->outb.list);
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@ -1406,6 +1626,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
struct roc_nix *nix = &dev->nix;
int rc, i;
plt_free(eth_dev->security_ctx);
eth_dev->security_ctx = NULL;
/* Nothing to be done for secondary processes */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@ -1440,6 +1663,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
}
eth_dev->data->nb_rx_queues = 0;
/* Free security resources */
nix_security_release(dev);
/* Free tm resources */
roc_nix_tm_fini(nix);

@ -13,6 +13,9 @@
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_tailq.h>
#include <rte_time.h>
#include "roc_api.h"
@ -70,14 +73,14 @@
DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO | \
DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS | \
DEV_TX_OFFLOAD_IPV4_CKSUM)
DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
#define CNXK_NIX_RX_OFFLOAD_CAPA \
(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP | \
DEV_RX_OFFLOAD_VLAN_STRIP)
DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_SECURITY)
#define RSS_IPV4_ENABLE \
(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
@ -112,6 +115,11 @@
#define PTYPE_TUNNEL_ARRAY_SZ BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ \
((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
#define ERRCODE_ERRLEN_WIDTH 12
#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))
/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"
@ -119,6 +127,9 @@
((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
(1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
/* Subtype from inline outbound error event */
#define CNXK_ETHDEV_SEC_OUTB_EV_SUB 0xFFUL
struct cnxk_fc_cfg {
enum rte_eth_fc_mode mode;
uint8_t rx_pause;
@ -144,6 +155,82 @@ struct cnxk_timesync_info {
uint64_t *tx_tstamp;
} __plt_cache_aligned;
/* Security session private data */
struct cnxk_eth_sec_sess {
	/* List entry */
	TAILQ_ENTRY(cnxk_eth_sec_sess) entry;

	/* Inbound SA is from NIX_RX_IPSEC_SA_BASE or
	 * Outbound SA from roc_nix_inl_outb_sa_base_get()
	 */
	void *sa;

	/* SA index (equals SPI for inbound sessions) */
	uint32_t sa_idx;

	/* SPI */
	uint32_t spi;

	/* Back pointer to session */
	struct rte_security_session *sess;

	/* Inbound */
	bool inb;

	/* Inbound session on inl dev */
	bool inl_dev;
};

/* Per-direction list of active security sessions */
TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
/* Inbound security data (per-port inline inbound IPsec state) */
struct cnxk_eth_dev_sec_inb {
	/* IPSec inbound max SPI */
	uint16_t max_spi;

	/* Using inbound with inline device */
	bool inl_dev;

	/* Device argument to force inline device for inb */
	bool force_inl_dev;

	/* Active sessions */
	uint16_t nb_sess;

	/* List of sessions */
	struct cnxk_eth_sec_sess_list list;
};
/* Outbound security data (per-port inline outbound IPsec state) */
struct cnxk_eth_dev_sec_outb {
	/* IPSec outbound max SA */
	uint16_t max_sa;

	/* Per CPT LF descriptor count */
	uint32_t nb_desc;

	/* SA Bitmap (set bit == free index) */
	struct plt_bitmap *sa_bmap;

	/* SA bitmap memory */
	void *sa_bmap_mem;

	/* SA base */
	uint64_t sa_base;

	/* CPT LF base */
	struct roc_cpt_lf *lf_base;

	/* Crypto queues => CPT lf count */
	uint16_t nb_crypto_qs;

	/* Active sessions */
	uint16_t nb_sess;

	/* List of sessions */
	struct cnxk_eth_sec_sess_list list;
};
struct cnxk_eth_dev {
/* ROC NIX */
struct roc_nix nix;
@ -159,6 +246,7 @@ struct cnxk_eth_dev {
/* Configured queue count */
uint16_t nb_rxq;
uint16_t nb_txq;
uint16_t nb_rxq_sso;
uint8_t configured;
/* Max macfilter entries */
@ -223,6 +311,10 @@ struct cnxk_eth_dev {
/* Per queue statistics counters */
uint32_t txq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
uint32_t rxq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/* Security data */
struct cnxk_eth_dev_sec_inb inb;
struct cnxk_eth_dev_sec_outb outb;
};
struct cnxk_eth_rxq_sp {
@ -261,6 +353,9 @@ extern struct eth_dev_ops cnxk_eth_dev_ops;
/* Common flow ops */
extern struct rte_flow_ops cnxk_flow_ops;
/* Common security ops */
extern struct rte_security_ops cnxk_eth_sec_ops;
/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev);
@ -389,6 +484,18 @@ int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
/* Debug */
int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
struct rte_dev_reg_info *regs);
/* Security */
int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p);
int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);
int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
__rte_internal
int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
uint32_t spi, bool inb);
struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
struct rte_security_session *sess);
/* Other private functions */
int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
@ -499,4 +606,14 @@ cnxk_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
}
}
/* Fetch the inbound SA base for @port from the fastpath lookup memory.
 * Layout: [ptype array][errcode array][per-port SA base table], so the
 * table of uintptr_t entries starts PTYPE_ARRAY_SZ + ERR_ARRAY_SZ bytes in.
 */
static __rte_always_inline uintptr_t
cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem)
{
	const uintptr_t *sa_tbl;

	sa_tbl = (const uintptr_t *)((uintptr_t)lookup_mem + PTYPE_ARRAY_SZ +
				     ERR_ARRAY_SZ);
	return sa_tbl[port];
}
#endif /* __CNXK_ETHDEV_H__ */

@ -7,6 +7,61 @@
#include "cnxk_ethdev.h"
/*
 * Devargs parser for "outb_nb_desc": outbound CPT LF descriptor count
 * (documented range 1-65535).
 *
 * Uses strtoul() with full-string validation instead of atoi() so a
 * malformed value is rejected (keeping the built-in default) rather
 * than being silently parsed as 0 or truncated to 16 bits.
 */
static int
parse_outb_nb_desc(const char *key, const char *value, void *extra_args)
{
	unsigned long val;
	char *end = NULL;

	(void)key;

	val = strtoul(value, &end, 0);
	/* Reject empty input, trailing garbage and out-of-range values */
	if (end == value || *end != '\0' || val < 1 || val > UINT16_MAX)
		return -EINVAL;

	*(uint16_t *)extra_args = (uint16_t)val;
	return 0;
}
/*
 * Devargs parser for "outb_nb_crypto_qs": number of outbound crypto
 * queues (CPT LFs), valid range 1-64.
 *
 * Uses strtoul() with full-string validation instead of atoi() so that
 * values with trailing garbage (e.g. "8x") are rejected.
 */
static int
parse_outb_nb_crypto_qs(const char *key, const char *value, void *extra_args)
{
	unsigned long val;
	char *end = NULL;

	(void)key;

	val = strtoul(value, &end, 0);
	/* Reject malformed input and enforce the 1..64 range */
	if (end == value || *end != '\0' || val < 1 || val > 64)
		return -EINVAL;

	*(uint16_t *)extra_args = (uint16_t)val;
	return 0;
}
/*
 * Devargs parser for "ipsec_in_max_spi": maximum inbound SPI the inline
 * inbound SA table must cover (documented range 1-65535).
 *
 * Uses strtoul() with full-string validation instead of atoi() so a
 * malformed value keeps the default instead of silently becoming 0.
 */
static int
parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
{
	unsigned long val;
	char *end = NULL;

	(void)key;

	val = strtoul(value, &end, 0);
	if (end == value || *end != '\0' || val < 1 || val > UINT16_MAX)
		return -EINVAL;

	*(uint16_t *)extra_args = (uint16_t)val;
	return 0;
}
/*
 * Devargs parser for "ipsec_out_max_sa": maximum number of outbound SAs
 * (documented range 1-65535, default 4096).
 *
 * Uses strtoul() with full-string validation instead of atoi() so a
 * malformed value keeps the default instead of silently becoming 0.
 */
static int
parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args)
{
	unsigned long val;
	char *end = NULL;

	(void)key;

	val = strtoul(value, &end, 0);
	if (end == value || *end != '\0' || val < 1 || val > UINT16_MAX)
		return -EINVAL;

	*(uint16_t *)extra_args = (uint16_t)val;
	return 0;
}
static int
parse_flow_max_priority(const char *key, const char *value, void *extra_args)
{
@ -117,15 +172,25 @@ parse_switch_header_type(const char *key, const char *value, void *extra_args)
#define CNXK_SWITCH_HEADER_TYPE "switch_header"
#define CNXK_RSS_TAG_AS_XOR "tag_as_xor"
#define CNXK_LOCK_RX_CTX "lock_rx_ctx"
#define CNXK_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
#define CNXK_IPSEC_OUT_MAX_SA "ipsec_out_max_sa"
#define CNXK_OUTB_NB_DESC "outb_nb_desc"
#define CNXK_FORCE_INB_INL_DEV "force_inb_inl_dev"
#define CNXK_OUTB_NB_CRYPTO_QS "outb_nb_crypto_qs"
int
cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
{
uint16_t reta_sz = ROC_NIX_RSS_RETA_SZ_64;
uint16_t sqb_count = CNXK_NIX_TX_MAX_SQB;
uint16_t ipsec_in_max_spi = BIT(8) - 1;
uint16_t ipsec_out_max_sa = BIT(12);
uint16_t flow_prealloc_size = 1;
uint16_t switch_header_type = 0;
uint16_t flow_max_priority = 3;
uint16_t force_inb_inl_dev = 0;
uint16_t outb_nb_crypto_qs = 1;
uint16_t outb_nb_desc = 8200;
uint16_t rss_tag_as_xor = 0;
uint16_t scalar_enable = 0;
uint8_t lock_rx_ctx = 0;
@ -153,10 +218,27 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
rte_kvargs_process(kvlist, CNXK_RSS_TAG_AS_XOR, &parse_flag,
&rss_tag_as_xor);
rte_kvargs_process(kvlist, CNXK_LOCK_RX_CTX, &parse_flag, &lock_rx_ctx);
rte_kvargs_process(kvlist, CNXK_IPSEC_IN_MAX_SPI,
&parse_ipsec_in_max_spi, &ipsec_in_max_spi);
rte_kvargs_process(kvlist, CNXK_IPSEC_OUT_MAX_SA,
&parse_ipsec_out_max_sa, &ipsec_out_max_sa);
rte_kvargs_process(kvlist, CNXK_OUTB_NB_DESC, &parse_outb_nb_desc,
&outb_nb_desc);
rte_kvargs_process(kvlist, CNXK_OUTB_NB_CRYPTO_QS,
&parse_outb_nb_crypto_qs, &outb_nb_crypto_qs);
rte_kvargs_process(kvlist, CNXK_FORCE_INB_INL_DEV, &parse_flag,
&force_inb_inl_dev);
rte_kvargs_free(kvlist);
null_devargs:
dev->scalar_ena = !!scalar_enable;
dev->inb.force_inl_dev = !!force_inb_inl_dev;
dev->inb.max_spi = ipsec_in_max_spi;
dev->outb.max_sa = ipsec_out_max_sa;
dev->outb.nb_desc = outb_nb_desc;
dev->outb.nb_crypto_qs = outb_nb_crypto_qs;
dev->nix.ipsec_in_max_spi = ipsec_in_max_spi;
dev->nix.ipsec_out_max_sa = ipsec_out_max_sa;
dev->nix.rss_tag_as_xor = !!rss_tag_as_xor;
dev->nix.max_sqb_count = sqb_count;
dev->nix.reta_sz = reta_sz;
@ -177,4 +259,8 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
CNXK_FLOW_PREALLOC_SIZE "=<1-32>"
CNXK_FLOW_MAX_PRIORITY "=<1-32>"
CNXK_SWITCH_HEADER_TYPE "=<higig2|dsa|chlen90b>"
CNXK_RSS_TAG_AS_XOR "=1");
CNXK_RSS_TAG_AS_XOR "=1"
CNXK_IPSEC_IN_MAX_SPI "=<1-65535>"
CNXK_OUTB_NB_DESC "=<1-65535>"
CNXK_OUTB_NB_CRYPTO_QS "=<1-64>"
CNXK_FORCE_INB_INL_DEV "=1");

@ -0,0 +1,278 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#include <cnxk_ethdev.h>
#define CNXK_NIX_INL_SELFTEST "selftest"
#define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
#define CNXK_NIX_INL_DEV_NAME RTE_STR(cnxk_nix_inl_dev_)
#define CNXK_NIX_INL_DEV_NAME_LEN \
(sizeof(CNXK_NIX_INL_DEV_NAME) + PCI_PRI_STR_SIZE)
/* Index of the lowest set bit in a bitmap slab (count trailing zeros).
 * Returns 0 for an all-zero slab; callers only pass non-empty slabs
 * obtained from a successful bitmap scan.
 */
static inline int
bitmap_ctzll(uint64_t slab)
{
	return (slab == 0) ? 0 : __builtin_ctzll(slab);
}
/**
 * Allocate a free outbound SA index from the port's SA bitmap
 * (set bit == free index; the bit is cleared on allocation).
 *
 * @param dev    cnxk ethdev private data.
 * @param idx_p  Output: the allocated SA index.
 * @return 0 on success, -ENOTSUP if outbound inline is not configured,
 *         -ERANGE when every index is in use.
 */
int
cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p)
{
	uint32_t pos, idx;
	uint64_t slab;
	int rc;

	if (!dev->outb.sa_bmap)
		return -ENOTSUP;

	pos = 0;
	slab = 0;
	/* Scan from the beginning */
	plt_bitmap_scan_init(dev->outb.sa_bmap);
	/* Scan bitmap to get the free sa index */
	rc = plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab);
	/* Empty bitmap */
	if (rc == 0) {
		plt_err("Outbound SA' exhausted, use 'ipsec_out_max_sa' "
			"devargs to increase");
		return -ERANGE;
	}

	/* Get free SA index */
	idx = pos + bitmap_ctzll(slab);
	plt_bitmap_clear(dev->outb.sa_bmap, idx);
	*idx_p = idx;
	return 0;
}
/**
 * Return an outbound SA index to the free pool (sets its bitmap bit).
 *
 * @return 0 on success, -EINVAL if @idx is out of range or already free.
 */
int
cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx)
{
	if (idx >= dev->outb.max_sa)
		return -EINVAL;

	/* Check if it is already free */
	if (plt_bitmap_get(dev->outb.sa_bmap, idx))
		return -EINVAL;

	/* Mark index as free */
	plt_bitmap_set(dev->outb.sa_bmap, idx);
	return 0;
}
/* Find an active security session by SPI in the inbound or outbound
 * session list. Returns NULL when no session uses @spi.
 */
struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev, uint32_t spi, bool inb)
{
	struct cnxk_eth_sec_sess_list *head;
	struct cnxk_eth_sec_sess *itr;

	head = inb ? &dev->inb.list : &dev->outb.list;
	TAILQ_FOREACH(itr, head, entry) {
		if (itr->spi == spi)
			break;
	}

	/* itr is NULL when the loop ran to completion */
	return itr;
}
/* Find the driver session backing an rte_security session handle.
 * Scans the inbound list first, then the outbound list; NULL if absent.
 */
struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
			      struct rte_security_session *sess)
{
	struct cnxk_eth_sec_sess_list *lists[2];
	struct cnxk_eth_sec_sess *itr;
	unsigned int i;

	lists[0] = &dev->inb.list;
	lists[1] = &dev->outb.list;

	for (i = 0; i < 2; i++) {
		TAILQ_FOREACH(itr, lists[i], entry) {
			if (itr->sess == sess)
				return itr;
		}
	}

	return NULL;
}
/* rte_security .session_get_size: bytes of private data the application
 * must reserve per object in the session mempool.
 */
static unsigned int
cnxk_eth_sec_session_get_size(void *device __rte_unused)
{
	const unsigned int priv_sz = sizeof(struct cnxk_eth_sec_sess);

	return priv_sz;
}
/* Common security ops table. Platform code fills in the remaining
 * handlers at probe time (see cn9k_eth_sec_ops_override()).
 */
struct rte_security_ops cnxk_eth_sec_ops = {
	.session_get_size = cnxk_eth_sec_session_get_size
};
/*
 * Devargs parser for the inline device's "ipsec_in_max_spi".
 *
 * Uses strtoul() with full-string validation instead of atoi() so a
 * malformed value keeps the default instead of silently becoming 0.
 * Stores through a uint16_t pointer, matching the original contract.
 */
static int
parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
{
	unsigned long val;
	char *end = NULL;

	(void)key;

	val = strtoul(value, &end, 0);
	if (end == value || *end != '\0' || val < 1 || val > UINT16_MAX)
		return -EINVAL;

	*(uint16_t *)extra_args = (uint16_t)val;
	return 0;
}
/* Devargs handler for "selftest": enabled only when the value is
 * exactly 1; any other (or non-numeric) value disables it.
 */
static int
parse_selftest(const char *key, const char *value, void *extra_args)
{
	int v;

	(void)key;

	v = atoi(value);
	*(uint8_t *)extra_args = (v == 1) ? 1 : 0;

	return 0;
}
/**
 * Parse inline-device devargs ("selftest", "ipsec_in_max_spi") into
 * @inl_dev. Absent devargs (or absent keys) leave the defaults.
 *
 * @return 0 on success, -EINVAL if the kvargs string cannot be parsed.
 */
static int
nix_inl_parse_devargs(struct rte_devargs *devargs,
		      struct roc_nix_inl_dev *inl_dev)
{
	/* Default max SPI: 255 */
	uint32_t ipsec_in_max_spi = BIT(8) - 1;
	struct rte_kvargs *kvlist;
	uint8_t selftest = 0;

	if (devargs == NULL)
		goto null_devargs;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		goto exit;

	/* NOTE(review): rte_kvargs_process() return values are ignored,
	 * so a bad individual value is silently skipped. Also,
	 * parse_ipsec_in_max_spi() stores through a uint16_t pointer while
	 * ipsec_in_max_spi here is uint32_t -- endianness-dependent;
	 * confirm the types should match.
	 */
	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
			   &selftest);
	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
			   &parse_ipsec_in_max_spi, &ipsec_in_max_spi);
	rte_kvargs_free(kvlist);

null_devargs:
	inl_dev->ipsec_in_max_spi = ipsec_in_max_spi;
	inl_dev->selftest = selftest;
	return 0;
exit:
	return -EINVAL;
}
static inline char *
nix_inl_dev_to_name(struct rte_pci_device *pci_dev, char *name)
{
snprintf(name, CNXK_NIX_INL_DEV_NAME_LEN,
CNXK_NIX_INL_DEV_NAME PCI_PRI_FMT, pci_dev->addr.domain,
pci_dev->addr.bus, pci_dev->addr.devid,
pci_dev->addr.function);
return name;
}
static int
cnxk_nix_inl_dev_remove(struct rte_pci_device *pci_dev)
{
char name[CNXK_NIX_INL_DEV_NAME_LEN];
const struct rte_memzone *mz;
struct roc_nix_inl_dev *dev;
int rc;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
mz = rte_memzone_lookup(nix_inl_dev_to_name(pci_dev, name));
if (!mz)
return 0;
dev = mz->addr;
/* Cleanup inline dev */
rc = roc_nix_inl_dev_fini(dev);
if (rc) {
plt_err("Failed to cleanup inl dev, rc=%d(%s)", rc,
roc_error_msg_get(rc));
return rc;
}
rte_memzone_free(mz);
return 0;
}
/* PCI probe callback: initialize platform support, reserve a memzone
 * for the inline device state, parse devargs and bring the device up.
 *
 * @param pci_drv  Owning PCI driver (unused).
 * @param pci_dev  Device being probed.
 * @return 0 on success, -ENOMEM if the memzone cannot be reserved,
 *         or a negative error from platform init / devargs / roc init.
 */
static int
cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
		       struct rte_pci_device *pci_dev)
{
	char name[CNXK_NIX_INL_DEV_NAME_LEN];
	struct roc_nix_inl_dev *inl_dev;
	const struct rte_memzone *mz;
	int rc;

	RTE_SET_USED(pci_drv);

	rc = roc_plt_init();
	if (rc) {
		plt_err("Failed to initialize platform model, rc=%d", rc);
		return rc;
	}

	/* Only the primary process sets up the inline device */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Keep the state in a named memzone so remove() can find it */
	mz = rte_memzone_reserve_aligned(nix_inl_dev_to_name(pci_dev, name),
					 sizeof(*inl_dev), SOCKET_ID_ANY, 0,
					 RTE_CACHE_LINE_SIZE);
	if (mz == NULL)
		return -ENOMEM;

	inl_dev = mz->addr;
	inl_dev->pci_dev = pci_dev;

	/* Parse devargs string */
	rc = nix_inl_parse_devargs(pci_dev->device.devargs, inl_dev);
	if (rc) {
		plt_err("Failed to parse devargs rc=%d", rc);
		goto fail;
	}

	rc = roc_nix_inl_dev_init(inl_dev);
	if (rc) {
		plt_err("Failed to init nix inl device, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		goto fail;
	}

	return 0;
fail:
	rte_memzone_free(mz);
	return rc;
}
/* PCI IDs claimed by this driver: the RVU NIX inline PF and VF.  The
 * zeroed vendor_id entry terminates the table. */
static const struct rte_pci_id cnxk_nix_inl_pci_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_PF)},
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_VF)},
	{
		.vendor_id = 0,
	},
};

/* Driver descriptor: needs BAR mapping and VA-as-IOVA addressing */
static struct rte_pci_driver cnxk_nix_inl_pci = {
	.id_table = cnxk_nix_inl_pci_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cnxk_nix_inl_dev_probe,
	.remove = cnxk_nix_inl_dev_remove,
};

/* Register the driver, its ID table, kernel module dependency and the
 * devargs it accepts (selftest flag and inbound max SPI). */
RTE_PMD_REGISTER_PCI(cnxk_nix_inl, cnxk_nix_inl_pci);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_nix_inl, cnxk_nix_inl_pci_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_nix_inl, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
			      CNXK_NIX_INL_SELFTEST "=1"
			      CNXK_NIX_INL_IPSEC_IN_MAX_SPI "=<1-65535>");

@@ -7,12 +7,8 @@
#include "cnxk_ethdev.h"
/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
#define ERRCODE_ERRLEN_WIDTH 12
#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))
#define SA_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uint64_t))
#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_TBL_SZ)
#define SA_BASE_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uintptr_t))
#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ)
const uint32_t *
cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
@@ -324,3 +320,45 @@ cnxk_nix_fastpath_lookup_mem_get(void)
}
return NULL;
}
int
cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev)
{
void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
uint16_t port = dev->eth_dev->data->port_id;
uintptr_t sa_base_tbl;
uintptr_t sa_base;
uint8_t sa_w;
if (!lookup_mem)
return -EIO;
sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix, dev->inb.inl_dev);
if (!sa_base)
return -ENOTSUP;
sa_w = plt_log2_u32(dev->nix.ipsec_in_max_spi + 1);
/* Set SA Base in lookup mem */
sa_base_tbl = (uintptr_t)lookup_mem;
sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
*((uintptr_t *)sa_base_tbl + port) = sa_base | sa_w;
return 0;
}
int
cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev)
{
void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
uint16_t port = dev->eth_dev->data->port_id;
uintptr_t sa_base_tbl;
if (!lookup_mem)
return -EIO;
/* Set SA Base in lookup mem */
sa_base_tbl = (uintptr_t)lookup_mem;
sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
*((uintptr_t *)sa_base_tbl + port) = 0;
return 0;
}

@@ -12,6 +12,7 @@ sources = files(
'cnxk_ethdev.c',
'cnxk_ethdev_devargs.c',
'cnxk_ethdev_ops.c',
'cnxk_ethdev_sec.c',
'cnxk_link.c',
'cnxk_lookup.c',
'cnxk_ptp.c',
@@ -23,6 +24,7 @@ sources = files(
# CN9K
sources += files(
'cn9k_ethdev.c',
'cn9k_ethdev_sec.c',
'cn9k_rte_flow.c',
'cn9k_rx.c',
'cn9k_rx_mseg.c',

@@ -1,3 +1,8 @@
DPDK_22 {
local: *;
};
INTERNAL {
global:
cnxk_nix_inb_mode_set;
};