net/cnxk: enable packet marking callbacks

The cnxk platform supports red/yellow packet marking based on TM
configuration. This patch adds hooks to enable/disable packet
marking for VLAN DEI, IP DSCP and IP ECN. Marking is supported
only in scalar mode.
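
For illustration, an application requests marking through the generic
rte_tm API before the port is started; green marking is rejected by
this driver. A minimal sketch (function name and colour choices are
hypothetical, not part of this patch):

    #include <rte_ethdev.h>
    #include <rte_tm.h>

    /* Enable yellow/red IP DSCP and IP ECN marking on a stopped port. */
    static int
    enable_tm_marking(uint16_t port_id)
    {
            struct rte_tm_error err;
            int rc;

            rc = rte_tm_mark_ip_dscp(port_id, 0 /* green */,
                                     1 /* yellow */, 1 /* red */, &err);
            if (rc)
                    return rc;

            return rte_tm_mark_ip_ecn(port_id, 0, 1, 1, &err);
    }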

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Satha Rao 2022-02-24 23:59:25 -05:00 committed by Jerin Jacob
parent 0b7e667ee3
commit 50e2c7fdc1
13 changed files with 514 additions and 17 deletions


@ -161,6 +161,7 @@ New Features
* Added queue based priority flow control support for CN9K & CN10K.
* Added support for IP reassembly for inline inbound IPsec packets.
* Added support for packet marking in traffic manager.
* **Updated Mellanox mlx5 driver.**


@ -555,7 +555,8 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
if (flags & NIX_TX_OFFLOAD_TSO_F)
cn10k_nix_xmit_prepare_tso(m, flags);
cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec,
txq->mark_flag, txq->mark_fmt);
laddr = lmt_addr;
/* Prepare CPT instruction and get nixtx addr if


@ -784,7 +784,8 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
rte_io_wmb();
txq = cn9k_sso_hws_xtract_meta(m, txq_data);
cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
txq->mark_fmt);
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
uint64_t ol_flags = m->ol_flags;


@ -110,6 +110,9 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
if (dev->tx_mark)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
return flags;
}
@ -169,6 +172,7 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *nix = &dev->nix;
uint64_t mark_fmt, mark_flag;
struct roc_cpt_lf *inl_lf;
struct cn10k_eth_txq *txq;
struct roc_nix_sq *sq;
@ -206,6 +210,11 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
}
/* Restore marking flag from roc */
mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
nix_form_default_desc(dev, txq, qid);
txq->lso_tun_fmt = dev->lso_tun_fmt;
return 0;
@ -546,6 +555,118 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
return rc;
}
static int
cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
uint64_t mark_fmt, mark_flag;
int rc, i;
rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
mark_red, error);
if (rc)
goto exit;
mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
if (mark_flag) {
dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
dev->tx_mark = true;
} else {
dev->tx_mark = false;
if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
}
cn10k_eth_set_tx_function(eth_dev);
exit:
return rc;
}
static int
cn10k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
uint64_t mark_fmt, mark_flag;
int rc, i;
rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
error);
if (rc)
goto exit;
mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
if (mark_flag) {
dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
dev->tx_mark = true;
} else {
dev->tx_mark = false;
if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
}
cn10k_eth_set_tx_function(eth_dev);
exit:
return rc;
}
static int
cn10k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
uint64_t mark_fmt, mark_flag;
int rc, i;
rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
mark_red, error);
if (rc)
goto exit;
mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
if (mark_flag) {
dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
dev->tx_mark = true;
} else {
dev->tx_mark = false;
if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
}
cn10k_eth_set_tx_function(eth_dev);
exit:
return rc;
}
/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
@ -575,6 +696,22 @@ nix_eth_dev_ops_override(void)
cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
}
/* Update platform specific tm ops */
static void
nix_tm_ops_override(void)
{
static int init_once;
if (init_once)
return;
init_once = 1;
/* Update platform specific ops */
cnxk_tm_ops.mark_vlan_dei = cn10k_nix_tm_mark_vlan_dei;
cnxk_tm_ops.mark_ip_ecn = cn10k_nix_tm_mark_ip_ecn;
cnxk_tm_ops.mark_ip_dscp = cn10k_nix_tm_mark_ip_dscp;
}
static void
npc_flow_ops_override(void)
{
@ -614,6 +751,7 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
nix_eth_dev_ops_override();
nix_tm_ops_override();
npc_flow_ops_override();
cn10k_eth_sec_ops_override();


@ -21,6 +21,8 @@ struct cn10k_eth_txq {
uint16_t cpt_desc;
uint64_t lso_tun_fmt;
uint64_t ts_mem;
uint64_t mark_flag : 8;
uint64_t mark_fmt : 48;
} __plt_cache_aligned;
struct cn10k_eth_rxq {


@ -511,13 +511,16 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
static __rte_always_inline void
cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
const uint64_t lso_tun_fmt, bool *sec)
const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag,
uint64_t mark_fmt)
{
uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
uint64_t ol_flags = 0, mask;
union nix_send_hdr_w1_u w1;
union nix_send_sg_s *sg;
uint16_t mark_form = 0;
send_hdr = (struct nix_send_hdr_s *)cmd;
if (flags & NIX_TX_NEED_EXT_HDR) {
@ -525,7 +528,9 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
sg = (union nix_send_sg_s *)(cmd + 4);
/* Clear previous markings */
send_hdr_ext->w0.lso = 0;
send_hdr_ext->w0.mark_en = 0;
send_hdr_ext->w1.u = 0;
ol_flags = m->ol_flags;
} else {
sg = (union nix_send_sg_s *)(cmd + 2);
}
@ -621,6 +626,10 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
RTE_MBUF_F_TX_IPV6));
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
@ -630,6 +639,22 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
/* Fill for VLAN marking only when VLAN insertion enabled */
mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
(send_hdr_ext->w1.vlan1_ins_ena ||
send_hdr_ext->w1.vlan0_ins_ena));
/* Mask requested flags with packet data information */
mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
send_hdr_ext->w0.mark_en = !!mark_off;
send_hdr_ext->w0.markform = mark_form & 0x7F;
send_hdr_ext->w0.markptr = markptr;
}
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
@ -841,6 +866,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
uintptr_t pa, lbase = txq->lmt_base;
uint16_t lmt_id, burst, left, i;
uintptr_t c_lbase = lbase;
uint64_t mark_fmt = 0;
uint8_t mark_flag = 0;
rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
uint16_t c_lmt_id;
@ -860,6 +887,11 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
if (flags & NIX_TX_OFFLOAD_TSO_F)
lso_tun_fmt = txq->lso_tun_fmt;
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
mark_fmt = txq->mark_fmt;
mark_flag = txq->mark_flag;
}
/* Get LMT base address and LMT ID as lcore id */
ROC_LMT_BASE_ID_GET(lbase, lmt_id);
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
@ -887,7 +919,7 @@ again:
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
&sec);
&sec, mark_flag, mark_fmt);
laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
@ -967,6 +999,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
uint16_t segdw, lmt_id, burst, left, i;
uint8_t lnum, c_lnum, c_loff;
uintptr_t c_lbase = lbase;
uint64_t mark_fmt = 0;
uint8_t mark_flag = 0;
uint64_t data0, data1;
rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
@ -988,6 +1022,11 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
if (flags & NIX_TX_OFFLOAD_TSO_F)
lso_tun_fmt = txq->lso_tun_fmt;
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
mark_fmt = txq->mark_fmt;
mark_flag = txq->mark_flag;
}
/* Get LMT base address and LMT ID as lcore id */
ROC_LMT_BASE_ID_GET(lbase, lmt_id);
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
@ -1017,7 +1056,7 @@ again:
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
&sec);
&sec, mark_flag, mark_fmt);
laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);


@ -53,7 +53,7 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
#undef T
};
if (dev->scalar_ena) {
if (dev->scalar_ena || dev->tx_mark) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);


@ -110,6 +110,9 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
if (dev->tx_mark)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
return flags;
}
@ -168,6 +171,7 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
const struct rte_eth_txconf *tx_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
uint64_t mark_fmt, mark_flag;
struct roc_cpt_lf *inl_lf;
struct cn9k_eth_txq *txq;
struct roc_nix_sq *sq;
@ -204,6 +208,10 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
}
mark_fmt = roc_nix_tm_mark_format_get(&dev->nix, &mark_flag);
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
nix_form_default_desc(dev, txq, qid);
txq->lso_tun_fmt = dev->lso_tun_fmt;
return 0;
@ -490,6 +498,118 @@ cn9k_nix_rx_metadata_negotiate(struct rte_eth_dev *eth_dev, uint64_t *features)
return 0;
}
static int
cn9k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
uint64_t mark_fmt, mark_flag;
int rc, i;
rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
mark_red, error);
if (rc)
goto exit;
mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
if (mark_flag) {
dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
dev->tx_mark = true;
} else {
dev->tx_mark = false;
if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
}
cn9k_eth_set_tx_function(eth_dev);
exit:
return rc;
}
static int
cn9k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
uint64_t mark_fmt, mark_flag;
int rc, i;
rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
error);
if (rc)
goto exit;
mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
if (mark_flag) {
dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
dev->tx_mark = true;
} else {
dev->tx_mark = false;
if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
}
cn9k_eth_set_tx_function(eth_dev);
exit:
return rc;
}
static int
cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
uint64_t mark_fmt, mark_flag;
int rc, i;
rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
mark_red, error);
if (rc)
goto exit;
mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
if (mark_flag) {
dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
dev->tx_mark = true;
} else {
dev->tx_mark = false;
if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
}
cn9k_eth_set_tx_function(eth_dev);
exit:
return rc;
}
/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
@ -515,6 +635,22 @@ nix_eth_dev_ops_override(void)
cn9k_nix_timesync_read_tx_timestamp;
}
/* Update platform specific tm ops */
static void
nix_tm_ops_override(void)
{
static int init_once;
if (init_once)
return;
init_once = 1;
/* Update platform specific ops */
cnxk_tm_ops.mark_vlan_dei = cn9k_nix_tm_mark_vlan_dei;
cnxk_tm_ops.mark_ip_ecn = cn9k_nix_tm_mark_ip_ecn;
cnxk_tm_ops.mark_ip_dscp = cn9k_nix_tm_mark_ip_dscp;
}
static void
npc_flow_ops_override(void)
{
@ -554,6 +690,7 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
nix_eth_dev_ops_override();
nix_tm_ops_override();
npc_flow_ops_override();
cn9k_eth_sec_ops_override();


@ -22,6 +22,8 @@ struct cn9k_eth_txq {
uint64_t sa_base;
uint64_t *cpt_fc;
uint16_t cpt_desc;
uint64_t mark_flag : 8;
uint64_t mark_fmt : 48;
} __plt_cache_aligned;
struct cn9k_eth_rxq {


@ -135,13 +135,16 @@ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
static __rte_always_inline void
cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
const uint64_t lso_tun_fmt)
const uint64_t lso_tun_fmt, uint8_t mark_flag,
uint64_t mark_fmt)
{
uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
uint64_t ol_flags = 0, mask;
union nix_send_hdr_w1_u w1;
union nix_send_sg_s *sg;
uint16_t mark_form = 0;
send_hdr = (struct nix_send_hdr_s *)cmd;
if (flags & NIX_TX_NEED_EXT_HDR) {
@ -149,7 +152,9 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
sg = (union nix_send_sg_s *)(cmd + 4);
/* Clear previous markings */
send_hdr_ext->w0.lso = 0;
send_hdr_ext->w0.mark_en = 0;
send_hdr_ext->w1.u = 0;
ol_flags = m->ol_flags;
} else {
sg = (union nix_send_sg_s *)(cmd + 2);
}
@ -245,6 +250,10 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
RTE_MBUF_F_TX_IPV6));
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
@ -254,6 +263,21 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
/* Fill for VLAN marking only when VLAN insertion enabled */
mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
(send_hdr_ext->w1.vlan1_ins_ena ||
send_hdr_ext->w1.vlan0_ins_ena));
/* Mask requested flags with packet data information */
mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
send_hdr_ext->w0.mark_en = !!mark_off;
send_hdr_ext->w0.markform = mark_form & 0x7F;
send_hdr_ext->w0.markptr = markptr;
}
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
@ -502,8 +526,9 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
{
struct cn9k_eth_txq *txq = tx_queue;
const rte_iova_t io_addr = txq->io_addr;
uint64_t lso_tun_fmt, mark_fmt = 0;
void *lmt_addr = txq->lmt_addr;
uint64_t lso_tun_fmt;
uint8_t mark_flag = 0;
uint16_t i;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
@ -518,6 +543,11 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
mark_fmt = txq->mark_fmt;
mark_flag = txq->mark_flag;
}
/* Lets commit any changes in the packet here as no further changes
* to the packet will be done unless no fast free is enabled.
*/
@ -525,7 +555,8 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
rte_io_wmb();
for (i = 0; i < pkts; i++) {
cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
mark_flag, mark_fmt);
cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, 4,
flags);
cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
@ -543,8 +574,9 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct cn9k_eth_txq *txq = tx_queue;
const rte_iova_t io_addr = txq->io_addr;
uint64_t lso_tun_fmt, mark_fmt = 0;
void *lmt_addr = txq->lmt_addr;
uint64_t lso_tun_fmt;
uint8_t mark_flag = 0;
uint16_t segdw;
uint64_t i;
@ -560,6 +592,11 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
mark_fmt = txq->mark_fmt;
mark_flag = txq->mark_flag;
}
/* Lets commit any changes in the packet here as no further changes
* to the packet will be done unless no fast free is enabled.
*/
@ -567,7 +604,8 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_io_wmb();
for (i = 0; i < pkts; i++) {
cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
mark_flag, mark_fmt);
segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags,
segdw, flags);


@ -49,7 +49,7 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
#undef T
};
if (dev->scalar_ena) {
if (dev->scalar_ena || dev->tx_mark) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);


@ -18,6 +18,7 @@
#include <rte_security_driver.h>
#include <rte_tailq.h>
#include <rte_time.h>
#include <rte_tm_driver.h>
#include "roc_api.h"
@ -139,6 +140,15 @@
#define CNXK_NIX_PFC_CHAN_COUNT 16
#define CNXK_TM_MARK_VLAN_DEI BIT_ULL(0)
#define CNXK_TM_MARK_IP_DSCP BIT_ULL(1)
#define CNXK_TM_MARK_IP_ECN BIT_ULL(2)
#define CNXK_TM_MARK_MASK \
(CNXK_TM_MARK_VLAN_DEI | CNXK_TM_MARK_IP_DSCP | CNXK_TM_MARK_IP_ECN)
#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull)
struct cnxk_fc_cfg {
enum rte_eth_fc_mode mode;
uint8_t rx_pause;
@ -350,6 +360,7 @@ struct cnxk_eth_dev {
uint16_t flags;
uint8_t ptype_disable;
bool scalar_ena;
bool tx_mark;
bool ptp_en;
bool rx_mark_update; /* Enable/Disable mark update to mbuf */
@ -464,6 +475,9 @@ extern struct rte_flow_ops cnxk_flow_ops;
/* Common security ops */
extern struct rte_security_ops cnxk_eth_sec_ops;
/* Common tm ops */
extern struct rte_tm_ops cnxk_tm_ops;
/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev);
@ -540,6 +554,15 @@ uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
uint16_t queue_idx, uint16_t tx_rate);
int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error);
int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error);
int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error);
/* MTR */
int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);


@ -88,10 +88,16 @@ cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
RTE_TM_STATS_N_PKTS_RED_DROPPED |
RTE_TM_STATS_N_BYTES_RED_DROPPED;
for (i = 0; i < RTE_COLORS; i++) {
cap->mark_vlan_dei_supported[i] = false;
cap->mark_ip_ecn_tcp_supported[i] = false;
cap->mark_ip_dscp_supported[i] = false;
cap->mark_vlan_dei_supported[RTE_COLOR_GREEN] = false;
cap->mark_ip_ecn_tcp_supported[RTE_COLOR_GREEN] = false;
cap->mark_ip_ecn_sctp_supported[RTE_COLOR_GREEN] = false;
cap->mark_ip_dscp_supported[RTE_COLOR_GREEN] = false;
for (i = RTE_COLOR_YELLOW; i < RTE_COLORS; i++) {
cap->mark_vlan_dei_supported[i] = true;
cap->mark_ip_ecn_tcp_supported[i] = true;
cap->mark_ip_ecn_sctp_supported[i] = true;
cap->mark_ip_dscp_supported[i] = true;
}
return 0;
@ -599,7 +605,112 @@ exit:
return rc;
}
const struct rte_tm_ops cnxk_tm_ops = {
int
cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
int rc;
if (mark_green) {
error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
error->message = "Green VLAN marking not supported";
return -EINVAL;
}
if (eth_dev->data->dev_started) {
error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
error->message = "VLAN DEI mark for running ports not "
"supported";
return -EBUSY;
}
rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_VLAN_DEI,
mark_yellow, mark_red);
if (rc) {
error->type = roc_nix_tm_err_to_rte_err(rc);
error->message = roc_error_msg_get(rc);
}
return rc;
}
int
cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
int rc;
if (mark_green) {
error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
error->message = "Green IP ECN marking not supported";
return -EINVAL;
}
if (eth_dev->data->dev_started) {
error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
error->message = "IP ECN mark for running ports not "
"supported";
return -EBUSY;
}
rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_ECN,
mark_yellow, mark_red);
if (rc < 0)
goto exit;
rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_ECN,
mark_yellow, mark_red);
exit:
if (rc < 0) {
error->type = roc_nix_tm_err_to_rte_err(rc);
error->message = roc_error_msg_get(rc);
}
return rc;
}
int
cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
struct rte_tm_error *error)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct roc_nix *roc_nix = &dev->nix;
int rc;
if (mark_green) {
error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
error->message = "Green IP DSCP marking not supported";
return -EINVAL;
}
if (eth_dev->data->dev_started) {
error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
error->message = "IP DSCP mark for running ports not "
"supported";
return -EBUSY;
}
rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_DSCP,
mark_yellow, mark_red);
if (rc < 0)
goto exit;
rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_DSCP,
mark_yellow, mark_red);
exit:
if (rc < 0) {
error->type = roc_nix_tm_err_to_rte_err(rc);
error->message = roc_error_msg_get(rc);
}
return rc;
}
struct rte_tm_ops cnxk_tm_ops = {
.node_type_get = cnxk_nix_tm_node_type_get,
.capabilities_get = cnxk_nix_tm_capa_get,
.level_capabilities_get = cnxk_nix_tm_level_capa_get,
@ -617,6 +728,10 @@ const struct rte_tm_ops cnxk_tm_ops = {
.node_shaper_update = cnxk_nix_tm_node_shaper_update,
.node_parent_update = cnxk_nix_tm_node_parent_update,
.node_stats_read = cnxk_nix_tm_node_stats_read,
.mark_vlan_dei = cnxk_nix_tm_mark_vlan_dei,
.mark_ip_ecn = cnxk_nix_tm_mark_ip_ecn,
.mark_ip_dscp = cnxk_nix_tm_mark_ip_dscp,
};
int