net/octeontx2: support SDP interface

System DPI Packet Interface Unit (SDP) is a co-processor
of OCTEON TX2 which provides PCIe endpoint support for a
remote host to DMA packets into and out of the OCTEON TX2 SoC.
SDP interface becomes active only when it is connected in
EP mode. It exposes input and output queue pairs to remote host
for instruction input and packet output. It can be used as
a communication channel between remote host and OCTEON TX2.
Host machine needs to use corresponding user/kernel mode
driver to communicate with SDP interface on OCTEON TX2 SoC.

SDP interface support is limited to SDP PF device now.
No SDP VF support.

Signed-off-by: Subrahmanyam Nilla <snilla@marvell.com>
Signed-off-by: Venkateshwarlu Nalla <venkatn@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
This commit is contained in:
Subrahmanyam Nilla 2019-11-06 14:57:34 +05:30 committed by Ferruh Yigit
parent aa74c383d4
commit c261680cdb
13 changed files with 90 additions and 23 deletions

View File

@ -212,6 +212,10 @@ Multicast MAC filtering
``net_octeontx2`` pmd supports multicast mac filtering feature only on physical
function devices.
SDP interface support
~~~~~~~~~~~~~~~~~~~~~
OCTEON TX2 SDP interface support is limited to the PF device; no VF support.
Debugging Options
-----------------

View File

@ -64,6 +64,8 @@ DPDK subsystem.
+---+-----+--------------------------------------------------------------+
| 8 | DPI | rte_rawdev |
+---+-----+--------------------------------------------------------------+
| 9 | SDP | rte_ethdev |
+---+-----+--------------------------------------------------------------+
PF0 is called the administrative / admin function (AF) and has exclusive
privileges to provision RVU functional block's LFs to each of the PF/VF.
@ -102,6 +104,25 @@ Typical application usage models are,
#. Exception path to Linux kernel from DPDK application as SW ``KNI`` replacement.
#. Communication between two different DPDK applications.
SDP interface
-------------
System DPI Packet Interface unit(SDP) provides PCIe endpoint support for remote host
to DMA packets into and out of OCTEON TX2 SoC. SDP interface becomes active only when
OCTEON TX2 SoC is connected in PCIe endpoint mode. It can be used to send/receive
packets to/from remote host machine using input/output queue pairs exposed to it.
SDP interface receives input packets from remote host from NIX-RX and sends packets
to remote host using NIX-TX. Remote host machine needs to use corresponding driver
(kernel/user mode) to communicate with SDP interface on OCTEON TX2 SoC. SDP supports
single PCIe SRIOV physical function(PF) and multiple virtual functions(VF's). Users
can bind PF or VF to use SDP interface and it will be enumerated as ethdev ports.
The primary use case for SDP is to enable the smart NIC use case. Typical usage models are,
#. Communication channel between remote host and OCTEON TX2 SoC over PCIe.
#. Transfer packets received from network interface to remote host over PCIe and
vice-versa.
OCTEON TX2 packet flow
----------------------

View File

@ -121,6 +121,8 @@ extern int otx2_logtype_dpi;
#define PCI_DEVID_OCTEONTX2_RVU_CPT_VF 0xA0FE
#define PCI_DEVID_OCTEONTX2_RVU_AF_VF 0xA0f8
#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
#define PCI_DEVID_OCTEONTX2_RVU_SDP_PF 0xA0f6
#define PCI_DEVID_OCTEONTX2_RVU_SDP_VF 0xA0f7
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX_95XX 0xB200

View File

@ -885,6 +885,7 @@ otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
case PCI_DEVID_OCTEONTX2_RVU_VF:
case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
dev->hwcap |= OTX2_HWCAP_F_VF;
break;
}

View File

@ -19,6 +19,10 @@
#define otx2_dev_is_lbk(dev) ((dev->hwcap & OTX2_HWCAP_F_VF) && \
(dev->tx_chan_base < 0x700))
#define otx2_dev_revid(dev) (dev->hwcap & 0xFF)
#define otx2_dev_is_sdp(dev) (dev->sdp_link)
#define otx2_dev_is_vf_or_sdp(dev) \
(otx2_dev_is_vf(dev) || otx2_dev_is_sdp(dev))
#define otx2_dev_is_A0(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \

View File

@ -144,7 +144,7 @@ otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
{
struct otx2_mbox *mbox = dev->mbox;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
@ -157,7 +157,7 @@ otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
{
struct otx2_mbox *mbox = dev->mbox;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
@ -190,7 +190,7 @@ nix_cgx_start_link_event(struct otx2_eth_dev *dev)
{
struct otx2_mbox *mbox = dev->mbox;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
@ -203,7 +203,7 @@ cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
{
struct otx2_mbox *mbox = dev->mbox;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return 0;
if (en)
@ -219,7 +219,7 @@ nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
{
struct otx2_mbox *mbox = dev->mbox;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
@ -2086,6 +2086,15 @@ otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
return otx2_mbox_process(mbox);
}
/* Return true when the PCI device is an SDP interface (PF or VF).
 * Matched against the RVU SDP device IDs defined in otx2_common.h;
 * used at init time to set dev->sdp_link.
 */
static bool
otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
{
	/* Idiom: return the predicate directly instead of
	 * an if/return-true/return-false ladder.
	 */
	return (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
		pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF);
}
static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
@ -2129,6 +2138,10 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
goto error;
}
}
if (otx2_eth_dev_is_sdp(pci_dev))
dev->sdp_link = true;
else
dev->sdp_link = false;
/* Device generic callbacks */
dev->ops = &otx2_dev_ops;
dev->eth_dev = eth_dev;
@ -2416,6 +2429,14 @@ static const struct rte_pci_id pci_nix_map[] = {
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_AF_VF)
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
},
{
.vendor_id = 0,
},

View File

@ -321,6 +321,7 @@ struct otx2_eth_dev {
uint64_t clk_delta;
bool mc_tbl_set;
struct otx2_nix_mc_filter_tbl mc_fltr_tbl;
bool sdp_link; /* SDP flag */
} __rte_cache_aligned;
struct otx2_eth_txq {

View File

@ -36,6 +36,8 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
req->update_smq = true;
if (otx2_dev_is_sdp(dev))
req->sdp_link = true;
/* FRS HW config should exclude FCS but include NPC VTAG insert size */
req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE;
@ -46,6 +48,8 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
/* Now just update Rx MAXLEN */
req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
req->maxlen = frame_size - RTE_ETHER_CRC_LEN;
if (otx2_dev_is_sdp(dev))
req->sdp_link = true;
rc = otx2_mbox_process(mbox);
if (rc)
@ -98,7 +102,7 @@ nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_mbox *mbox = dev->mbox;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return;
if (en)

View File

@ -14,6 +14,9 @@ otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb)
struct nix_bp_cfg_rsp *rsp;
int rc;
if (otx2_dev_is_sdp(dev))
return 0;
if (enb) {
req = otx2_mbox_alloc_msg_nix_bp_enable(mbox);
req->chan_base = 0;
@ -199,7 +202,7 @@ otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct rte_eth_fc_conf fc_conf;
if (otx2_dev_is_lbk(dev))
if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
return 0;
memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));

View File

@ -93,7 +93,7 @@ otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
RTE_SET_USED(wait_to_complete);
if (otx2_dev_is_lbk(dev))
if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_get_linkinfo(mbox);
@ -129,7 +129,7 @@ otx2_nix_dev_set_link_up(struct rte_eth_dev *eth_dev)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
int rc, i;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
rc = nix_dev_set_link_state(eth_dev, 1);
@ -150,7 +150,7 @@ otx2_nix_dev_set_link_down(struct rte_eth_dev *eth_dev)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
int i;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
/* Stop tx queues */

View File

@ -15,7 +15,7 @@ otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
struct otx2_mbox *mbox = dev->mbox;
int rc;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
if (otx2_dev_active_vfs(dev))
@ -38,7 +38,7 @@ otx2_cgx_mac_max_entries_get(struct otx2_eth_dev *dev)
struct otx2_mbox *mbox = dev->mbox;
int rc;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_mac_max_entries_get(mbox);
@ -59,7 +59,7 @@ otx2_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
struct cgx_mac_addr_add_rsp *rsp;
int rc;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
if (otx2_dev_active_vfs(dev))
@ -89,7 +89,7 @@ otx2_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
struct cgx_mac_addr_del_req *req;
int rc;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return;
req = otx2_mbox_alloc_msg_cgx_mac_addr_del(mbox);

View File

@ -104,7 +104,7 @@ nix_ptp_config(struct rte_eth_dev *eth_dev, int en)
struct otx2_mbox *mbox = dev->mbox;
uint8_t rc = -EINVAL;
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return rc;
if (en) {
@ -168,7 +168,7 @@ otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
}
/* If we are VF, no further action can be taken */
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return -EINVAL;
if (!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)) {
@ -222,7 +222,7 @@ otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
}
/* If we are VF, nothing else can be done */
if (otx2_dev_is_vf(dev))
if (otx2_dev_is_vf_or_sdp(dev))
return -EINVAL;
dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;

View File

@ -410,6 +410,12 @@ populate_tm_registers(struct otx2_eth_dev *dev,
*regval++ = shaper2regval(&cir) | 1;
req->num_regs++;
}
/* Configure TL4 to send to SDP channel instead of CGX/LBK */
if (otx2_dev_is_sdp(dev)) {
*reg++ = NIX_AF_TL4X_SDP_LINK_CFG(schq);
*regval++ = BIT_ULL(12);
req->num_regs++;
}
rc = send_tm_reqval(mbox, req);
if (rc)
@ -465,9 +471,12 @@ populate_tm_registers(struct otx2_eth_dev *dev,
else
*regval++ = (strict_schedul_prio << 24) | rr_quantum;
req->num_regs++;
*reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq, nix_get_link(dev));
*regval++ = BIT_ULL(12) | nix_get_relchan(dev);
req->num_regs++;
if (!otx2_dev_is_sdp(dev)) {
*reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
nix_get_link(dev));
*regval++ = BIT_ULL(12) | nix_get_relchan(dev);
req->num_regs++;
}
if (pir.rate && pir.burst) {
*reg++ = NIX_AF_TL2X_PIR(schq);
*regval++ = shaper2regval(&pir) | 1;
@ -522,9 +531,6 @@ nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
uint32_t lvl;
int rc = 0;
if (nix_get_link(dev) == 13)
return -EPERM;
for (lvl = 0; lvl < (uint32_t)dev->otx2_tm_root_lvl + 1; lvl++) {
TAILQ_FOREACH(tm_node, &dev->node_list, node) {
if (tm_node->hw_lvl_id == lvl) {