net/nfp: add flower ctrl VNIC Rx/Tx
Adds the Rx and Tx functions for the ctrl vNIC. The logic is mostly identical to the normal Rx and Tx functionality of the NFP PMD. Make use of the ctrl vNIC service logic to service the ctrl vNIC Rx path. Signed-off-by: Chaoyong He <chaoyong.he@corigine.com> Signed-off-by: Heinrich Kuhn <heinrich.kuhn@corigine.com> Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
This commit is contained in:
parent
d981d315ac
commit
a36634e87e
@ -100,6 +100,7 @@ New Features
* Added the support of flower firmware.

* Added the flower service infrastructure.

* Added the control message interactive channels between PMD and firmware.

* **Updated NXP dpaa2 driver.**
|
@ -18,6 +18,7 @@
|
|||||||
#include "../nfpcore/nfp_rtsym.h"
|
#include "../nfpcore/nfp_rtsym.h"
|
||||||
#include "../nfpcore/nfp_nsp.h"
|
#include "../nfpcore/nfp_nsp.h"
|
||||||
#include "nfp_flower.h"
|
#include "nfp_flower.h"
|
||||||
|
#include "nfp_flower_ctrl.h"
|
||||||
|
|
||||||
#define CTRL_VNIC_NB_DESC 512
|
#define CTRL_VNIC_NB_DESC 512
|
||||||
#define DEFAULT_FLBUF_SIZE 9216
|
#define DEFAULT_FLBUF_SIZE 9216
|
||||||
@ -438,6 +439,46 @@ nfp_flower_start_ctrl_vnic(struct nfp_net_hw *hw)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Service callback for the flower ctrl vNIC.
 *
 * Runs on a service lcore and simply delegates to the ctrl vNIC poll
 * loop. Always returns 0, as expected of an rte_service callback.
 */
static int
nfp_flower_ctrl_vnic_service(void *arg)
{
	struct nfp_app_fw_flower *app = arg;

	nfp_flower_ctrl_vnic_poll(app);

	return 0;
}
||||||
|
|
||||||
|
static int
|
||||||
|
nfp_flower_enable_services(struct nfp_app_fw_flower *app_fw_flower)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
uint32_t service_id;
|
||||||
|
const struct rte_service_spec flower_service = {
|
||||||
|
.name = "flower_ctrl_vnic_service",
|
||||||
|
.callback = nfp_flower_ctrl_vnic_service,
|
||||||
|
.callback_userdata = (void *)app_fw_flower,
|
||||||
|
};
|
||||||
|
|
||||||
|
/* Register the flower services */
|
||||||
|
ret = rte_service_component_register(&flower_service, &service_id);
|
||||||
|
if (ret != 0) {
|
||||||
|
PMD_INIT_LOG(ERR, "Could not register %s", flower_service.name);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
PMD_INIT_LOG(INFO, "%s registered", flower_service.name);
|
||||||
|
|
||||||
|
/* Map them to available service cores*/
|
||||||
|
ret = nfp_map_service(service_id);
|
||||||
|
if (ret != 0) {
|
||||||
|
PMD_INIT_LOG(ERR, "Could not map %s", flower_service.name);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
|
nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
|
||||||
{
|
{
|
||||||
@ -519,6 +560,14 @@ nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
|
|||||||
goto ctrl_vnic_cleanup;
|
goto ctrl_vnic_cleanup;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Start up flower services */
|
||||||
|
ret = nfp_flower_enable_services(app_fw_flower);
|
||||||
|
if (ret != 0) {
|
||||||
|
PMD_INIT_LOG(ERR, "Could not enable flower services");
|
||||||
|
ret = -ESRCH;
|
||||||
|
goto ctrl_vnic_cleanup;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
ctrl_vnic_cleanup:
|
ctrl_vnic_cleanup:
|
||||||
|
@ -6,6 +6,14 @@
|
|||||||
#ifndef _NFP_FLOWER_H_
|
#ifndef _NFP_FLOWER_H_
|
||||||
#define _NFP_FLOWER_H_
|
#define _NFP_FLOWER_H_
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Flower fallback and ctrl path always adds and removes
|
||||||
|
* 8 bytes of prepended data. Tx descriptors must point
|
||||||
|
* to the correct packet data offset after metadata has
|
||||||
|
* been added
|
||||||
|
*/
|
||||||
|
#define FLOWER_PKT_DATA_OFFSET 8
|
||||||
|
|
||||||
/* The flower application's private structure */
|
/* The flower application's private structure */
|
||||||
struct nfp_app_fw_flower {
|
struct nfp_app_fw_flower {
|
||||||
/* Pointer to the PF vNIC */
|
/* Pointer to the PF vNIC */
|
||||||
@ -16,6 +24,12 @@ struct nfp_app_fw_flower {
|
|||||||
|
|
||||||
/* Pointer to the ctrl vNIC */
|
/* Pointer to the ctrl vNIC */
|
||||||
struct nfp_net_hw *ctrl_hw;
|
struct nfp_net_hw *ctrl_hw;
|
||||||
|
|
||||||
|
/* Ctrl vNIC Rx counter */
|
||||||
|
uint64_t ctrl_vnic_rx_count;
|
||||||
|
|
||||||
|
/* Ctrl vNIC Tx counter */
|
||||||
|
uint64_t ctrl_vnic_tx_count;
|
||||||
};
|
};
|
||||||
|
|
||||||
int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
|
int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
|
||||||
|
250
drivers/net/nfp/flower/nfp_flower_ctrl.c
Normal file
250
drivers/net/nfp/flower/nfp_flower_ctrl.c
Normal file
@ -0,0 +1,250 @@
|
|||||||
|
/* SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
* Copyright (c) 2022 Corigine, Inc.
|
||||||
|
* All rights reserved.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <rte_common.h>
|
||||||
|
#include <ethdev_pci.h>
|
||||||
|
|
||||||
|
#include "../nfp_common.h"
|
||||||
|
#include "../nfp_logs.h"
|
||||||
|
#include "../nfp_ctrl.h"
|
||||||
|
#include "../nfp_rxtx.h"
|
||||||
|
#include "nfp_flower.h"
|
||||||
|
#include "nfp_flower_ctrl.h"
|
||||||
|
|
||||||
|
#define MAX_PKT_BURST 32
|
||||||
|
|
||||||
|
static uint16_t
|
||||||
|
nfp_flower_ctrl_vnic_recv(void *rx_queue,
|
||||||
|
struct rte_mbuf **rx_pkts,
|
||||||
|
uint16_t nb_pkts)
|
||||||
|
{
|
||||||
|
uint64_t dma_addr;
|
||||||
|
uint16_t avail = 0;
|
||||||
|
struct rte_mbuf *mb;
|
||||||
|
uint16_t nb_hold = 0;
|
||||||
|
struct nfp_net_hw *hw;
|
||||||
|
struct nfp_net_rxq *rxq;
|
||||||
|
struct rte_mbuf *new_mb;
|
||||||
|
struct nfp_net_rx_buff *rxb;
|
||||||
|
struct nfp_net_rx_desc *rxds;
|
||||||
|
|
||||||
|
rxq = rx_queue;
|
||||||
|
if (unlikely(rxq == NULL)) {
|
||||||
|
/*
|
||||||
|
* DPDK just checks the queue is lower than max queues
|
||||||
|
* enabled. But the queue needs to be configured
|
||||||
|
*/
|
||||||
|
PMD_RX_LOG(ERR, "RX Bad queue");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
hw = rxq->hw;
|
||||||
|
while (avail < nb_pkts) {
|
||||||
|
rxb = &rxq->rxbufs[rxq->rd_p];
|
||||||
|
if (unlikely(rxb == NULL)) {
|
||||||
|
PMD_RX_LOG(ERR, "rxb does not exist!");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
rxds = &rxq->rxds[rxq->rd_p];
|
||||||
|
if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
|
||||||
|
break;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Memory barrier to ensure that we won't do other
|
||||||
|
* reads before the DD bit.
|
||||||
|
*/
|
||||||
|
rte_rmb();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We got a packet. Let's alloc a new mbuf for refilling the
|
||||||
|
* free descriptor ring as soon as possible
|
||||||
|
*/
|
||||||
|
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
|
||||||
|
if (unlikely(new_mb == NULL)) {
|
||||||
|
PMD_RX_LOG(ERR,
|
||||||
|
"RX mbuf alloc failed port_id=%u queue_id=%u",
|
||||||
|
rxq->port_id, (unsigned int)rxq->qidx);
|
||||||
|
nfp_net_mbuf_alloc_failed(rxq);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
nb_hold++;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Grab the mbuf and refill the descriptor with the
|
||||||
|
* previously allocated mbuf
|
||||||
|
*/
|
||||||
|
mb = rxb->mbuf;
|
||||||
|
rxb->mbuf = new_mb;
|
||||||
|
|
||||||
|
/* Size of this segment */
|
||||||
|
mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
|
||||||
|
/* Size of the whole packet. We just support 1 segment */
|
||||||
|
mb->pkt_len = mb->data_len;
|
||||||
|
|
||||||
|
if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) {
|
||||||
|
/*
|
||||||
|
* This should not happen and the user has the
|
||||||
|
* responsibility of avoiding it. But we have
|
||||||
|
* to give some info about the error
|
||||||
|
*/
|
||||||
|
RTE_LOG_DP(ERR, PMD,
|
||||||
|
"mbuf overflow likely due to the RX offset.\n"
|
||||||
|
"\t\tYour mbuf size should have extra space for"
|
||||||
|
" RX offset=%u bytes.\n"
|
||||||
|
"\t\tCurrently you just have %u bytes available"
|
||||||
|
" but the received packet is %u bytes long",
|
||||||
|
hw->rx_offset,
|
||||||
|
rxq->mbuf_size - hw->rx_offset,
|
||||||
|
mb->data_len);
|
||||||
|
rte_pktmbuf_free(mb);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Filling the received mbuf with packet info */
|
||||||
|
if (hw->rx_offset)
|
||||||
|
mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
|
||||||
|
else
|
||||||
|
mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds);
|
||||||
|
|
||||||
|
/* No scatter mode supported */
|
||||||
|
mb->nb_segs = 1;
|
||||||
|
mb->next = NULL;
|
||||||
|
mb->port = rxq->port_id;
|
||||||
|
|
||||||
|
rx_pkts[avail++] = mb;
|
||||||
|
|
||||||
|
/* Now resetting and updating the descriptor */
|
||||||
|
rxds->vals[0] = 0;
|
||||||
|
rxds->vals[1] = 0;
|
||||||
|
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
|
||||||
|
rxds->fld.dd = 0;
|
||||||
|
rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
|
||||||
|
rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
|
||||||
|
|
||||||
|
rxq->rd_p++;
|
||||||
|
if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
|
||||||
|
rxq->rd_p = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (nb_hold == 0)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
nb_hold += rxq->nb_rx_hold;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* FL descriptors needs to be written before incrementing the
|
||||||
|
* FL queue WR pointer
|
||||||
|
*/
|
||||||
|
rte_wmb();
|
||||||
|
if (nb_hold >= rxq->rx_free_thresh) {
|
||||||
|
PMD_RX_LOG(DEBUG, "port=%hu queue=%d nb_hold=%hu avail=%hu",
|
||||||
|
rxq->port_id, rxq->qidx, nb_hold, avail);
|
||||||
|
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
|
||||||
|
nb_hold = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
rxq->nb_rx_hold = nb_hold;
|
||||||
|
|
||||||
|
return avail;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint16_t
|
||||||
|
nfp_flower_ctrl_vnic_xmit(struct nfp_app_fw_flower *app_fw_flower,
|
||||||
|
struct rte_mbuf *mbuf)
|
||||||
|
{
|
||||||
|
uint16_t cnt = 0;
|
||||||
|
uint64_t dma_addr;
|
||||||
|
uint32_t free_descs;
|
||||||
|
struct rte_mbuf **lmbuf;
|
||||||
|
struct nfp_net_txq *txq;
|
||||||
|
struct nfp_net_hw *ctrl_hw;
|
||||||
|
struct rte_eth_dev *ctrl_dev;
|
||||||
|
struct nfp_net_nfd3_tx_desc *txds;
|
||||||
|
|
||||||
|
ctrl_hw = app_fw_flower->ctrl_hw;
|
||||||
|
ctrl_dev = ctrl_hw->eth_dev;
|
||||||
|
|
||||||
|
/* Flower ctrl vNIC only has a single tx queue */
|
||||||
|
txq = ctrl_dev->data->tx_queues[0];
|
||||||
|
if (unlikely(txq == NULL)) {
|
||||||
|
/*
|
||||||
|
* DPDK just checks the queue is lower than max queues
|
||||||
|
* enabled. But the queue needs to be configured
|
||||||
|
*/
|
||||||
|
PMD_TX_LOG(ERR, "ctrl dev TX Bad queue");
|
||||||
|
goto xmit_end;
|
||||||
|
}
|
||||||
|
|
||||||
|
txds = &txq->txds[txq->wr_p];
|
||||||
|
txds->vals[0] = 0;
|
||||||
|
txds->vals[1] = 0;
|
||||||
|
txds->vals[2] = 0;
|
||||||
|
txds->vals[3] = 0;
|
||||||
|
|
||||||
|
if (nfp_net_nfd3_txq_full(txq))
|
||||||
|
nfp_net_tx_free_bufs(txq);
|
||||||
|
|
||||||
|
free_descs = nfp_net_nfd3_free_tx_desc(txq);
|
||||||
|
if (unlikely(free_descs == 0)) {
|
||||||
|
PMD_TX_LOG(ERR, "ctrl dev no free descs");
|
||||||
|
goto xmit_end;
|
||||||
|
}
|
||||||
|
|
||||||
|
lmbuf = &txq->txbufs[txq->wr_p].mbuf;
|
||||||
|
RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
|
||||||
|
if (*lmbuf)
|
||||||
|
rte_pktmbuf_free_seg(*lmbuf);
|
||||||
|
|
||||||
|
*lmbuf = mbuf;
|
||||||
|
dma_addr = rte_mbuf_data_iova(mbuf);
|
||||||
|
|
||||||
|
txds->data_len = mbuf->pkt_len;
|
||||||
|
txds->dma_len = txds->data_len;
|
||||||
|
txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
|
||||||
|
txds->dma_addr_lo = (dma_addr & 0xffffffff);
|
||||||
|
txds->offset_eop = FLOWER_PKT_DATA_OFFSET | PCIE_DESC_TX_EOP;
|
||||||
|
|
||||||
|
txq->wr_p++;
|
||||||
|
if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
|
||||||
|
txq->wr_p = 0;
|
||||||
|
|
||||||
|
cnt++;
|
||||||
|
app_fw_flower->ctrl_vnic_tx_count++;
|
||||||
|
|
||||||
|
xmit_end:
|
||||||
|
rte_wmb();
|
||||||
|
nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, 1);
|
||||||
|
|
||||||
|
return cnt;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower)
|
||||||
|
{
|
||||||
|
uint16_t i;
|
||||||
|
uint16_t count;
|
||||||
|
struct nfp_net_rxq *rxq;
|
||||||
|
struct nfp_net_hw *ctrl_hw;
|
||||||
|
struct rte_eth_dev *ctrl_eth_dev;
|
||||||
|
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
|
||||||
|
|
||||||
|
ctrl_hw = app_fw_flower->ctrl_hw;
|
||||||
|
ctrl_eth_dev = ctrl_hw->eth_dev;
|
||||||
|
|
||||||
|
/* ctrl vNIC only has a single Rx queue */
|
||||||
|
rxq = ctrl_eth_dev->data->rx_queues[0];
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
count = nfp_flower_ctrl_vnic_recv(rxq, pkts_burst, MAX_PKT_BURST);
|
||||||
|
if (count != 0) {
|
||||||
|
app_fw_flower->ctrl_vnic_rx_count += count;
|
||||||
|
/* Process cmsgs here, only free for now */
|
||||||
|
for (i = 0; i < count; i++)
|
||||||
|
rte_pktmbuf_free(pkts_burst[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
13
drivers/net/nfp/flower/nfp_flower_ctrl.h
Normal file
13
drivers/net/nfp/flower/nfp_flower_ctrl.h
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 Corigine, Inc.
 * All rights reserved.
 */

#ifndef _NFP_FLOWER_CTRL_H_
#define _NFP_FLOWER_CTRL_H_

/* Fix: make the header self-contained instead of relying on include order */
#include <stdint.h>

struct rte_mbuf;
struct nfp_app_fw_flower;

/* Service the ctrl vNIC Rx path; loops forever on the service core. */
void nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower);

/* Queue one cmsg mbuf on the ctrl vNIC Tx ring; returns packets queued. */
uint16_t nfp_flower_ctrl_vnic_xmit(struct nfp_app_fw_flower *app_fw_flower,
		struct rte_mbuf *mbuf);

#endif /* _NFP_FLOWER_CTRL_H_ */
@ -7,6 +7,7 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
|
|||||||
endif
|
endif
|
||||||
sources = files(
|
sources = files(
|
||||||
'flower/nfp_flower.c',
|
'flower/nfp_flower.c',
|
||||||
|
'flower/nfp_flower_ctrl.c',
|
||||||
'nfpcore/nfp_cpp_pcie_ops.c',
|
'nfpcore/nfp_cpp_pcie_ops.c',
|
||||||
'nfpcore/nfp_nsp.c',
|
'nfpcore/nfp_nsp.c',
|
||||||
'nfpcore/nfp_cppcore.c',
|
'nfpcore/nfp_cppcore.c',
|
||||||
|
Loading…
Reference in New Issue
Block a user