net/bnxt: support port representor data path
Added code to support Tx/Rx from a VF representor port. The VF-reps use
the Rx/Tx rings of the Trusted VF/PF. For each VF-rep, the Trusted VF/PF
driver issues a VFR_ALLOC FW command that returns "cfa_code" and
"cfa_action" values. The FW sets up the filter tables such that VF
traffic by default (in the absence of other rules) gets punted to the
parent function, i.e. either the Trusted VF or the PF. The cfa_code
value in the Rx completion informs the driver of the source VF. For
traffic transmitted from the VF-rep, the Tx BD is tagged with a
cfa_action value that instructs the HW to punt it to the corresponding
VF.

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
commit 6dc83230b4
parent 322bd6e702
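The heart of the Rx punt path added below is a plain table lookup:
bnxt_init_rep_info() allocates a 64K-entry cfa_code_map, the VFR_ALLOC
response populates it, and bnxt_vfr_recv() consults it for every Rx
completion. A minimal, self-contained C model of that mapping (not part
of the patch; punt_target() and main() are illustrative, the macros
mirror the driver's BNXT_MAX_CFA_CODE/BNXT_VF_IDX_INVALID):

#include <stdint.h>
#include <stdio.h>

#define MAX_CFA_CODE	65536	/* mirrors BNXT_MAX_CFA_CODE */
#define VF_IDX_INVALID	0xffff	/* mirrors BNXT_VF_IDX_INVALID */

static uint16_t cfa_code_map[MAX_CFA_CODE];	/* cfa_code -> vf_id */

/* Returns the representor VF index for a completion's cfa_code, or
 * VF_IDX_INVALID when the packet stays on the parent's normal Rx path.
 */
static uint16_t punt_target(uint16_t cfa_code)
{
	return cfa_code_map[cfa_code];
}

int main(void)
{
	/* Initialize every slot to "no representor", as
	 * bnxt_init_rep_info() does in the patch below.
	 */
	for (int i = 0; i < MAX_CFA_CODE; i++)
		cfa_code_map[i] = VF_IDX_INVALID;

	cfa_code_map[0x123] = 2;	/* as if FW returned cfa_code 0x123 for VF 2 */

	printf("cfa_code 0x123 -> vf %u\n", (unsigned)punt_target(0x123));
	printf("cfa_code 0x456 -> 0x%x (normal Rx)\n",
	       (unsigned)punt_target(0x456));
	return 0;
}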
@@ -495,6 +495,7 @@ struct bnxt_mark_info {

 struct bnxt_rep_info {
 	struct rte_eth_dev	*vfr_eth_dev;
+	pthread_mutex_t		vfr_lock;
 };

 /* address space location of register */
@@ -755,7 +756,8 @@ struct bnxt {
 	uint16_t		fw_reset_max_msecs;
 	uint16_t		switch_domain_id;
 	uint16_t		num_reps;
-	struct bnxt_rep_info	rep_info[BNXT_MAX_VF_REPS];
+	struct bnxt_rep_info	*rep_info;
+	uint16_t		*cfa_code_map;
 	/* Struct to hold adapter error recovery related info */
 	struct bnxt_error_recovery_info *recovery_info;
 #define BNXT_MARK_TABLE_SZ	(sizeof(struct bnxt_mark_info) * 64 * 1024)
@@ -780,12 +782,28 @@ struct bnxt {
  * Structure to store private data for each VF representor instance
  */
 struct bnxt_vf_representor {
-	uint16_t		switch_domain_id;
-	uint16_t		vf_id;
+	uint16_t		switch_domain_id;
+	uint16_t		vf_id;
+	uint16_t		tx_cfa_action;
+	uint16_t		rx_cfa_code;
 	/* Private data store of associated PF/Trusted VF */
-	struct bnxt		*parent_priv;
-	uint8_t			mac_addr[RTE_ETHER_ADDR_LEN];
-	uint8_t			dflt_mac_addr[RTE_ETHER_ADDR_LEN];
+	struct rte_eth_dev	*parent_dev;
+	uint8_t			mac_addr[RTE_ETHER_ADDR_LEN];
+	uint8_t			dflt_mac_addr[RTE_ETHER_ADDR_LEN];
+	struct bnxt_rx_queue	**rx_queues;
+	unsigned int		rx_nr_rings;
+	unsigned int		tx_nr_rings;
+	uint64_t		tx_pkts[BNXT_MAX_VF_REP_RINGS];
+	uint64_t		tx_bytes[BNXT_MAX_VF_REP_RINGS];
+	uint64_t		rx_pkts[BNXT_MAX_VF_REP_RINGS];
+	uint64_t		rx_bytes[BNXT_MAX_VF_REP_RINGS];
+	uint64_t		rx_drop_pkts[BNXT_MAX_VF_REP_RINGS];
+	uint64_t		rx_drop_bytes[BNXT_MAX_VF_REP_RINGS];
 };

+struct bnxt_vf_rep_tx_queue {
+	struct bnxt_tx_queue	*txq;
+	struct bnxt_vf_representor *bp;
+};
+
 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
@@ -136,6 +136,7 @@ static void bnxt_cancel_fw_health_check(struct bnxt *bp);
 static int bnxt_restore_vlan_filters(struct bnxt *bp);
 static void bnxt_dev_recover(void *arg);
 static void bnxt_free_error_recovery_info(struct bnxt *bp);
+static void bnxt_free_rep_info(struct bnxt *bp);

 int is_bnxt_in_error(struct bnxt *bp)
 {
@@ -5242,7 +5243,7 @@ bnxt_init_locks(struct bnxt *bp)

 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
 {
-	int rc;
+	int rc = 0;

 	rc = bnxt_init_fw(bp);
 	if (rc)
@@ -5641,6 +5642,8 @@ bnxt_uninit_locks(struct bnxt *bp)
 {
 	pthread_mutex_destroy(&bp->flow_lock);
 	pthread_mutex_destroy(&bp->def_cp_lock);
+	if (bp->rep_info)
+		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
 }

 static int
@@ -5663,6 +5666,7 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)

 	bnxt_uninit_locks(bp);
 	bnxt_free_flow_stats_info(bp);
+	bnxt_free_rep_info(bp);
 	rte_free(bp->ptp_cfg);
 	bp->ptp_cfg = NULL;
 	return rc;
@@ -5702,56 +5706,73 @@ static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
 	return ret;
 }

-static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-			  struct rte_pci_device *pci_dev)
+static void bnxt_free_rep_info(struct bnxt *bp)
 {
+	rte_free(bp->rep_info);
+	bp->rep_info = NULL;
+	rte_free(bp->cfa_code_map);
+	bp->cfa_code_map = NULL;
+}
+
+static int bnxt_init_rep_info(struct bnxt *bp)
+{
+	int i = 0, rc;
+
+	if (bp->rep_info)
+		return 0;
+
+	bp->rep_info = rte_zmalloc("bnxt_rep_info",
+				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
+				   0);
+	if (!bp->rep_info) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
+		return -ENOMEM;
+	}
+	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
+				       sizeof(*bp->cfa_code_map) *
+				       BNXT_MAX_CFA_CODE, 0);
+	if (!bp->cfa_code_map) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
+		bnxt_free_rep_info(bp);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
+		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
+
+	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
+	if (rc) {
+		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
+		bnxt_free_rep_info(bp);
+		return rc;
+	}
+	return rc;
+}
+
+static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
+			       struct rte_eth_devargs eth_da,
+			       struct rte_eth_dev *backing_eth_dev)
+{
+	struct rte_eth_dev *vf_rep_eth_dev;
+	char name[RTE_ETH_NAME_MAX_LEN];
-	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
-	struct rte_eth_dev *backing_eth_dev, *vf_rep_eth_dev;
+	struct bnxt *backing_bp;
 	uint16_t num_rep;
 	int i, ret = 0;
-	struct bnxt *backing_bp;

-	if (pci_dev->device.devargs) {
-		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
-					    &eth_da);
-		if (ret)
-			return ret;
-	}
-
 	num_rep = eth_da.nb_representor_ports;
-	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
-		    num_rep);
-
-	/* We could come here after first level of probe is already invoked
-	 * as part of an application bringup(OVS-DPDK vswitchd), so first check
-	 * for already allocated eth_dev for the backing device (PF/Trusted VF)
-	 */
-	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
-	if (backing_eth_dev == NULL) {
-		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
-					 sizeof(struct bnxt),
-					 eth_dev_pci_specific_init, pci_dev,
-					 bnxt_dev_init, NULL);
-
-		if (ret || !num_rep)
-			return ret;
-	}
-
 	if (num_rep > BNXT_MAX_VF_REPS) {
 		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
-			    eth_da.nb_representor_ports, BNXT_MAX_VF_REPS);
-		ret = -EINVAL;
-		return ret;
+			    num_rep, BNXT_MAX_VF_REPS);
+		return -EINVAL;
 	}

-	/* probe representor ports now */
-	if (!backing_eth_dev)
-		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
-	if (backing_eth_dev == NULL) {
-		ret = -ENODEV;
-		return ret;
+	if (num_rep > RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR,
+			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
+			    num_rep, RTE_MAX_ETHPORTS);
+		return -EINVAL;
 	}

 	backing_bp = backing_eth_dev->data->dev_private;

 	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
@@ -5760,14 +5781,17 @@ static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		/* Returning an error is not an option.
 		 * Applications are not handling this correctly
 		 */
-		return ret;
+		return 0;
 	}

-	for (i = 0; i < eth_da.nb_representor_ports; i++) {
+	if (bnxt_init_rep_info(backing_bp))
+		return 0;
+
+	for (i = 0; i < num_rep; i++) {
 		struct bnxt_vf_representor representor = {
 			.vf_id = eth_da.representor_ports[i],
 			.switch_domain_id = backing_bp->switch_domain_id,
-			.parent_priv = backing_bp
+			.parent_dev = backing_eth_dev
 		};

 		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
@@ -5808,6 +5832,48 @@ static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	return ret;
 }

+static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			  struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+	struct rte_eth_dev *backing_eth_dev;
+	uint16_t num_rep;
+	int ret = 0;
+
+	if (pci_dev->device.devargs) {
+		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+					    &eth_da);
+		if (ret)
+			return ret;
+	}
+
+	num_rep = eth_da.nb_representor_ports;
+	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
+		    num_rep);
+
+	/* We could come here after first level of probe is already invoked
+	 * as part of an application bringup(OVS-DPDK vswitchd), so first check
+	 * for already allocated eth_dev for the backing device (PF/Trusted VF)
+	 */
+	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (backing_eth_dev == NULL) {
+		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+					 sizeof(struct bnxt),
+					 eth_dev_pci_specific_init, pci_dev,
+					 bnxt_dev_init, NULL);
+
+		if (ret || !num_rep)
+			return ret;
+
+		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	}
+
+	/* probe representor ports now */
+	ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev);
+
+	return ret;
+}
+
 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
 {
 	struct rte_eth_dev *eth_dev;
@@ -6,6 +6,11 @@
 #include "bnxt.h"
+#include "bnxt_ring.h"
 #include "bnxt_reps.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
 #include "bnxt_hwrm.h"
 #include "hsi_struct_def_dpdk.h"

 static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
@@ -13,25 +18,128 @@ static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
 	.dev_configure = bnxt_vf_rep_dev_configure_op,
 	.dev_start = bnxt_vf_rep_dev_start_op,
 	.rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
+	.rx_queue_release = bnxt_vf_rep_rx_queue_release_op,
 	.tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
+	.tx_queue_release = bnxt_vf_rep_tx_queue_release_op,
 	.link_update = bnxt_vf_rep_link_update_op,
 	.dev_close = bnxt_vf_rep_dev_close_op,
-	.dev_stop = bnxt_vf_rep_dev_stop_op
+	.dev_stop = bnxt_vf_rep_dev_stop_op,
+	.stats_get = bnxt_vf_rep_stats_get_op,
+	.stats_reset = bnxt_vf_rep_stats_reset_op,
 };

-static uint16_t
-bnxt_vf_rep_rx_burst(__rte_unused void *rx_queue,
-		     __rte_unused struct rte_mbuf **rx_pkts,
-		     __rte_unused uint16_t nb_pkts)
+uint16_t
+bnxt_vfr_recv(struct bnxt *bp, uint16_t cfa_code, uint16_t queue_id,
+	      struct rte_mbuf *mbuf)
 {
+	struct bnxt_sw_rx_bd *prod_rx_buf;
+	struct bnxt_rx_ring_info *rep_rxr;
+	struct bnxt_rx_queue *rep_rxq;
+	struct rte_eth_dev *vfr_eth_dev;
+	struct bnxt_vf_representor *vfr_bp;
+	uint16_t vf_id;
+	uint16_t mask;
+	uint8_t que;
+
+	vf_id = bp->cfa_code_map[cfa_code];
+	/* cfa_code is invalid OR vf_id > MAX REP. Assume normal Rx */
+	if (vf_id == BNXT_VF_IDX_INVALID || vf_id > BNXT_MAX_VF_REPS)
+		return 1;
+	vfr_eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
+	if (!vfr_eth_dev)
+		return 1;
+	vfr_bp = vfr_eth_dev->data->dev_private;
+	if (vfr_bp->rx_cfa_code != cfa_code) {
+		/* cfa_code not meant for this VF rep!!?? */
+		return 1;
+	}
+	/* If rxq_id happens to be > max rep_queue, use rxq0 */
+	que = queue_id < BNXT_MAX_VF_REP_RINGS ? queue_id : 0;
+	rep_rxq = vfr_bp->rx_queues[que];
+	rep_rxr = rep_rxq->rx_ring;
+	mask = rep_rxr->rx_ring_struct->ring_mask;
+
+	/* Put this mbuf on the RxQ of the Representor */
+	prod_rx_buf =
+		&rep_rxr->rx_buf_ring[rep_rxr->rx_prod++ & mask];
+	if (!prod_rx_buf->mbuf) {
+		prod_rx_buf->mbuf = mbuf;
+		vfr_bp->rx_bytes[que] += mbuf->pkt_len;
+		vfr_bp->rx_pkts[que]++;
+	} else {
+		vfr_bp->rx_drop_bytes[que] += mbuf->pkt_len;
+		vfr_bp->rx_drop_pkts[que]++;
+		rte_free(mbuf); /* Representor Rx ring full, drop pkt */
+	}
+
 	return 0;
 }

 static uint16_t
-bnxt_vf_rep_tx_burst(__rte_unused void *tx_queue,
-		     __rte_unused struct rte_mbuf **tx_pkts,
+bnxt_vf_rep_rx_burst(void *rx_queue,
+		     struct rte_mbuf **rx_pkts,
+		     uint16_t nb_pkts)
+{
+	struct bnxt_rx_queue *rxq = rx_queue;
+	struct bnxt_sw_rx_bd *cons_rx_buf;
+	struct bnxt_rx_ring_info *rxr;
+	uint16_t nb_rx_pkts = 0;
+	uint16_t mask, i;
+
+	if (!rxq)
+		return 0;
+
+	rxr = rxq->rx_ring;
+	mask = rxr->rx_ring_struct->ring_mask;
+	for (i = 0; i < nb_pkts; i++) {
+		cons_rx_buf = &rxr->rx_buf_ring[rxr->rx_cons & mask];
+		if (!cons_rx_buf->mbuf)
+			return nb_rx_pkts;
+		rx_pkts[nb_rx_pkts] = cons_rx_buf->mbuf;
+		rx_pkts[nb_rx_pkts]->port = rxq->port_id;
+		cons_rx_buf->mbuf = NULL;
+		nb_rx_pkts++;
+		rxr->rx_cons++;
+	}
+
+	return nb_rx_pkts;
+}
+
+static uint16_t
+bnxt_vf_rep_tx_burst(void *tx_queue,
+		     struct rte_mbuf **tx_pkts,
 		     __rte_unused uint16_t nb_pkts)
 {
+	struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+	struct bnxt_tx_queue *ptxq;
+	struct bnxt *parent;
+	struct bnxt_vf_representor *vf_rep_bp;
+	int qid;
+	int rc;
+	int i;
+
+	if (!vfr_txq)
+		return 0;
+
+	qid = vfr_txq->txq->queue_id;
+	vf_rep_bp = vfr_txq->bp;
+	parent = vf_rep_bp->parent_dev->data->dev_private;
+	pthread_mutex_lock(&parent->rep_info->vfr_lock);
+	ptxq = parent->tx_queues[qid];
+
+	ptxq->tx_cfa_action = vf_rep_bp->tx_cfa_action;
+
+	for (i = 0; i < nb_pkts; i++) {
+		vf_rep_bp->tx_bytes[qid] += tx_pkts[i]->pkt_len;
+		vf_rep_bp->tx_pkts[qid]++;
+	}
+
+	rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts);
+	ptxq->tx_cfa_action = 0;
+	pthread_mutex_unlock(&parent->rep_info->vfr_lock);
+
-	return 0;
+	return rc;
 }
@@ -45,7 +153,7 @@ int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)

 	vf_rep_bp->vf_id = rep_params->vf_id;
 	vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
-	vf_rep_bp->parent_priv = rep_params->parent_priv;
+	vf_rep_bp->parent_dev = rep_params->parent_dev;

 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
 	eth_dev->data->representor_id = rep_params->vf_id;
@@ -63,7 +171,7 @@ int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
 	eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
 	eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
 	/* Link state. Inherited from PF or trusted VF */
-	parent_bp = vf_rep_bp->parent_priv;
+	parent_bp = vf_rep_bp->parent_dev->data->dev_private;
 	link = &parent_bp->eth_dev->data->dev_link;

 	eth_dev->data->dev_link.link_speed = link->link_speed;
@@ -94,16 +202,18 @@ int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
 	uint16_t vf_id;

 	eth_dev->data->mac_addrs = NULL;
-	eth_dev->dev_ops = NULL;

-	parent_bp = rep->parent_priv;
-	if (parent_bp) {
-		parent_bp->num_reps--;
-		vf_id = rep->vf_id;
+	parent_bp = rep->parent_dev->data->dev_private;
+	if (!parent_bp)
+		return 0;
+
+	parent_bp->num_reps--;
+	vf_id = rep->vf_id;
 	if (parent_bp->rep_info)
 		memset(&parent_bp->rep_info[vf_id], 0,
 		       sizeof(parent_bp->rep_info[vf_id]));
 	/* mark that this representor has been freed */
-	}
+	eth_dev->dev_ops = NULL;
 	return 0;
 }
@@ -115,7 +225,7 @@ int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
 	struct rte_eth_link *link;
 	int rc;

-	parent_bp = rep->parent_priv;
+	parent_bp = rep->parent_dev->data->dev_private;
 	rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);

 	/* Link state. Inherited from PF or trusted VF */
@@ -130,15 +240,134 @@ int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
 	return rc;
 }

-int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
+static int bnxt_vfr_alloc(struct bnxt_vf_representor *vfr)
 {
-	bnxt_vf_rep_link_update_op(eth_dev, 1);
+	int rc = 0;
+	struct bnxt *parent_bp;

-	return 0;
+	if (!vfr || !vfr->parent_dev) {
+		PMD_DRV_LOG(ERR,
+			    "No memory allocated for representor\n");
+		return -ENOMEM;
+	}
+
+	parent_bp = vfr->parent_dev->data->dev_private;
+
+	/* Check if representor has been already allocated in FW */
+	if (vfr->tx_cfa_action && vfr->rx_cfa_code)
+		return 0;
+
+	/*
+	 * Alloc VF rep rules in CFA after default VNIC is created.
+	 * Otherwise the FW will create the VF-rep rules with
+	 * default drop action.
+	 */
+
+	/*
+	 * This is where we need to replace invoking an HWRM cmd
+	 * with the new TFLIB ULP API to do more/less the same job
+	rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp,
+				     vfr->vf_id,
+				     &vfr->tx_cfa_action,
+				     &vfr->rx_cfa_code);
+	 */
+	if (!rc) {
+		parent_bp->cfa_code_map[vfr->rx_cfa_code] = vfr->vf_id;
+		PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
+			    vfr->vf_id);
+	} else {
+		PMD_DRV_LOG(ERR,
+			    "Failed to alloc representor %d in FW\n",
+			    vfr->vf_id);
+	}
+
+	return rc;
 }

-void bnxt_vf_rep_dev_stop_op(__rte_unused struct rte_eth_dev *eth_dev)
+static void bnxt_vf_rep_free_rx_mbufs(struct bnxt_vf_representor *rep_bp)
 {
+	struct bnxt_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < rep_bp->rx_nr_rings; i++) {
+		rxq = rep_bp->rx_queues[i];
+		bnxt_rx_queue_release_mbufs(rxq);
+	}
+}
+
+int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+	int rc;
+
+	rc = bnxt_vfr_alloc(rep_bp);
+
+	if (!rc) {
+		eth_dev->rx_pkt_burst = &bnxt_vf_rep_rx_burst;
+		eth_dev->tx_pkt_burst = &bnxt_vf_rep_tx_burst;
+
+		bnxt_vf_rep_link_update_op(eth_dev, 1);
+	} else {
+		eth_dev->data->dev_link.link_status = 0;
+		bnxt_vf_rep_free_rx_mbufs(rep_bp);
+	}
+
+	return rc;
+}
+
+static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
+{
+	int rc = 0;
+	struct bnxt *parent_bp;
+
+	if (!vfr || !vfr->parent_dev) {
+		PMD_DRV_LOG(ERR,
+			    "No memory allocated for representor\n");
+		return -ENOMEM;
+	}
+
+	parent_bp = vfr->parent_dev->data->dev_private;
+
+	/* Check if representor has been already freed in FW */
+	if (!vfr->tx_cfa_action && !vfr->rx_cfa_code)
+		return 0;
+
+	/*
+	 * This is where we need to replace invoking an HWRM cmd
+	 * with the new TFLIB ULP API to do more/less the same job
+	rc = bnxt_hwrm_cfa_vfr_free(parent_bp,
+				    vfr->vf_id);
+	 */
+	if (rc) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to free representor %d in FW\n",
+			    vfr->vf_id);
+		return rc;
+	}
+
+	parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
+	PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
+		    vfr->vf_id);
+	vfr->tx_cfa_action = 0;
+	vfr->rx_cfa_code = 0;
+
+	return rc;
+}
+
+void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt_vf_representor *vfr_bp = eth_dev->data->dev_private;
+
+	/* Avoid crashes as we are about to free queues */
+	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+
+	bnxt_vfr_free(vfr_bp);
+
+	if (eth_dev->data->dev_started)
+		eth_dev->data->dev_link.link_status = 0;
+
+	bnxt_vf_rep_free_rx_mbufs(vfr_bp);
 }

 void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
@@ -156,7 +385,7 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	int rc = 0;

 	/* MAC Specifics */
-	parent_bp = rep_bp->parent_priv;
+	parent_bp = rep_bp->parent_dev->data->dev_private;
 	if (!parent_bp) {
 		PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
 		return rc;
@@ -254,27 +483,225 @@ found:

 int bnxt_vf_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
 {
+	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+
 	PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
+	rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
+	rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+	rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
 	return 0;
 }

-int bnxt_vf_rep_rx_queue_setup_op(__rte_unused struct rte_eth_dev *eth_dev,
-				  __rte_unused uint16_t queue_idx,
-				  __rte_unused uint16_t nb_desc,
-				  __rte_unused unsigned int socket_id,
-				  __rte_unused const struct rte_eth_rxconf *
-				  rx_conf,
-				  __rte_unused struct rte_mempool *mp)
+int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+				  uint16_t queue_idx,
+				  uint16_t nb_desc,
+				  unsigned int socket_id,
+				  __rte_unused const struct rte_eth_rxconf *rx_conf,
+				  __rte_unused struct rte_mempool *mp)
 {
+	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+	struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
+	struct bnxt_rx_queue *parent_rxq;
+	struct bnxt_rx_queue *rxq;
+	struct bnxt_sw_rx_bd *buf_ring;
+	int rc = 0;
+
+	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
+		PMD_DRV_LOG(ERR,
+			    "Cannot create Rx ring %d. %d rings available\n",
+			    queue_idx, BNXT_MAX_VF_REP_RINGS);
+		return -EINVAL;
+	}
+
+	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
+		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
+		return -EINVAL;
+	}
+
+	parent_rxq = parent_bp->rx_queues[queue_idx];
+	if (!parent_rxq) {
+		PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
+		return -EINVAL;
+	}
+
+	if (nb_desc != parent_rxq->nb_rx_desc) {
+		PMD_DRV_LOG(ERR, "nb_desc %d do not match parent rxq", nb_desc);
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->rx_queues) {
+		rxq = eth_dev->data->rx_queues[queue_idx];
+		if (rxq)
+			bnxt_rx_queue_release_op(rxq);
+	}
+
+	rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
+				 sizeof(struct bnxt_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq) {
+		PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
+		return -ENOMEM;
+	}
+
+	rxq->nb_rx_desc = nb_desc;
+
+	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
+	if (rc)
+		goto out;
+
+	buf_ring = rte_zmalloc_socket("bnxt_rx_vfr_buf_ring",
+				      sizeof(struct bnxt_sw_rx_bd) *
+				      rxq->rx_ring->rx_ring_struct->ring_size,
+				      RTE_CACHE_LINE_SIZE, socket_id);
+	if (!buf_ring) {
+		PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rxq->rx_ring->rx_buf_ring = buf_ring;
+	rxq->queue_id = queue_idx;
+	rxq->port_id = eth_dev->data->port_id;
+	eth_dev->data->rx_queues[queue_idx] = rxq;
+
 	return 0;
+
+out:
+	if (rxq)
+		bnxt_rx_queue_release_op(rxq);
+
+	return rc;
 }

+void bnxt_vf_rep_rx_queue_release_op(void *rx_queue)
+{
+	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+
+	if (!rxq)
+		return;
+
+	bnxt_rx_queue_release_mbufs(rxq);
+
+	bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+	bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+	bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+
+	rte_free(rxq);
+}
+
+int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+				  uint16_t queue_idx,
+				  uint16_t nb_desc,
+				  unsigned int socket_id,
+				  __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+	struct bnxt *parent_bp = rep_bp->parent_dev->data->dev_private;
+	struct bnxt_tx_queue *parent_txq, *txq;
+	struct bnxt_vf_rep_tx_queue *vfr_txq;
+
+	if (queue_idx >= BNXT_MAX_VF_REP_RINGS) {
+		PMD_DRV_LOG(ERR,
+			    "Cannot create Tx rings %d. %d rings available\n",
+			    queue_idx, BNXT_MAX_VF_REP_RINGS);
+		return -EINVAL;
+	}
+
+	if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
+		PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
+		return -EINVAL;
+	}
+
+	parent_txq = parent_bp->tx_queues[queue_idx];
+	if (!parent_txq) {
+		PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
+		return -EINVAL;
+	}
+
+	if (nb_desc != parent_txq->nb_tx_desc) {
+		PMD_DRV_LOG(ERR, "nb_desc %d do not match parent txq", nb_desc);
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->tx_queues) {
+		vfr_txq = eth_dev->data->tx_queues[queue_idx];
+		bnxt_vf_rep_tx_queue_release_op(vfr_txq);
+		vfr_txq = NULL;
+	}
+
+	vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
+				     sizeof(struct bnxt_vf_rep_tx_queue),
+				     RTE_CACHE_LINE_SIZE, socket_id);
+	if (!vfr_txq) {
+		PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!");
+		return -ENOMEM;
+	}
+	txq = rte_zmalloc_socket("bnxt_tx_queue",
+				 sizeof(struct bnxt_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (!txq) {
+		PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
+		rte_free(vfr_txq);
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->queue_id = queue_idx;
+	txq->port_id = eth_dev->data->port_id;
+	vfr_txq->txq = txq;
+	vfr_txq->bp = rep_bp;
+	eth_dev->data->tx_queues[queue_idx] = vfr_txq;
+
+	return 0;
+}
+
-int bnxt_vf_rep_tx_queue_setup_op(__rte_unused struct rte_eth_dev *eth_dev,
-				  __rte_unused uint16_t queue_idx,
-				  __rte_unused uint16_t nb_desc,
-				  __rte_unused unsigned int socket_id,
-				  __rte_unused const struct rte_eth_txconf *
-				  tx_conf)
+void bnxt_vf_rep_tx_queue_release_op(void *tx_queue)
 {
+	struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+
+	if (!vfr_txq)
+		return;
+
+	rte_free(vfr_txq->txq);
+	rte_free(vfr_txq);
+}
+
+int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_stats *stats)
+{
+	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+	int i;
+
+	memset(stats, 0, sizeof(*stats));
+	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
+		stats->obytes += rep_bp->tx_bytes[i];
+		stats->opackets += rep_bp->tx_pkts[i];
+		stats->ibytes += rep_bp->rx_bytes[i];
+		stats->ipackets += rep_bp->rx_pkts[i];
+		stats->imissed += rep_bp->rx_drop_pkts[i];
+
+		stats->q_ipackets[i] = rep_bp->rx_pkts[i];
+		stats->q_ibytes[i] = rep_bp->rx_bytes[i];
+		stats->q_opackets[i] = rep_bp->tx_pkts[i];
+		stats->q_obytes[i] = rep_bp->tx_bytes[i];
+		stats->q_errors[i] = rep_bp->rx_drop_pkts[i];
+	}
+
 	return 0;
 }
+
+int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
+	int i;
+
+	for (i = 0; i < BNXT_MAX_VF_REP_RINGS; i++) {
+		rep_bp->tx_pkts[i] = 0;
+		rep_bp->tx_bytes[i] = 0;
+		rep_bp->rx_pkts[i] = 0;
+		rep_bp->rx_bytes[i] = 0;
+		rep_bp->rx_drop_pkts[i] = 0;
+	}
+	return 0;
+}
@@ -9,6 +9,12 @@
 #include <rte_malloc.h>
 #include <rte_ethdev.h>

+#define BNXT_MAX_CFA_CODE	65536
+#define BNXT_VF_IDX_INVALID	0xffff
+
+uint16_t
+bnxt_vfr_recv(struct bnxt *bp, uint16_t cfa_code, uint16_t queue_id,
+	      struct rte_mbuf *mbuf);
 int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params);
 int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev);
 int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
@@ -30,6 +36,11 @@ int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 				  __rte_unused unsigned int socket_id,
 				  __rte_unused const struct rte_eth_txconf *
 				  tx_conf);
+void bnxt_vf_rep_rx_queue_release_op(void *rx_queue);
+void bnxt_vf_rep_tx_queue_release_op(void *tx_queue);
 void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev);
 void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev);
+int bnxt_vf_rep_stats_get_op(struct rte_eth_dev *eth_dev,
+			     struct rte_eth_stats *stats);
+int bnxt_vf_rep_stats_reset_op(struct rte_eth_dev *eth_dev);
 #endif /* _BNXT_REPS_H_ */
@@ -12,6 +12,7 @@
 #include <rte_memory.h>

 #include "bnxt.h"
+#include "bnxt_reps.h"
 #include "bnxt_ring.h"
 #include "bnxt_rxr.h"
 #include "bnxt_rxq.h"
@@ -539,7 +540,7 @@ void bnxt_set_mark_in_mbuf(struct bnxt *bp,
 }

 static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
-			    struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
+		       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
 {
 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
@@ -735,6 +736,20 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 rx:
 	*rx_pkt = mbuf;

+	if ((BNXT_VF_IS_TRUSTED(rxq->bp) || BNXT_PF(rxq->bp)) &&
+	    rxq->bp->cfa_code_map && rxcmp1->cfa_code) {
+		if (!bnxt_vfr_recv(rxq->bp, rxcmp1->cfa_code, rxq->queue_id,
+				   mbuf)) {
+			/* Now return an error so that nb_rx_pkts is not
+			 * incremented.
+			 * This packet was meant to be given to the representor.
+			 * So no need to account the packet and give it to
+			 * parent Rx burst function.
+			 */
+			rc = -ENODEV;
+		}
+	}
+
 next_rx:

 	*raw_cons = tmp_raw_cons;
@@ -751,6 +766,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint32_t raw_cons = cpr->cp_raw_cons;
 	uint32_t cons;
 	int nb_rx_pkts = 0;
+	int nb_rep_rx_pkts = 0;
 	struct rx_pkt_cmpl *rxcmp;
 	uint16_t prod = rxr->rx_prod;
 	uint16_t ag_prod = rxr->ag_prod;
@@ -784,6 +800,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				nb_rx_pkts++;
 			if (rc == -EBUSY)	/* partial completion */
 				break;
+			if (rc == -ENODEV)	/* completion for representor */
+				nb_rep_rx_pkts++;
 		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
 			evt =
 			bnxt_event_hwrm_resp_handler(rxq->bp,
@@ -802,7 +820,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}

 	cpr->cp_raw_cons = raw_cons;
-	if (!nb_rx_pkts && !evt) {
+	if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
 		/*
 		 * For PMD, there is no need to keep on pushing to REARM
 		 * the doorbell if there are no new completions
@@ -188,6 +188,7 @@ struct bnxt_sw_rx_bd {
 struct bnxt_rx_ring_info {
 	uint16_t		rx_prod;
 	uint16_t		ag_prod;
+	uint16_t		rx_cons; /* Needed for representor */
 	struct bnxt_db_info	rx_db;
 	struct bnxt_db_info	ag_db;

@@ -29,6 +29,7 @@ struct bnxt_tx_queue {
 	struct bnxt		*bp;
 	int			index;
 	int			tx_wake_thresh;
+	uint32_t		tx_cfa_action;
 	struct bnxt_tx_ring_info	*tx_ring;

 	unsigned int		cp_nr_rings;
@@ -131,7 +131,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
 				PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
 				PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
-				PKT_TX_QINQ_PKT))
+				PKT_TX_QINQ_PKT) || txq->tx_cfa_action)
 		long_bd = true;

 	nr_bds = long_bd + tx_pkt->nb_segs;
@@ -184,7 +184,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	if (long_bd) {
 		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
 		vlan_tag_flags = 0;
-		cfa_action = 0;
+		cfa_action = txq->tx_cfa_action;
 		/* HW can accelerate only outer vlan in QinQ mode */
 		if (tx_buf->mbuf->ol_flags & PKT_TX_QINQ_PKT) {
 			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
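The bnxt_txr.c hunks above are the whole Tx-side trick: a nonzero
tx_cfa_action on the queue forces a long Tx BD and is copied into its
cfa_action field, which tells the HW to punt the packet to the VF. The
representor's burst function achieves this by borrowing the parent's Tx
queue under vfr_lock, as in bnxt_vf_rep_tx_burst(). A self-contained toy
model of that pattern (not part of the patch; struct parent,
rep_tx_burst() and the values are illustrative stand-ins):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct parent_txq {
	uint32_t tx_cfa_action;		/* stands in for bnxt_tx_queue */
};

struct parent {
	pthread_mutex_t vfr_lock;	/* stands in for bnxt_rep_info.vfr_lock */
	struct parent_txq txq;
};

static uint16_t rep_tx_burst(struct parent *p, uint16_t cfa_action,
			     uint16_t nb_pkts)
{
	uint16_t sent;

	/* Serialize against other representors sharing this parent queue. */
	pthread_mutex_lock(&p->vfr_lock);
	p->txq.tx_cfa_action = cfa_action; /* HW punts these pkts to the VF */
	sent = nb_pkts;                    /* stand-in for bnxt_xmit_pkts() */
	p->txq.tx_cfa_action = 0;          /* restore normal Tx behavior */
	pthread_mutex_unlock(&p->vfr_lock);
	return sent;
}

int main(void)
{
	struct parent p = { .vfr_lock = PTHREAD_MUTEX_INITIALIZER };

	printf("sent %u pkts tagged with cfa_action 0x5\n",
	       (unsigned)rep_tx_burst(&p, 0x5, 4));
	return 0;
}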