net/bnxt: fix device capability reporting

1. Add two functions, bnxt_get_tx_port_offloads() and
   bnxt_get_rx_port_offloads(), to report the device tx/rx offload
   capabilities to the application (see the usage sketch after this list).
2. This removes some duplicated code in the driver and makes the
   VF representor report the same capabilities as the VF.
3. It also allows offload capabilities to be reported selectively,
   based on FW support.
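
   For context, a minimal sketch (not part of the diff below) of how an
   application consumes the capabilities reported here, via
   rte_eth_dev_info_get(); the function name and the LRO/TSO checks are
   illustrative assumptions only:

       #include <rte_ethdev.h>

       /* Query what the PMD advertises and request only those offloads. */
       static void apply_supported_offloads(uint16_t port_id,
                                            struct rte_eth_conf *conf)
       {
               struct rte_eth_dev_info dev_info;

               if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                       return;

               if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
                       conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

               if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
                       conf->txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
       }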

Fixes: 0a6d2a7200 ("net/bnxt: get device infos")
Cc: stable@dpdk.org

Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
commit 47a956a8fe (parent 2646926002)
Author:    Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Date:      2022-04-27 20:28:06 +05:30
Committer: Ajit Khaparde <ajit.khaparde@broadcom.com>

7 changed files with 54 additions and 36 deletions

@@ -580,30 +580,6 @@ struct bnxt_rep_info {
 				  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
 				  RTE_ETH_RSS_LEVEL_MASK)
 
-#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
-				     RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
-				     RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
-				     RTE_ETH_TX_OFFLOAD_TCP_TSO | \
-				     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
-				     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
-				     RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
-				     RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
-				     RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
-				     RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
-
-#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
-				     RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
-				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
-				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
-				     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
-				     RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
-				     RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
-				     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
-				     RTE_ETH_RX_OFFLOAD_TCP_LRO | \
-				     RTE_ETH_RX_OFFLOAD_SCATTER | \
-				     RTE_ETH_RX_OFFLOAD_RSS_HASH)
-
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
 
 struct bnxt_flow_stat_info {

@@ -971,16 +971,10 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->min_rx_bufsize = 1;
 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
 
-	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
-	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
-	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
-		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp);
 	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
-	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
+	dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) |
 				    dev_info->tx_queue_offload_capa;
-	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
-		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);

@@ -567,10 +567,8 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->min_rx_bufsize = 1;
 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
 
-	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
-	if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
-		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
-	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+	dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(parent_bp);
+	dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(parent_bp);
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
 	dev_info->switch_info.name = eth_dev->device->name;

@@ -20,6 +20,31 @@
  * RX Queues
  */
 
+uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
+{
+	uint64_t rx_offload_capa;
+
+	rx_offload_capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+			  RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			  RTE_ETH_RX_OFFLOAD_TCP_LRO |
+			  RTE_ETH_RX_OFFLOAD_SCATTER |
+			  RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	rx_offload_capa |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;
+
+	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
+	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
+		rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	return rx_offload_capa;
+}
+
 /* Determine whether the current configuration needs aggregation ring in HW. */
 int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev)
 {

@@ -65,4 +65,5 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
 void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
 int bnxt_need_agg_ring(struct rte_eth_dev *eth_dev);
 void bnxt_free_rxq_mem(struct bnxt_rx_queue *rxq);
+uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp);
 #endif

@@ -17,6 +17,29 @@
  * TX Queues
  */
 
+uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp)
+{
+	uint64_t tx_offload_capa;
+
+	tx_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
+			  RTE_ETH_TX_OFFLOAD_TCP_TSO |
+			  RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+	tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			   RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
+			   RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+
+	return tx_offload_capa;
+}
+
 void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
 {
 	if (txq && txq->cp_ring && txq->cp_ring->hw_stats)

@@ -43,4 +43,5 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
 			   const struct rte_eth_txconf *tx_conf);
+uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp);
 #endif