net/bnxt: remove unused macros and fields

Remove unused structure fields and macro definitions.

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Author:    Lance Richardson <lance.richardson@broadcom.com>
Date:      2020-10-08 10:30:41 -04:00
Committer: Ferruh Yigit
Commit:    b80da220c1 (parent: e8a83681f4)

4 changed files with 0 additions and 155 deletions

--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h

@@ -20,19 +20,14 @@ struct bnxt_rx_queue {
* and fast path
*/
struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */
struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */
uint64_t mbuf_initializer; /* val to init mbuf */
uint16_t nb_rx_desc; /* num of RX desc */
uint16_t rx_tail; /* cur val of RDT register */
uint16_t nb_rx_hold; /* num held free RX desc */
uint16_t rx_free_thresh; /* max free RX desc to hold */
uint16_t queue_id; /* RX queue index */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
uint16_t rxrearm_nb; /* number of descs to reinit. */
uint16_t rxrearm_start; /* next desc index to reinit. */
#endif
uint16_t reg_idx; /* RX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
uint8_t rx_deferred_start; /* not in global dev start */
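
Note: the surviving rx_deferred_start flag mirrors rte_eth_rxconf.rx_deferred_start, which keeps a queue stopped across rte_eth_dev_start() until the application starts it explicitly. A minimal usage sketch, assuming an already-configured port (the queue index and ring size here are illustrative):

#include <rte_ethdev.h>

static int
setup_deferred_rx(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rxconf = dev_info.default_rxconf;
	rxconf.rx_deferred_start = 1; /* skip this queue in rte_eth_dev_start() */

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     &rxconf, mb_pool);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(port_id); /* queue 0 stays stopped */
	if (ret != 0)
		return ret;

	return rte_eth_rx_queue_start(port_id, 0); /* start it explicitly */
}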

--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h

@@ -7,110 +7,6 @@
#define _BNXT_RXR_H_
#include "hsi_struct_def_dpdk.h"
#define B_RX_DB(db, prod) \
(*(uint32_t *)db = (DB_KEY_RX | (prod)))
#define BNXT_TPA_L4_SIZE(x) \
{ \
typeof(x) hdr_info = (x); \
(((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) \
}
#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
(((hdr_info) >> 18) & 0x1ff)
#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
(((hdr_info) >> 9) & 0x1ff)
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)
#define flags2_0xf(rxcmp1) \
(((rxcmp1)->flags2) & 0xf)
/* IP non tunnel can be with or without L4-
* Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP Or
* Ether / (vlan) / outer IP|IP6 / ICMP
* we use '==' instead of '&' because tunnel pkts have all 4 fields set.
*/
#define IS_IP_NONTUNNEL_PKT(flags2_f) \
( \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \
)
/* IP Tunnel pkt must have at least tunnel-IP-calc set.
* again the tunnel (i.e. outer) L4 is optional because of
* Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
* UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
* UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
* UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
* also inner L3 chksum error is not taken into consideration by DPDK.
*/
#define IS_IP_TUNNEL_PKT(flags2_f) \
((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))
/* RX_PKT_CMPL_ERRORS_IP_CS_ERROR only for Non-tunnel pkts.
* For tunnel pkts RX_PKT_CMPL_ERRORS_IP_CS_ERROR is not accounted and treated
* as good csum pkt.
*/
#define RX_CMP_IP_CS_ERROR(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
#define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))
#define RX_CMP_IP_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
/* L4 non tunnel pkt-
* Ether / (vlan) / IP6 / UDP|TCP|SCTP
*/
#define IS_L4_NONTUNNEL_PKT(flags2_f) \
( \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC))))
/* L4 tunnel pkt-
* Outer L4 is not mandatory. Eg: GRE-
* Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
* UDP|TCP|SCTP
*/
#define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)))
#define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)))
#define IS_L4_TUNNEL_PKT(flags2_f) \
( \
IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \
IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
)
#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
RX_TPA_START_CMPL_AGG_ID_SFT)
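
Note: the comments above describe the scheme these (now removed) predicates implemented: flags2 records which checksums the hardware calculated, and errors_v2 records which of them failed. A hedged sketch of how a scalar RX path could consume them to fill mbuf ol_flags; the function name is illustrative, the struct comes from hsi_struct_def_dpdk.h, and PKT_RX_IP_CKSUM_* are the pre-21.11 DPDK mbuf flag names (rte_mbuf.h):

/* Illustrative only: derive IP checksum ol_flags from a completion entry. */
static uint64_t
rx_ip_cksum_flags(struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t flags2_f = flags2_0xf(rxcmp1);

	if (IS_IP_NONTUNNEL_PKT(flags2_f))
		return RX_CMP_IP_CS_ERROR(rxcmp1) ?
			PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD;

	if (IS_IP_TUNNEL_PKT(flags2_f))
		/* Per the comment above, only the outer IP checksum
		 * error is accounted for tunnel packets.
		 */
		return RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ?
			PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD;

	if (RX_CMP_IP_CS_UNKNOWN(rxcmp1))
		return PKT_RX_IP_CKSUM_UNKNOWN; /* hardware did not check */

	return 0;
}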
@@ -141,42 +37,11 @@ static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
#define BNXT_TPA_END_AGG_ID_TH(cmp) \
rte_le_to_cpu_16((cmp)->agg_id)
#define RX_CMP_L4_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)
#define RX_CMP_T_L4_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
#define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS)
/* Outer L4 chksum error
*/
#define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))
/* Inner L4 chksum error
*/
#define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
#define BNXT_RX_POST_THRESH 32
/* Number of descriptors to process per inner loop in vector mode. */
#define RTE_BNXT_DESCS_PER_LOOP 4U
enum pkt_hash_types {
PKT_HASH_TYPE_NONE, /* Undefined type */
PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
};
struct bnxt_tpa_info {
struct rte_mbuf *mbuf;
uint16_t len;
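
Note: the removed L4 predicates follow the same pattern. A companion sketch under the same assumptions as the IP-checksum sketch earlier (illustrative function, pre-21.11 flag names); the outer-L4 branch reflects the comment that an outer L4 checksum exists only for UDP-based tunnels:

/* Illustrative only: derive L4 checksum ol_flags from a completion entry. */
static uint64_t
rx_l4_cksum_flags(struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t flags2_f = flags2_0xf(rxcmp1);

	if (IS_L4_NONTUNNEL_PKT(flags2_f))
		return RX_CMP_L4_INNER_CS_ERR2(rxcmp1) ?
			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;

	if (IS_L4_TUNNEL_PKT(flags2_f)) {
		uint64_t ol = RX_CMP_L4_INNER_CS_ERR2(rxcmp1) ?
			PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;

		/* Outer L4 checksum is only present for UDP-based
		 * tunnels (the INNER_OUTER_L4_CS case).
		 */
		if (IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) &&
		    RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))
			ol |= PKT_RX_OUTER_L4_CKSUM_BAD;
		return ol;
	}

	if (RX_CMP_L4_CS_UNKNOWN(rxcmp1))
		return PKT_RX_L4_CKSUM_UNKNOWN; /* hardware did not check */

	return 0;
}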

--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h

@@ -14,18 +14,11 @@ struct bnxt_cp_ring_info;
struct bnxt_tx_queue {
uint16_t nb_tx_desc; /* number of TX descriptors */
uint16_t tx_free_thresh;/* minimum TX before freeing */
/** Index to last TX descriptor to have been cleaned. */
uint16_t last_desc_cleaned;
/** Total number of TX descriptors ready to be allocated. */
uint16_t tx_next_dd; /* next desc to scan for DD bit */
uint16_t tx_next_rs; /* next desc to set RS bit */
uint16_t queue_id; /* TX queue index */
uint16_t reg_idx; /* TX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t pthresh; /* Prefetch threshold register */
uint8_t hthresh; /* Host threshold register */
uint8_t wthresh; /* Write-back threshold reg */
uint32_t ctx_curr; /* Hardware context states */
uint8_t tx_deferred_start; /* not in global dev start */
uint8_t tx_started; /* TX queue is started */

--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h

@@ -8,13 +8,9 @@
#include <rte_io.h>
#define MAX_TX_RINGS 16
#define BNXT_TX_PUSH_THRESH 92
#define BNXT_MAX_TSO_SEGS 32
#define BNXT_MIN_PKT_SIZE 52
#define B_TX_DB(db, prod) rte_write32((DB_KEY_TX | (prod)), db)
struct bnxt_tx_ring_info {
uint16_t tx_prod;
uint16_t tx_cons;
@@ -25,15 +21,11 @@ struct bnxt_tx_ring_info {
rte_iova_t tx_desc_mapping;
#define BNXT_DEV_STATE_CLOSING 0x1
uint32_t dev_state;
struct bnxt_ring *tx_ring_struct;
};
struct bnxt_sw_tx_bd {
struct rte_mbuf *mbuf; /* mbuf associated with TX descriptor */
uint8_t is_gso;
unsigned short nr_bds;
};
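
Note: the surviving B_TX_DB macro is the TX doorbell write: once descriptors are filled, the driver publishes the new producer index to the NIC with a single MMIO store (rte_write32() from rte_io.h, included above, issues rte_io_wmb() before the store). A schematic of the pattern; the wrapper and its arguments are illustrative, and DB_KEY_TX comes from the driver's other headers:

/* Illustrative only: ring the TX doorbell after queueing descriptors. */
static void
tx_ring_commit(struct bnxt_tx_ring_info *txr, void *db_addr)
{
	/* Descriptor stores become visible to the device before the
	 * doorbell write, since rte_write32() includes the barrier.
	 */
	B_TX_DB(db_addr, txr->tx_prod);
}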