net/hns3: remove similar macro function definitions
The driver currently declares a separate macro function for each capability to check whether that capability is supported. This patch replaces them with a single unified macro that takes the capability name as an argument.

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
commit efcaa81e0b (parent c1ba6c325c)
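The pattern behind the change, shown as a minimal standalone sketch (not driver code: names such as demo_dev_get_support, DEMO_SUPPORT_*_B and struct demo_hw are illustrative stand-ins for the hns3 symbols): a single macro uses ## token pasting to build the capability bit name from a short argument, so one definition replaces a dozen per-capability wrappers.

/*
 * Hedged sketch of the unified capability-check macro; compiles as a
 * standalone program. Only the naming pattern mirrors the hns3 driver.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_hw {
	uint64_t capability;
};

/* capability bit positions, analogous to the HNS3_DEV_SUPPORT_*_B enum */
enum {
	DEMO_SUPPORT_DCB_B = 0,
	DEMO_SUPPORT_COPPER_B = 1,
	DEMO_SUPPORT_PTP_B = 2,
};

static inline uint64_t
demo_get_bit(uint64_t value, unsigned int bit)
{
	return (value >> bit) & 1UL;
}

/* one macro for every capability: ## pastes the short name into the enum */
#define demo_dev_get_support(hw, _name) \
	demo_get_bit((hw)->capability, DEMO_SUPPORT_##_name##_B)

int
main(void)
{
	struct demo_hw hw = { .capability = 1UL << DEMO_SUPPORT_DCB_B };

	/* expands to demo_get_bit((&hw)->capability, DEMO_SUPPORT_DCB_B) */
	printf("DCB: %s\n", demo_dev_get_support(&hw, DCB) ? "yes" : "no");
	printf("PTP: %s\n", demo_dev_get_support(&hw, PTP) ? "yes" : "no");
	return 0;
}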
@@ -617,7 +617,7 @@ hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
 static int
 hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
 {
-	if (result != 0 && hns3_dev_copper_supported(hw)) {
+	if (result != 0 && hns3_dev_get_support(hw, COPPER)) {
 		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
			 result);
 		return result;
@@ -656,7 +656,7 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
 	}
 	if (revision == PCI_REVISION_ID_HIP09_A) {
 		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
-		if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
+		if (hns3_dev_get_support(hw, COPPER) == 0 || pf->is_tmp_phy) {
 			PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
 			pf->is_tmp_phy = true;
 			hns3_set_bit(hw->capability,
@@ -674,7 +674,7 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
 	if (is_init) {
 		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
 		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
-		if (hns3_dev_copper_supported(hw))
+		if (hns3_dev_get_support(hw, COPPER))
 			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
 	}
 	req->compat = rte_cpu_to_le_32(compat);
@@ -918,7 +918,7 @@ hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
 	if (ret)
 		return ret;

-	if (!hns3_dev_dcb_supported(hw))
+	if (!hns3_dev_get_support(hw, DCB))
 		return 0;

 	ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
@@ -1368,7 +1368,7 @@ hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
 	}

 	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
-	if (!hns3_dev_dcb_supported(hw))
+	if (!hns3_dev_get_support(hw, DCB))
 		return 0;

 	ret = hns3_pfc_setup_hw(hw);
@@ -2408,7 +2408,7 @@ hns3_setup_dcb(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;

-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		hns3_err(hw, "this port does not support dcb configurations.");
 		return -EOPNOTSUPP;
 	}
@@ -2746,14 +2746,14 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
				 hns3_txvlan_cap_get(hw));

-	if (hns3_dev_outer_udp_cksum_supported(hw))
+	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
 		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;

-	if (hns3_dev_indep_txrx_supported(hw))
+	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;

 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -3421,7 +3421,7 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)

 	switch (media_type) {
 	case HNS3_MEDIA_TYPE_COPPER:
-		if (!hns3_dev_copper_supported(hw)) {
+		if (!hns3_dev_get_support(hw, COPPER)) {
 			PMD_INIT_LOG(ERR,
				     "Media type is copper, not supported.");
 			ret = -EOPNOTSUPP;
@@ -3489,7 +3489,7 @@ hns3_get_board_configuration(struct hns3_hw *hw)
 	}

 	/* Dev does not support DCB */
-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		pf->tc_max = 1;
 		pf->pfc_max = 0;
 	} else
@@ -3802,7 +3802,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
 	tc_num = hns3_get_tc_num(hw);
 	aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);

-	if (hns3_dev_dcb_supported(hw))
+	if (hns3_dev_get_support(hw, DCB))
 		shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
				 pf->dv_buf_size;
 	else
@@ -3819,7 +3819,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,

 	shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
 	buf_alloc->s_buf.buf_size = shared_buf;
-	if (hns3_dev_dcb_supported(hw)) {
+	if (hns3_dev_get_support(hw, DCB)) {
 		buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
					    - roundup(aligned_mps / HNS3_BUF_DIV_BY,
@@ -3830,7 +3830,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
 		buf_alloc->s_buf.self.low = aligned_mps;
 	}

-	if (hns3_dev_dcb_supported(hw)) {
+	if (hns3_dev_get_support(hw, DCB)) {
 		hi_thrd = shared_buf - pf->dv_buf_size;

 		if (tc_num <= NEED_RESERVE_TC_NUM)
@@ -4036,7 +4036,7 @@ static int
 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
 {
 	/* When DCB is not supported, rx private buffer is not allocated. */
-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 		struct hns3_pf *pf = &hns->pf;
 		uint32_t rx_all = pf->pkt_buf_size;
@@ -4264,7 +4264,7 @@ hns3_buffer_alloc(struct hns3_hw *hw)
 		return ret;
 	}

-	if (hns3_dev_dcb_supported(hw)) {
+	if (hns3_dev_get_support(hw, DCB)) {
 		ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
 		if (ret) {
			PMD_INIT_LOG(ERR,
@@ -6233,7 +6233,7 @@ hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;

-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		hns3_err(hw, "This port does not support dcb configurations.");
 		return -EOPNOTSUPP;
 	}
@@ -883,45 +883,8 @@ enum {
 	HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B,
 };

-#define hns3_dev_dcb_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
-
-/* Support copper media type */
-#define hns3_dev_copper_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_COPPER_B)
-
-/* Support the queue region action rule of flow directory */
-#define hns3_dev_fd_queue_region_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B)
-
-/* Support PTP timestamp offload */
-#define hns3_dev_ptp_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_PTP_B)
-
-/* Support to Independently enable/disable/reset Tx or Rx queues */
-#define hns3_dev_indep_txrx_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B)
-
-#define hns3_dev_stash_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
-
-#define hns3_dev_rxd_adv_layout_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B)
-
-#define hns3_dev_outer_udp_cksum_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B)
-
-#define hns3_dev_ras_imp_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RAS_IMP_B)
-
-#define hns3_dev_tx_push_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B)
-
-#define hns3_dev_tm_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TM_B)
-
-#define hns3_dev_vf_vlan_flt_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B)
+#define hns3_dev_get_support(hw, _name) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)

 #define HNS3_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct hns3_adapter *)adapter)->hw)
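With the unified macro in place, a call such as hns3_dev_get_support(hw, COPPER) pastes the short name into the enum constant and expands to hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_COPPER_B), which is the same code the removed hns3_dev_copper_supported(hw) wrapper produced; the call-site conversions in this commit are therefore purely mechanical. (Expansion shown for illustration, derived from the macro definition above.)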
@@ -988,10 +988,10 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
				 hns3_txvlan_cap_get(hw));

-	if (hns3_dev_outer_udp_cksum_supported(hw))
+	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
 		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;

-	if (hns3_dev_indep_txrx_supported(hw))
+	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

@@ -1623,7 +1623,7 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
 	uint8_t msg_data;
 	int ret;

-	if (!hns3_dev_vf_vlan_flt_supported(hw))
+	if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
 		return 0;

 	msg_data = enable ? 1 : 0;
@@ -301,7 +301,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	uint16_t idx;

-	if (!hns3_dev_fd_queue_region_supported(hw))
+	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
 		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION, action,
			"Not support config queue region!");
@@ -2368,7 +2368,7 @@ hns3_handle_error(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;

-	if (hns3_dev_ras_imp_supported(hw)) {
+	if (hns3_dev_get_support(hw, RAS_IMP)) {
 		hns3_handle_hw_error_v2(hw);
 		hns3_schedule_reset(hns);
 	} else {
@@ -61,7 +61,7 @@ hns3_ptp_init(struct hns3_hw *hw)
 {
 	int ret;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return 0;

 	ret = hns3_ptp_int_en(hw, true);
@@ -120,7 +120,7 @@ hns3_timesync_enable(struct rte_eth_dev *dev)
 	struct hns3_pf *pf = &hns->pf;
 	int ret;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	if (pf->ptp_enable)
@@ -140,7 +140,7 @@ hns3_timesync_disable(struct rte_eth_dev *dev)
 	struct hns3_pf *pf = &hns->pf;
 	int ret;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	if (!pf->ptp_enable)
@@ -164,7 +164,7 @@ hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 	struct hns3_pf *pf = &hns->pf;
 	uint64_t ns, sec;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	ns = pf->rx_timestamp & TIME_RX_STAMP_NS_MASK;
@@ -190,7 +190,7 @@ hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 	uint64_t ns;
 	int ts_cnt;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	ts_cnt = hns3_read_dev(hw, HNS3_TX_1588_BACK_TSP_CNT) &
@@ -219,7 +219,7 @@ hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint64_t ns, sec;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L);
@@ -240,7 +240,7 @@ hns3_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
 	uint64_t sec = ts->tv_sec;
 	uint64_t ns = ts->tv_nsec;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	/* Set the timecounters to a new value. */
@@ -261,7 +261,7 @@ hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
 	struct timespec cur_time;
 	uint64_t ns;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	(void)hns3_timesync_read_time(dev, &cur_time);
@@ -280,7 +280,7 @@ hns3_restore_ptp(struct hns3_adapter *hns)
 	bool en = pf->ptp_enable;
 	int ret;

-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return 0;

 	ret = hns3_timesync_configure(hns, en);
@@ -393,7 +393,7 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 	int i;

 	for (i = 0; i < hw->cfg_max_queues; i++) {
-		if (hns3_dev_indep_txrx_supported(hw)) {
+		if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 			rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
 			txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;

@@ -438,7 +438,7 @@ hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
 	struct hns3_hw *hw = &txq->hns->hw;
 	uint32_t reg;

-	if (hns3_dev_indep_txrx_supported(hw)) {
+	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 		reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
 		if (en)
 			reg |= BIT(HNS3_RING_EN_B);
@@ -455,7 +455,7 @@ hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
 	struct hns3_hw *hw = &rxq->hns->hw;
 	uint32_t reg;

-	if (hns3_dev_indep_txrx_supported(hw)) {
+	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 		reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
 		if (en)
 			reg |= BIT(HNS3_RING_EN_B);
@@ -1630,7 +1630,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
 	uint16_t q;
 	int ret;

-	if (hns3_dev_indep_txrx_supported(hw))
+	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		return 0;

 	/* Setup new number of fake RX/TX queues and reconfigure device. */
@@ -1874,7 +1874,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 		conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;

 	rxq->rx_deferred_start = conf->rx_deferred_start;
-	if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+	if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
 		hns3_warn(hw, "deferred start is not supported.");
 		rxq->rx_deferred_start = false;
 	}
@@ -1910,7 +1910,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
					HNS3_PORT_BASE_VLAN_ENABLE;
 	else
 		rxq->pvid_sw_discard_en = false;
-	rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
+	rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
 	rxq->configured = true;
 	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				idx * HNS3_TQP_REG_SIZE);
@@ -2038,7 +2038,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	    dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
 	    dev->rx_pkt_burst == hns3_recv_pkts_vec ||
 	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
-		if (hns3_dev_rxd_adv_layout_supported(hw))
+		if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
 			return adv_layout_ptypes;
 		else
 			return ptypes;
@@ -2940,7 +2940,7 @@ hns3_tx_push_init(struct rte_eth_dev *dev)
 	volatile uint32_t *reg;
 	uint32_t val;

-	if (!hns3_dev_tx_push_supported(hw))
+	if (!hns3_dev_get_support(hw, TX_PUSH))
 		return;

 	reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
@@ -2961,7 +2961,7 @@ hns3_tx_push_queue_init(struct rte_eth_dev *dev,
			struct hns3_tx_queue *txq)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (!hns3_dev_tx_push_supported(hw)) {
+	if (!hns3_dev_get_support(hw, TX_PUSH)) {
 		txq->tx_push_enable = false;
 		return;
 	}
@@ -3006,7 +3006,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	}

 	txq->tx_deferred_start = conf->tx_deferred_start;
-	if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+	if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
 		hns3_warn(hw, "deferred start is not supported.");
 		txq->tx_deferred_start = false;
 	}
@@ -4288,7 +4288,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		return false;

 	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
@@ -4449,7 +4449,7 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	int ret;

-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;

 	rte_spinlock_lock(&hw->lock);
@@ -4495,7 +4495,7 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];

-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;

 	rte_spinlock_lock(&hw->lock);
@@ -4517,7 +4517,7 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 	int ret;

-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;

 	rte_spinlock_lock(&hw->lock);
@@ -4543,7 +4543,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];

-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;

 	rte_spinlock_lock(&hw->lock);
@@ -4716,7 +4716,7 @@ hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
	 * If the hardware support rxd advanced layout, then driver enable it
	 * default.
	 */
-	if (hns3_dev_rxd_adv_layout_supported(hw))
+	if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
 		hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
 }

@@ -19,7 +19,7 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;

 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
@@ -234,7 +234,7 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev)
				 DEV_RX_OFFLOAD_VLAN;

 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;

 	if (dev->data->scattered_rx)
@@ -31,7 +31,7 @@ hns3_tm_conf_init(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);

-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return;

 	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
@@ -58,7 +58,7 @@ hns3_tm_conf_uninit(struct rte_eth_dev *dev)
 	struct hns3_tm_shaper_profile *shaper_profile;
 	struct hns3_tm_node *tm_node;

-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return;

 	if (pf->tm_conf.nb_queue_node > 0) {
@@ -1233,7 +1233,7 @@ hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
 	if (arg == NULL)
 		return -EINVAL;

-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return -EOPNOTSUPP;

 	*(const void **)arg = &hns3_tm_ops;
@@ -1246,7 +1246,7 @@ hns3_tm_dev_start_proc(struct hns3_hw *hw)
 {
 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return;

 	if (pf->tm_conf.root && !pf->tm_conf.committed)
@@ -1295,7 +1295,7 @@ hns3_tm_conf_update(struct hns3_hw *hw)
 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 	struct rte_tm_error error;

-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return 0;

 	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)