net/i40e: remove devarg use-latest-supported-vec

As the EAL parameter --force-max-simd-bitwidth has already been introduced,
remove the devarg use-latest-supported-vec support to make the
rx/tx function selection clearer.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
This commit is contained in:
Leyi Rong 2021-01-14 14:39:49 +08:00 committed by Ferruh Yigit
parent 591bd5e4ae
commit 6ada10deac
4 changed files with 59 additions and 142 deletions

View File

@ -209,15 +209,6 @@ Runtime Config Options
Currently hot-plugging of representor ports is not supported so all required
representors must be specified on the creation of the PF.
- ``Use latest supported vector`` (default ``disable``)
The latest supported vector path may not always deliver the best performance, so the
vector path was recommended only on later platforms. But users may want the latest
vector path since it can perform better in some real workload cases. So the ``devargs`` param
``use-latest-supported-vec`` is introduced, for example::
-a 84:00.0,use-latest-supported-vec=1
- ``Enable validation for VF message`` (default ``not enabled``)
The PF counts messages from each VF. If in any period of seconds the message

View File

@ -45,7 +45,6 @@
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec"
#define ETH_I40E_VF_MSG_CFG "vf_msg_cfg"
#define I40E_CLEAR_PXE_WAIT_MS 200
@ -403,7 +402,6 @@ static const char *const valid_keys[] = {
ETH_I40E_FLOATING_VEB_LIST_ARG,
ETH_I40E_SUPPORT_MULTI_DRIVER,
ETH_I40E_QUEUE_NUM_PER_VF_ARG,
ETH_I40E_USE_LATEST_VEC,
ETH_I40E_VF_MSG_CFG,
NULL};
@ -1317,62 +1315,6 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
}
/*
 * Kvargs handler for the "use-latest-supported-vec" devarg.
 *
 * @key:    unused (the kvargs key, always ETH_I40E_USE_LATEST_VEC here)
 * @value:  the user-supplied string value; expected to be "0" or "1"
 * @opaque: pointer to the port's struct i40e_adapter
 *
 * Stores the parsed flag in ad->use_latest_vec. Always returns 0 so
 * rte_kvargs_process() continues; a bad value is coerced, not rejected.
 */
static int
i40e_parse_latest_vec_handler(__rte_unused const char *key,
				const char *value,
				void *opaque)
{
	struct i40e_adapter *ad = opaque;
	int use_latest_vec;

	/* atoi() yields 0 for non-numeric input, so garbage reads as "0". */
	use_latest_vec = atoi(value);

	if (use_latest_vec != 0 && use_latest_vec != 1) {
		PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
		/*
		 * Honor what the warning promises: previously the raw value
		 * was stored, so e.g. 256 truncated to 0 in the uint8_t field
		 * and silently disabled the feature despite this message.
		 */
		use_latest_vec = 1;
	}

	ad->use_latest_vec = (uint8_t)use_latest_vec;

	return 0;
}
static int
i40e_use_latest_vec(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_kvargs *kvlist;
int kvargs_count;
ad->use_latest_vec = false;
if (!dev->device->devargs)
return 0;
kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
if (!kvlist)
return -EINVAL;
kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
if (!kvargs_count) {
rte_kvargs_free(kvlist);
return 0;
}
if (kvargs_count > 1)
PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
"the first invalid or last valid one is used !",
ETH_I40E_USE_LATEST_VEC);
if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
i40e_parse_latest_vec_handler, ad) < 0) {
rte_kvargs_free(kvlist);
return -EINVAL;
}
rte_kvargs_free(kvlist);
return 0;
}
static int
read_vf_msg_config(__rte_unused const char *key,
const char *value,
@ -1523,8 +1465,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
/* Check if users want the latest supported vec path */
i40e_use_latest_vec(dev);
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
@ -12447,5 +12387,4 @@ RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
ETH_I40E_FLOATING_VEB_ARG "=1"
ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
ETH_I40E_USE_LATEST_VEC "=0|1");
ETH_I40E_SUPPORT_MULTI_DRIVER "=1");

View File

@ -1309,9 +1309,6 @@ struct i40e_adapter {
uint64_t flow_types_mask;
uint64_t pctypes_mask;
/* For devargs */
uint8_t use_latest_vec;
/* For RSS reta table update */
uint8_t rss_reta_updated;
};

View File

@ -3120,36 +3120,40 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.offloads = txq->offloads;
}
/*
 * NOTE(review): this span is a rendered diff hunk, not compilable C.
 * Removed lines of the old i40e_get_latest_rx_vec() are interleaved with
 * the newly added get_avx_supported(); the groupings are marked below.
 */
/* removed (old function header): */
static eth_rx_burst_t
i40e_get_latest_rx_vec(bool scatter)
/* added: helper that reports whether the requested AVX level (AVX512 when
 * request_avx512 is true, otherwise AVX2) is usable, combining the EAL
 * SIMD bitwidth limit, runtime CPU flags, and build-time support. */
static inline bool
get_avx_supported(bool request_avx512)
{
/* removed (old i40e_get_latest_rx_vec body): */
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
i40e_recv_pkts_vec_avx2;
/* added (new get_avx_supported body resumes): */
#ifdef RTE_ARCH_X86
if (request_avx512) {
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
#ifdef CC_AVX512_SUPPORT
return true;
#else
/* CPU is capable but the build lacks AVX512 code paths. */
PMD_DRV_LOG(NOTICE,
"AVX512 is not supported in build env");
return false;
#endif
/* removed (old i40e_get_latest_rx_vec fallback return): */
return scatter ? i40e_recv_scattered_pkts_vec :
i40e_recv_pkts_vec;
/* added (AVX2 branch of get_avx_supported): */
} else {
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
#ifdef CC_AVX2_SUPPORT
return true;
#else
PMD_DRV_LOG(NOTICE,
"AVX2 is not supported in build env");
return false;
#endif
}
#else
/* Non-x86 build: silence the unused-parameter warning. */
RTE_SET_USED(request_avx512);
#endif /* RTE_ARCH_X86 */
return false;
}
/*
 * Select the recommended vector Rx burst function: the AVX2 path on
 * platforms judged to benefit from it, otherwise the baseline SSE path.
 * @scatter chooses between the scattered and non-scattered variants.
 */
static eth_rx_burst_t
i40e_get_recommend_rx_vec(bool scatter)
{
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	/*
	 * AVX frequency can differ from base frequency, so restrict the
	 * AVX2 path to later platforms (those with AVX512F) rather than
	 * every CPU that could theoretically run it.
	 */
	bool use_avx2 = rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
		rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256;

	if (use_avx2) {
		if (scatter)
			return i40e_recv_scattered_pkts_vec_avx2;
		return i40e_recv_pkts_vec_avx2;
	}
#endif
	if (scatter)
		return i40e_recv_scattered_pkts_vec;
	return i40e_recv_pkts_vec;
}
void __rte_cold
i40e_set_rx_function(struct rte_eth_dev *dev)
@ -3157,6 +3161,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
uint16_t rx_using_sse, i;
bool use_avx2 = false;
/* In order to allow Vector Rx there are a few configuration
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
@ -3179,20 +3184,30 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
break;
}
}
use_avx2 = get_avx_supported(0);
}
}
if (ad->rx_vec_allowed &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
/* Vec Rx path */
PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
if (dev->data->scattered_rx) {
PMD_INIT_LOG(DEBUG,
"Using %sVector Scattered Rx (port %d).",
use_avx2 ? "avx2 " : "",
dev->data->port_id);
if (ad->use_latest_vec)
dev->rx_pkt_burst =
i40e_get_latest_rx_vec(dev->data->scattered_rx);
else
dev->rx_pkt_burst =
i40e_get_recommend_rx_vec(dev->data->scattered_rx);
dev->rx_pkt_burst = use_avx2 ?
i40e_recv_scattered_pkts_vec_avx2 :
i40e_recv_scattered_pkts_vec;
} else {
PMD_INIT_LOG(DEBUG,
"Using %sVector Rx (port %d).",
use_avx2 ? "avx2 " : "",
dev->data->port_id);
dev->rx_pkt_burst = use_avx2 ?
i40e_recv_pkts_vec_avx2 :
i40e_recv_pkts_vec;
}
} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
@ -3293,39 +3308,13 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
txq->queue_id);
}
/*
 * Select the newest usable vector Tx burst function: AVX2 when the CPU
 * supports it and the EAL SIMD bitwidth limit allows 256-bit vectors,
 * otherwise the baseline SSE vector path.
 */
static eth_tx_burst_t
i40e_get_latest_tx_vec(void)
{
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
			return i40e_xmit_pkts_vec_avx2;
	}
#endif
	return i40e_xmit_pkts_vec;
}
/*
 * Select the recommended vector Tx burst function: the AVX2 path on
 * platforms judged to benefit from it, otherwise the baseline SSE path.
 */
static eth_tx_burst_t
i40e_get_recommend_tx_vec(void)
{
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	/*
	 * AVX frequency can differ from base frequency, so restrict the
	 * AVX2 path to later platforms (those with AVX512F) rather than
	 * every CPU that could theoretically run it.
	 */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
			return i40e_xmit_pkts_vec_avx2;
	}
#endif
	return i40e_xmit_pkts_vec;
}
void __rte_cold
i40e_set_tx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
int i;
bool use_avx2 = false;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
if (ad->tx_vec_allowed) {
@ -3338,19 +3327,20 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
break;
}
}
use_avx2 = get_avx_supported(0);
}
}
if (ad->tx_simple_allowed) {
if (ad->tx_vec_allowed &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
if (ad->use_latest_vec)
dev->tx_pkt_burst =
i40e_get_latest_tx_vec();
else
dev->tx_pkt_burst =
i40e_get_recommend_tx_vec();
PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
use_avx2 ? "avx2 " : "",
dev->data->port_id);
dev->tx_pkt_burst = use_avx2 ?
i40e_xmit_pkts_vec_avx2 :
i40e_xmit_pkts_vec;
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;