drivers/net: advertise no support for keeping flow rules

When the RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP capability bit is zero,
the specified behavior is the same as it was before
this bit was introduced. Explicitly reset it in all PMDs
supporting the rte_flow API in order to attract the attention
of maintainers, who should eventually decide whether to advertise
the new capability. It is already known that
mlx4 and mlx5 will not support this capability.

No similar action is performed for
RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP,
because no PMD except mlx5 supports indirect actions.
Any PMD that starts doing so will have to consider
all relevant API anyway, including this capability.

Suggested-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Author:    Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Date:      2021-11-02 19:01:32 +02:00
Committer: Ferruh Yigit <ferruh.yigit@intel.com>
Parent:    2c9cd45de7
Commit:    2fe6f1b762

26 files changed, 31 insertions(+), 0 deletions(-)
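
As context for the capability semantics described above, here is a minimal
sketch (not part of this commit; the helper name is hypothetical) of how an
application might probe whether a port keeps flow rules and indirect actions
across a device restart:

#include <stdio.h>

#include <rte_ethdev.h>

/* Hypothetical helper: report whether a port keeps flow rules and
 * indirect actions across rte_eth_dev_stop()/rte_eth_dev_start().
 * A zero bit means the long-standing default behavior: the objects
 * may be invalidated and must be recreated by the application.
 */
static void
report_keep_capa(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;
	if (dev_info.dev_capa & RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP)
		printf("port %u: flow rules kept across restart\n", port_id);
	if (dev_info.dev_capa & RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP)
		printf("port %u: indirect actions kept across restart\n",
		       port_id);
}

Each hunk below clears RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP in the respective
driver's dev_info_get callback.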

@@ -1000,6 +1000,7 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
 	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {

@@ -546,6 +546,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->max_tx_queues = max_rx_rings;
 	dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
 	dev_info->hash_key_size = 40;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	/* MTU specifics */
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

@@ -68,6 +68,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->speed_capa = dev->speed_capa;
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	return 0;
 }

@@ -131,6 +131,8 @@ int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 	device_info->max_vfs = adapter->params.arch.vfcount;
 	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
+	device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	device_info->rx_queue_offload_capa = 0UL;
 	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

@@ -254,6 +254,7 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
 			RTE_ETH_LINK_SPEED_2_5G |
 			RTE_ETH_LINK_SPEED_10G;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;

@@ -1101,6 +1101,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
 			RTE_ETH_LINK_SPEED_1G;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	/* Preferred queue parameters */
 	dev_info->default_rxportconf.nb_queues = 1;
 	dev_info->default_txportconf.nb_queues = 1;

@@ -2168,6 +2168,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
 	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
 				    dev_info->tx_queue_offload_capa;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	switch (hw->mac.type) {
 	case e1000_82575:

@@ -469,6 +469,7 @@ static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 	device_info->rx_offload_capa = enic->rx_offload_capa;
 	device_info->tx_offload_capa = enic->tx_offload_capa;
 	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
+	device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	device_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
 	};

@@ -1227,6 +1227,7 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
 	infos->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	infos->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
 		struct rte_eth_dev_info sub_info;

@@ -751,6 +751,8 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 				RTE_ETH_TX_OFFLOAD_TCP_TSO |
 				RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
 	info->reta_size = HINIC_RSS_INDIR_SIZE;
 	info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;

@@ -2598,6 +2598,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	if (hns3_dev_get_support(hw, PTP))
 		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

@@ -699,6 +699,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = HNS3_MAX_RING_DESC,

@@ -3750,6 +3750,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		sizeof(uint32_t);

@@ -35,6 +35,8 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
 	/* get dev info for the vdev */
 	dev_info->device = ethdev->device;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
 	dev_info->max_tx_queues = ethdev->data->nb_tx_queues;

@@ -1056,6 +1056,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = vf->vf_res->rss_lut_size;
 	dev_info->flow_type_rss_offloads = IAVF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IAVF_NUM_MACADDR_MAX;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_offload_capa =
 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
 		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |

@@ -663,6 +663,7 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->hash_key_size = hw->vf_res->rss_key_size;
 	dev_info->reta_size = hw->vf_res->rss_lut_size;
 	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_offload_capa =
 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |

@@ -1477,6 +1477,7 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
 	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

@@ -96,6 +96,7 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
 	dev_info->dev_capa =
 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->switch_info.name = ethdev->device->name;
 	dev_info->switch_info.domain_id = rpst->switch_domain_id;

@@ -1709,6 +1709,8 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev,
 {
 	struct mrvl_priv *priv = dev->data->dev_private;
 
+	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
 	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
 			   RTE_ETH_LINK_SPEED_100M |
 			   RTE_ETH_LINK_SPEED_1G |

@@ -583,6 +583,7 @@ otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	return 0;
 }

@@ -1367,6 +1367,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
 	dev_info->rx_desc_lim = qede_rx_desc_lim;
 	dev_info->tx_desc_lim = qede_tx_desc_lim;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	if (IS_PF(edev))
 		dev_info->max_rx_queues = (uint16_t)RTE_MIN(

@@ -186,6 +186,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	if (mae->status == SFC_MAE_STATUS_SUPPORTED ||
 	    mae->status == SFC_MAE_STATUS_ADMIN) {

@@ -93,6 +93,7 @@ pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	dev_info->max_rx_pktlen = UINT32_MAX;
 	dev_info->max_rx_queues = UINT16_MAX;
 	dev_info->max_tx_queues = UINT16_MAX;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	return 0;
 }

@@ -1006,6 +1006,7 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	 * functions together and not in partial combinations
 	 */
 	dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	return 0;
 }

@@ -2597,6 +2597,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_vfs = pci_dev->max_vfs;
 	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);

@@ -487,6 +487,7 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
 	dev_info->max_vfs = pci_dev->max_vfs;
 	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);