ethdev: replace callback getting filter operations

Since rte_flow is the only API for filtering operations,
the legacy driver interface filter_ctrl was too complicated
for the simple task of getting the struct rte_flow_ops.

The filter type RTE_ETH_FILTER_GENERIC and
the filter operation RTE_ETH_FILTER_GET are removed.
The new driver callback flow_ops_get replaces filter_ctrl.

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
This commit is contained in:
Thomas Monjalon 2021-03-21 10:00:00 +01:00 committed by Ferruh Yigit
parent 8d96d605d3
commit fb7ad441d4
53 changed files with 238 additions and 721 deletions

View File

@ -402,9 +402,9 @@ Supports configuring link flow control.
Flow API
--------
Supports the DPDK Flow API for generic filtering.
Supports flow API family.
* **[implements] eth_dev_ops**: ``filter_ctrl:RTE_ETH_FILTER_GENERIC``.
* **[implements] eth_dev_ops**: ``flow_ops_get``.
* **[implements] rte_flow_ops**: ``All``.

View File

@ -22,11 +22,6 @@ defined in ``rte_flow.h``.
queues, to virtual/physical device functions or ports, performing tunnel
offloads, adding marks and so on.
It is slightly higher-level than the legacy filtering framework which it
encompasses and supersedes (including all functions and filter types) in
order to expose a single interface with an unambiguous behavior that is
common to all poll-mode drivers (PMDs).
Flow rule
---------
@ -3104,7 +3099,6 @@ port and may return errors such as ``ENOTSUP`` ("not supported"):
- Configuring MAC addresses.
- Configuring multicast addresses.
- Configuring VLAN filters.
- Configuring Rx filters through the legacy API (e.g. FDIR).
- Configuring global RSS settings.
.. code-block:: c
@ -3331,13 +3325,7 @@ The PMD interface is defined in ``rte_flow_driver.h``. It is not subject to
API/ABI versioning constraints as it is not exposed to applications and may
evolve independently.
It is currently implemented on top of the legacy filtering framework through
filter type *RTE_ETH_FILTER_GENERIC* that accepts the single operation
*RTE_ETH_FILTER_GET* to return PMD-specific *rte_flow* callbacks wrapped
inside ``struct rte_flow_ops``.
This overhead is temporarily necessary in order to keep compatibility with
the legacy filtering framework, which should eventually disappear.
The PMD interface is based on callbacks pointed by the ``struct rte_flow_ops``.
- PMD callbacks implement exactly the interface described in `Rules
management`_, except for the port ID argument which has already been

View File

@ -997,9 +997,7 @@ void bnxt_flow_cnt_alarm_cb(void *arg);
int bnxt_flow_stats_req(struct bnxt *bp);
int bnxt_flow_stats_cnt(struct bnxt *bp);
uint32_t bnxt_get_speed_capabilities(struct bnxt *bp);
int bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
#endif

View File

@ -3236,9 +3236,8 @@ bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
}
int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
struct bnxt *bp = dev->data->dev_private;
int ret = 0;
@ -3251,10 +3250,8 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
bp = vfr->parent_dev->data->dev_private;
/* parent is deleted while children are still valid */
if (!bp) {
PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n",
dev->data->port_id,
filter_type,
filter_op);
PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n",
dev->data->port_id);
return -EIO;
}
}
@ -3263,27 +3260,16 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
if (ret)
return ret;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
/* PMD supports thread-safe flow operations. rte_flow API
* functions can avoid mutex for multi-thread safety.
*/
dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
/* PMD supports thread-safe flow operations. rte_flow API
* functions can avoid mutex for multi-thread safety.
*/
dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
if (BNXT_TRUFLOW_EN(bp))
*ops = &bnxt_ulp_rte_flow_ops;
else
*ops = &bnxt_flow_ops;
if (BNXT_TRUFLOW_EN(bp))
*(const void **)arg = &bnxt_ulp_rte_flow_ops;
else
*(const void **)arg = &bnxt_flow_ops;
break;
default:
PMD_DRV_LOG(ERR,
"Filter type (%d) not supported", filter_type);
ret = -EINVAL;
break;
}
return ret;
}
@ -3800,7 +3786,7 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.rx_queue_stop = bnxt_rx_queue_stop,
.tx_queue_start = bnxt_tx_queue_start,
.tx_queue_stop = bnxt_tx_queue_stop,
.filter_ctrl = bnxt_filter_ctrl_op,
.flow_ops_get = bnxt_flow_ops_get_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
.get_eeprom = bnxt_get_eeprom_op,

View File

@ -29,7 +29,7 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = {
.dev_stop = bnxt_rep_dev_stop_op,
.stats_get = bnxt_rep_stats_get_op,
.stats_reset = bnxt_rep_stats_reset_op,
.filter_ctrl = bnxt_filter_ctrl_op
.flow_ops_get = bnxt_flow_ops_get_op
};
uint16_t

View File

@ -3108,14 +3108,11 @@ bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
}
static int
bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
enum rte_filter_type type, enum rte_filter_op op, void *arg)
bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
*(const void **)arg = &bond_flow_ops;
return 0;
}
return -ENOTSUP;
*ops = &bond_flow_ops;
return 0;
}
static int
@ -3207,7 +3204,7 @@ const struct eth_dev_ops default_dev_ops = {
.mac_addr_set = bond_ethdev_mac_address_set,
.mac_addr_add = bond_ethdev_mac_addr_add,
.mac_addr_remove = bond_ethdev_mac_addr_remove,
.filter_ctrl = bond_filter_ctrl
.flow_ops_get = bond_flow_ops_get
};
static int

View File

@ -1335,7 +1335,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.rx_queue_start = cxgbe_dev_rx_queue_start,
.rx_queue_stop = cxgbe_dev_rx_queue_stop,
.rx_queue_release = cxgbe_dev_rx_queue_release,
.filter_ctrl = cxgbe_dev_filter_ctrl,
.flow_ops_get = cxgbe_dev_flow_ops_get,
.stats_get = cxgbe_dev_stats_get,
.stats_reset = cxgbe_dev_stats_reset,
.flow_ctrl_get = cxgbe_flow_ctrl_get,

View File

@ -1448,23 +1448,9 @@ static const struct rte_flow_ops cxgbe_flow_ops = {
};
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
cxgbe_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
int ret = 0;
RTE_SET_USED(dev);
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &cxgbe_flow_ops;
break;
default:
ret = -ENOTSUP;
break;
}
return ret;
*ops = &cxgbe_flow_ops;
return 0;
}

View File

@ -35,10 +35,7 @@ struct rte_flow {
struct rte_eth_dev *dev;
};
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
int cxgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
#endif /* _CXGBE_FLOW_H_ */

View File

@ -99,10 +99,6 @@ static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
{"cgr_reject_bytes", 4, 1},
};
static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
RTE_ETH_FILTER_GET
};
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
@ -2322,45 +2318,15 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
return ret;
}
static inline int
dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
{
unsigned int i;
for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
if (dpaa2_supported_filter_ops[i] == filter_op)
return 0;
}
return -ENOTSUP;
}
static int
dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
int ret = 0;
if (!dev)
return -ENODEV;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
ret = -ENOTSUP;
break;
}
*(const void **)arg = &dpaa2_flow_ops;
dpaa2_filter_type |= filter_type;
break;
default:
RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
filter_type);
ret = -ENOTSUP;
break;
}
return ret;
*ops = &dpaa2_flow_ops;
return 0;
}
static void
@ -2453,7 +2419,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.mac_addr_set = dpaa2_dev_set_mac_addr,
.rss_hash_update = dpaa2_dev_rss_hash_update,
.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
.filter_ctrl = dpaa2_dev_flow_ctrl,
.flow_ops_get = dpaa2_dev_flow_ops_get,
.rxq_info_get = dpaa2_rxq_info_get,
.txq_info_get = dpaa2_txq_info_get,
.tm_ops_get = dpaa2_tm_ops_get,

View File

@ -113,7 +113,6 @@ extern int dpaa2_timestamp_dynfield_offset;
/*Externaly defined*/
extern const struct rte_flow_ops dpaa2_flow_ops;
extern enum rte_filter_type dpaa2_filter_type;
extern const struct rte_tm_ops dpaa2_tm_ops;

View File

@ -89,8 +89,6 @@ enum rte_flow_action_type dpaa2_supported_action_type[] = {
/* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/
#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
#ifndef __cplusplus
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
@ -3969,24 +3967,15 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
flow->ipaddr_rule.fs_ipdst_offset =
IP_ADDRESS_OFFSET_INVALID;
switch (dpaa2_filter_type) {
case RTE_ETH_FILTER_GENERIC:
ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
actions, error);
if (ret < 0) {
if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
rte_flow_error_set(error, EPERM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
attr, "unknown");
DPAA2_PMD_ERR(
"Failure to create flow, return code (%d)", ret);
goto creation_error;
}
break;
default:
DPAA2_PMD_ERR("Filter type (%d) not supported",
dpaa2_filter_type);
break;
ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
actions, error);
if (ret < 0) {
if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
rte_flow_error_set(error, EPERM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
attr, "unknown");
DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
goto creation_error;
}
return flow;

View File

@ -194,10 +194,8 @@ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
static int eth_igb_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
@ -374,7 +372,7 @@ static const struct eth_dev_ops eth_igb_ops = {
.reta_query = eth_igb_rss_reta_query,
.rss_hash_update = eth_igb_rss_hash_update,
.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
.filter_ctrl = eth_igb_filter_ctrl,
.flow_ops_get = eth_igb_flow_ops_get,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
.rxq_info_get = igb_rxq_info_get,
.txq_info_get = igb_txq_info_get,
@ -4583,26 +4581,11 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
static int
eth_igb_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
eth_igb_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &igb_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
break;
}
return ret;
*ops = &igb_flow_ops;
return 0;
}
static int

View File

@ -74,13 +74,10 @@ static const struct vic_speed_capa {
RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
struct enic *enic = pmd_priv(dev);
int ret = 0;
ENICPMD_FUNC_TRACE();
@ -90,23 +87,12 @@ enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
*/
if (enic->geneve_opt_enabled)
return -ENOTSUP;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
if (enic->flow_filter_mode == FILTER_FLOWMAN)
*(const void **)arg = &enic_fm_flow_ops;
else
*(const void **)arg = &enic_flow_ops;
break;
default:
dev_warning(enic, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
return ret;
if (enic->flow_filter_mode == FILTER_FLOWMAN)
*ops = &enic_fm_flow_ops;
else
*ops = &enic_flow_ops;
return 0;
}
static void enicpmd_dev_tx_queue_release(void *txq)
@ -1121,7 +1107,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.mac_addr_remove = enicpmd_remove_mac_addr,
.mac_addr_set = enicpmd_set_mac_addr,
.set_mc_addr_list = enicpmd_set_mc_addr_list,
.filter_ctrl = enicpmd_dev_filter_ctrl,
.flow_ops_get = enicpmd_dev_flow_ops_get,
.reta_query = enicpmd_dev_rss_reta_query,
.reta_update = enicpmd_dev_rss_reta_update,
.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,

View File

@ -377,34 +377,21 @@ static const struct rte_flow_ops enic_vf_flow_ops = {
};
static int
enic_vf_filter_ctrl(struct rte_eth_dev *eth_dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
enic_vf_flow_ops_get(struct rte_eth_dev *eth_dev,
const struct rte_flow_ops **ops)
{
struct enic_vf_representor *vf;
int ret = 0;
ENICPMD_FUNC_TRACE();
vf = eth_dev->data->dev_private;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
if (vf->enic.flow_filter_mode == FILTER_FLOWMAN) {
*(const void **)arg = &enic_vf_flow_ops;
} else {
ENICPMD_LOG(WARNING, "VF representors require flowman support for rte_flow API");
ret = -EINVAL;
}
break;
default:
ENICPMD_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
if (vf->enic.flow_filter_mode != FILTER_FLOWMAN) {
ENICPMD_LOG(WARNING,
"VF representors require flowman support for rte_flow API");
return -EINVAL;
}
return ret;
*ops = &enic_vf_flow_ops;
return 0;
}
static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
@ -566,7 +553,7 @@ static const struct eth_dev_ops enic_vf_representor_dev_ops = {
.dev_start = enic_vf_dev_start,
.dev_stop = enic_vf_dev_stop,
.dev_close = enic_vf_dev_close,
.filter_ctrl = enic_vf_filter_ctrl,
.flow_ops_get = enic_vf_flow_ops_get,
.link_update = enic_vf_link_update,
.promiscuous_enable = enic_vf_promiscuous_enable,
.promiscuous_disable = enic_vf_promiscuous_disable,

View File

@ -1514,17 +1514,11 @@ fs_rss_hash_update(struct rte_eth_dev *dev,
}
static int
fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
enum rte_filter_type type,
enum rte_filter_op op,
void *arg)
fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
if (type == RTE_ETH_FILTER_GENERIC &&
op == RTE_ETH_FILTER_GET) {
*(const void **)arg = &fs_flow_ops;
return 0;
}
return -ENOTSUP;
*ops = &fs_flow_ops;
return 0;
}
const struct eth_dev_ops failsafe_ops = {
@ -1565,5 +1559,5 @@ const struct eth_dev_ops failsafe_ops = {
.mac_addr_set = fs_mac_addr_set,
.set_mc_addr_list = fs_set_mc_addr_list,
.rss_hash_update = fs_rss_hash_update,
.filter_ctrl = fs_filter_ctrl,
.flow_ops_get = fs_flow_ops_get,
};

View File

@ -2504,42 +2504,20 @@ static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,
}
/**
* DPDK callback to manage filter control operations
* DPDK callback to get flow operations
*
* @param dev
* Pointer to Ethernet device structure.
* @param filter_type
* Filter type, which just supports generic type.
* @param filter_op
* Filter operation to perform.
* @param arg
* @param ops
* Pointer to operation-specific structure.
*
* @return
* 0 on success, negative error value otherwise.
*/
static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
static int hinic_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
int func_id = hinic_global_func_id(nic_dev->hwdev);
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &hinic_flow_ops;
break;
default:
PMD_DRV_LOG(INFO, "Filter type (%d) not supported",
filter_type);
return -EINVAL;
}
PMD_DRV_LOG(INFO, "Set filter_ctrl succeed, func_id: 0x%x, filter_type: 0x%x,"
"filter_op: 0x%x.", func_id, filter_type, filter_op);
*ops = &hinic_flow_ops;
return 0;
}
@ -3047,7 +3025,7 @@ static const struct eth_dev_ops hinic_pmd_ops = {
.mac_addr_remove = hinic_mac_addr_remove,
.mac_addr_add = hinic_mac_addr_add,
.set_mc_addr_list = hinic_set_mc_addr_list,
.filter_ctrl = hinic_dev_filter_ctrl,
.flow_ops_get = hinic_dev_flow_ops_get,
};
static const struct eth_dev_ops hinic_pmd_vf_ops = {
@ -3082,7 +3060,7 @@ static const struct eth_dev_ops hinic_pmd_vf_ops = {
.mac_addr_remove = hinic_mac_addr_remove,
.mac_addr_add = hinic_mac_addr_add,
.set_mc_addr_list = hinic_set_mc_addr_list,
.filter_ctrl = hinic_dev_filter_ctrl,
.flow_ops_get = hinic_dev_flow_ops_get,
};
static int hinic_func_init(struct rte_eth_dev *eth_dev)

View File

@ -6626,7 +6626,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
.reta_update = hns3_dev_rss_reta_update,
.reta_query = hns3_dev_rss_reta_query,
.filter_ctrl = hns3_dev_filter_ctrl,
.flow_ops_get = hns3_dev_flow_ops_get,
.vlan_filter_set = hns3_vlan_filter_set,
.vlan_tpid_set = hns3_vlan_tpid_set,
.vlan_offload_set = hns3_vlan_offload_set,

View File

@ -968,9 +968,8 @@ hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
}
int hns3_buffer_alloc(struct hns3_hw *hw);
int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
int hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
bool hns3_is_reset_pending(struct hns3_adapter *hns);
bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
void hns3_update_link_status_and_event(struct hns3_hw *hw);

View File

@ -2781,7 +2781,7 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
.rss_hash_conf_get = hns3_dev_rss_hash_conf_get,
.reta_update = hns3_dev_rss_reta_update,
.reta_query = hns3_dev_rss_reta_query,
.filter_ctrl = hns3_dev_filter_ctrl,
.flow_ops_get = hns3_dev_flow_ops_get,
.vlan_filter_set = hns3vf_vlan_filter_set,
.vlan_offload_set = hns3vf_vlan_offload_set,
.get_reg = hns3_get_regs,

View File

@ -2001,34 +2001,16 @@ static const struct rte_flow_ops hns3_flow_ops = {
.isolate = NULL,
};
/*
* The entry of flow API.
* @param dev
* Pointer to Ethernet device.
* @return
* 0 on success, a negative errno value otherwise is set.
*/
int
hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
struct hns3_hw *hw;
int ret = 0;
hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
if (hw->adapter_state >= HNS3_NIC_CLOSED)
return -ENODEV;
*(const void **)arg = &hns3_flow_ops;
break;
default:
hns3_err(hw, "Filter type (%d) not supported", filter_type);
ret = -EOPNOTSUPP;
break;
}
if (hw->adapter_state >= HNS3_NIC_CLOSED)
return -ENODEV;
return ret;
*ops = &hns3_flow_ops;
return 0;
}

View File

@ -338,10 +338,8 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
@ -503,7 +501,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
.udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
.filter_ctrl = i40e_dev_filter_ctrl,
.flow_ops_get = i40e_dev_flow_ops_get,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
.rx_burst_mode_get = i40e_rx_burst_mode_get,
@ -9878,30 +9876,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
}
static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
int ret = 0;
if (dev == NULL)
return -EINVAL;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &i40e_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
return ret;
*ops = &i40e_flow_ops;
return 0;
}
/*

View File

@ -117,10 +117,8 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addrs,
uint32_t mc_addrs_num);
@ -195,7 +193,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
.mtu_set = iavf_dev_mtu_set,
.rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
.filter_ctrl = iavf_dev_filter_ctrl,
.flow_ops_get = iavf_dev_flow_ops_get,
.tx_done_cleanup = iavf_dev_tx_done_cleanup,
};
@ -2079,30 +2077,14 @@ iavf_dev_interrupt_handler(void *param)
}
static int
iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
int ret = 0;
if (!dev)
return -EINVAL;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &iavf_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
return ret;
*ops = &iavf_flow_ops;
return 0;
}
static void

View File

@ -743,31 +743,14 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
}
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
int ret = 0;
if (!dev)
return -EINVAL;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &ice_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
return ret;
*ops = &ice_flow_ops;
return 0;
}
#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4)
@ -984,7 +967,7 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
.promiscuous_disable = ice_dcf_dev_promiscuous_disable,
.allmulticast_enable = ice_dcf_dev_allmulticast_enable,
.allmulticast_disable = ice_dcf_dev_allmulticast_disable,
.filter_ctrl = ice_dcf_dev_filter_ctrl,
.flow_ops_get = ice_dcf_dev_flow_ops_get,
.udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del,
};

View File

@ -129,10 +129,8 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
static int ice_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
unsigned int limit);
static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
@ -215,7 +213,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
.xstats_get = ice_xstats_get,
.xstats_get_names = ice_xstats_get_names,
.xstats_reset = ice_stats_reset,
.filter_ctrl = ice_dev_filter_ctrl,
.flow_ops_get = ice_dev_flow_ops_get,
.udp_tunnel_port_add = ice_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
.tx_done_cleanup = ice_tx_done_cleanup,
@ -5267,30 +5265,14 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
}
static int
ice_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
ice_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
int ret = 0;
if (!dev)
return -EINVAL;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &ice_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
return ret;
*ops = &ice_flow_ops;
return 0;
}
/* Add UDP tunneling port */

View File

@ -297,7 +297,7 @@ static const struct eth_dev_ops eth_igc_ops = {
.vlan_offload_set = eth_igc_vlan_offload_set,
.vlan_tpid_set = eth_igc_vlan_tpid_set,
.vlan_strip_queue_set = eth_igc_vlan_strip_queue_set,
.filter_ctrl = eth_igc_filter_ctrl,
.flow_ops_get = eth_igc_flow_ops_get,
};
/*

View File

@ -369,24 +369,9 @@ igc_clear_all_filter(struct rte_eth_dev *dev)
}
int
eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
eth_igc_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
int ret = 0;
RTE_SET_USED(dev);
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &igc_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
}
return ret;
*ops = &igc_flow_ops;
return 0;
}

View File

@ -29,9 +29,8 @@ int igc_set_syn_filter(struct rte_eth_dev *dev,
const struct igc_syn_filter *filter);
void igc_clear_syn_filter(struct rte_eth_dev *dev);
void igc_clear_all_filter(struct rte_eth_dev *dev);
int
eth_igc_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
int eth_igc_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
#ifdef __cplusplus
}

View File

@ -2821,11 +2821,9 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
}
static int
ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
enum rte_filter_type filter_type, enum rte_filter_op filter_op,
void *arg)
ipn3ke_afu_flow_ops_get(struct rte_eth_dev *ethdev,
const struct rte_flow_ops **ops)
{
int ret = 0;
struct ipn3ke_hw *hw;
struct ipn3ke_rpst *rpst;
@ -2836,27 +2834,13 @@ ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
if (hw->acc_flow)
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &ipn3ke_flow_ops;
break;
default:
IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
*ops = &ipn3ke_flow_ops;
else if (rpst->i40e_pf_eth)
(*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev,
filter_type,
filter_op,
arg);
(*rpst->i40e_pf_eth->dev_ops->flow_ops_get)(ethdev, ops);
else
return -EINVAL;
return ret;
return 0;
}
static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
@ -2874,7 +2858,7 @@ static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
.stats_reset = ipn3ke_rpst_stats_reset,
.xstats_reset = ipn3ke_rpst_stats_reset,
.filter_ctrl = ipn3ke_afu_filter_ctrl,
.flow_ops_get = ipn3ke_afu_flow_ops_get,
.rx_queue_start = ipn3ke_rpst_rx_queue_start,
.rx_queue_stop = ipn3ke_rpst_rx_queue_stop,

View File

@ -304,10 +304,8 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
@ -538,7 +536,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.reta_query = ixgbe_dev_rss_reta_query,
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
.filter_ctrl = ixgbe_dev_filter_ctrl,
.flow_ops_get = ixgbe_dev_flow_ops_get,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
.rxq_info_get = ixgbe_rxq_info_get,
.txq_info_get = ixgbe_txq_info_get,
@ -6805,27 +6803,11 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
static int
ixgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &ixgbe_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
return ret;
*ops = &ixgbe_flow_ops;
return 0;
}
static u8 *

View File

@ -437,7 +437,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
.flow_ctrl_get = mlx4_flow_ctrl_get,
.flow_ctrl_set = mlx4_flow_ctrl_set,
.mtu_set = mlx4_mtu_set,
.filter_ctrl = mlx4_filter_ctrl,
.flow_ops_get = mlx4_flow_ops_get,
.rx_queue_intr_enable = mlx4_rx_intr_enable,
.rx_queue_intr_disable = mlx4_rx_intr_disable,
.is_removed = mlx4_is_removed,

View File

@ -1590,37 +1590,19 @@ static const struct rte_flow_ops mlx4_flow_ops = {
};
/**
* Manage filter operations.
* Get rte_flow callbacks.
*
* @param dev
* Pointer to Ethernet device structure.
* @param filter_type
* Filter type.
* @param filter_op
* Operation to perform.
* @param arg
* @param ops
* Pointer to operation-specific structure.
*
* @return
* 0 on success, negative errno value otherwise and rte_errno is set.
* @return 0
*/
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
mlx4_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
break;
*(const void **)arg = &mlx4_flow_ops;
return 0;
default:
ERROR("%p: filter type (%d) not supported",
(void *)dev, filter_type);
break;
}
rte_errno = ENOTSUP;
return -rte_errno;
*ops = &mlx4_flow_ops;
return 0;
}

View File

@ -51,9 +51,6 @@ uint64_t mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types,
int verbs_to_dpdk);
int mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct mlx4_priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
int mlx4_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
#endif /* RTE_PMD_MLX4_FLOW_H_ */

View File

@ -1477,7 +1477,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
.reta_query = mlx5_dev_rss_reta_query,
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
.filter_ctrl = mlx5_dev_filter_ctrl,
.flow_ops_get = mlx5_flow_ops_get,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,
@ -1562,7 +1562,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.mtu_set = mlx5_dev_set_mtu,
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
.filter_ctrl = mlx5_dev_filter_ctrl,
.flow_ops_get = mlx5_flow_ops_get,
.rxq_info_get = mlx5_rxq_info_get,
.txq_info_get = mlx5_txq_info_get,
.rx_burst_mode_get = mlx5_rx_burst_mode_get,

View File

@ -1195,10 +1195,7 @@ int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error);
int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
struct rte_flow_error *error);
int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
int mlx5_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
int mlx5_flow_start_default(struct rte_eth_dev *dev);
void mlx5_flow_stop_default(struct rte_eth_dev *dev);
int mlx5_flow_verify(struct rte_eth_dev *dev);

View File

@ -6498,40 +6498,20 @@ mlx5_flow_query(struct rte_eth_dev *dev,
}
/**
* Manage filter operations.
* Get rte_flow callbacks.
*
* @param dev
* Pointer to Ethernet device structure.
* @param filter_type
* Filter type.
* @param filter_op
* Operation to perform.
* @param arg
* @param ops
* Pointer to the flow operations structure, set on success.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
* @return 0
*/
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET) {
rte_errno = EINVAL;
return -rte_errno;
}
*(const void **)arg = &mlx5_flow_ops;
return 0;
default:
DRV_LOG(ERR, "port %u filter type (%d) not supported",
dev->data->port_id, filter_type);
rte_errno = ENOTSUP;
return -rte_errno;
}
*ops = &mlx5_flow_ops;
return 0;
}

View File

@ -2340,32 +2340,18 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
*
* @param dev
* Pointer to the device structure.
* @param filer_type
* Flow filter type.
* @param filter_op
* Flow filter operation.
* @param arg
* @param ops
* Pointer to pass the flow ops.
*
* @return
* 0 on success, negative error value otherwise.
*/
static int
mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
mrvl_eth_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &mrvl_flow_ops;
return 0;
default:
MRVL_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
return -EINVAL;
}
*ops = &mrvl_flow_ops;
return 0;
}
/**
@ -2443,7 +2429,7 @@ static const struct eth_dev_ops mrvl_ops = {
.flow_ctrl_set = mrvl_flow_ctrl_set,
.rss_hash_update = mrvl_rss_hash_update,
.rss_hash_conf_get = mrvl_rss_hash_conf_get,
.filter_ctrl = mrvl_eth_filter_ctrl,
.flow_ops_get = mrvl_eth_flow_ops_get,
.mtr_ops_get = mrvl_mtr_ops_get,
.tm_ops_get = mrvl_tm_ops_get,
};

View File

@ -2330,7 +2330,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
.tx_done_cleanup = otx2_nix_tx_done_cleanup,
.set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
.pool_ops_supported = otx2_nix_pool_ops_supported,
.filter_ctrl = otx2_nix_dev_filter_ctrl,
.flow_ops_get = otx2_nix_dev_flow_ops_get,
.get_module_info = otx2_nix_get_module_info,
.get_module_eeprom = otx2_nix_get_module_eeprom,
.fw_version_get = otx2_nix_fw_version_get,

View File

@ -396,9 +396,8 @@ otx2_eth_pmd_priv(struct rte_eth_dev *eth_dev)
/* Ops */
int otx2_nix_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info);
int otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
int otx2_nix_dev_flow_ops_get(struct rte_eth_dev *eth_dev,
const struct rte_flow_ops **ops);
int otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
size_t fw_size);
int otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,

View File

@ -471,24 +471,11 @@ otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
}
int
otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
otx2_nix_dev_flow_ops_get(struct rte_eth_dev *eth_dev __rte_unused,
const struct rte_flow_ops **ops)
{
RTE_SET_USED(eth_dev);
if (filter_type != RTE_ETH_FILTER_GENERIC) {
otx2_err("Unsupported filter type %d", filter_type);
return -ENOTSUP;
}
if (filter_op == RTE_ETH_FILTER_GET) {
*(const void **)arg = &otx2_flow_ops;
return 0;
}
otx2_err("Invalid filter_op %d", filter_op);
return -EINVAL;
*ops = &otx2_flow_ops;
return 0;
}
static struct cgx_fw_data *

View File

@ -2436,7 +2436,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
.filter_ctrl = qede_dev_filter_ctrl,
.flow_ops_get = qede_dev_flow_ops_get,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
.fw_version_get = qede_fw_version_get,

View File

@ -285,11 +285,8 @@ int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
int qede_link_update(struct rte_eth_dev *eth_dev,
__rte_unused int wait_to_complete);
int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
enum rte_filter_op op, void *arg);
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op, void *arg);
int qede_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
int qede_check_fdir_support(struct rte_eth_dev *eth_dev);

View File

@ -1050,31 +1050,18 @@ const struct rte_flow_ops qede_flow_ops = {
.flush = qede_flow_flush,
};
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
int
qede_dev_flow_ops_get(struct rte_eth_dev *eth_dev,
const struct rte_flow_ops **ops)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
return -ENOTSUP;
}
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &qede_flow_ops;
return 0;
default:
DP_ERR(edev, "Unsupported filter type %d\n",
filter_type);
return -EINVAL;
if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
return -ENOTSUP;
}
*ops = &qede_flow_ops;
return 0;
}

View File

@ -1751,32 +1751,11 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
}
static int
sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
int rc = ENOTSUP;
sfc_log_init(sa, "entry");
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET) {
rc = EINVAL;
} else {
*(const void **)arg = &sfc_flow_ops;
rc = 0;
}
break;
default:
sfc_err(sa, "Unknown filter type %u", filter_type);
break;
}
sfc_log_init(sa, "exit: %d", -rc);
SFC_ASSERT(rc >= 0);
return -rc;
*ops = &sfc_flow_ops;
return 0;
}
static int
@ -1859,7 +1838,7 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_update = sfc_dev_rss_hash_update,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
.filter_ctrl = sfc_dev_filter_ctrl,
.flow_ops_get = sfc_dev_flow_ops_get,
.set_mc_addr_list = sfc_set_mc_addr_list,
.rxq_info_get = sfc_rx_queue_info_get,
.txq_info_get = sfc_tx_queue_info_get,

View File

@ -248,18 +248,11 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused,
}
static int
pmd_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
if (filter_type == RTE_ETH_FILTER_GENERIC &&
filter_op == RTE_ETH_FILTER_GET) {
*(const void **)arg = &pmd_flow_ops;
return 0;
}
return -ENOTSUP;
*ops = &pmd_flow_ops;
return 0;
}
static int
@ -287,7 +280,7 @@ static const struct eth_dev_ops pmd_ops = {
.dev_infos_get = pmd_dev_infos_get,
.rx_queue_setup = pmd_rx_queue_setup,
.tx_queue_setup = pmd_tx_queue_setup,
.filter_ctrl = pmd_filter_ctrl,
.flow_ops_get = pmd_flow_ops_get,
.tm_ops_get = pmd_tm_ops_get,
.mtr_ops_get = pmd_mtr_ops_get,
};

View File

@ -1890,7 +1890,7 @@ static const struct eth_dev_ops ops = {
.stats_reset = tap_stats_reset,
.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
.rss_hash_update = tap_rss_hash_update,
.filter_ctrl = tap_dev_filter_ctrl,
.flow_ops_get = tap_dev_flow_ops_get,
};
static int

View File

@ -2160,35 +2160,20 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
}
/**
* Manage filter operations.
* Get rte_flow operations.
*
* @param dev
* Pointer to Ethernet device structure.
* @param filter_type
* Filter type.
* @param filter_op
* Operation to perform.
* @param arg
* @param ops
* Pointer to the flow operations structure, set on success.
*
* @return
* 0 on success, negative errno value on failure.
*/
int
tap_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
tap_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
const struct rte_flow_ops **ops)
{
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &tap_flow_ops;
return 0;
default:
TAP_LOG(ERR, "%p: filter type (%d) not supported",
dev, filter_type);
}
return -EINVAL;
*ops = &tap_flow_ops;
return 0;
}

View File

@ -46,10 +46,8 @@ enum bpf_fd_idx {
SEC_MAX,
};
int tap_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
int tap_dev_flow_ops_get(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
int tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int tap_flow_implicit_create(struct pmd_internals *pmd,

View File

@ -4080,27 +4080,11 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
static int
txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg)
txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_ops **ops)
{
int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
*(const void **)arg = &txgbe_flow_ops;
break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
ret = -EINVAL;
break;
}
return ret;
*ops = &txgbe_flow_ops;
return 0;
}
static u8 *
@ -5210,7 +5194,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.reta_query = txgbe_dev_rss_reta_query,
.rss_hash_update = txgbe_dev_rss_hash_update,
.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
.filter_ctrl = txgbe_dev_filter_ctrl,
.flow_ops_get = txgbe_dev_flow_ops_get,
.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
.rxq_info_get = txgbe_rxq_info_get,
.txq_info_get = txgbe_txq_info_get,

View File

@ -465,34 +465,16 @@ typedef int (*eth_get_module_eeprom_t)(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *info);
/**< @internal Retrieve plugin module eeprom data */
struct rte_flow_ops;
/**
* Feature filter types
* @internal
* Get flow operations.
*
* If the flow API is not supported for the specified device,
* the driver can return NULL.
*/
enum rte_filter_type {
RTE_ETH_FILTER_NONE = 0,
RTE_ETH_FILTER_ETHERTYPE,
RTE_ETH_FILTER_FLEXIBLE,
RTE_ETH_FILTER_SYN,
RTE_ETH_FILTER_NTUPLE,
RTE_ETH_FILTER_TUNNEL,
RTE_ETH_FILTER_FDIR,
RTE_ETH_FILTER_HASH,
RTE_ETH_FILTER_L2_TUNNEL,
RTE_ETH_FILTER_GENERIC,
};
/**
* Generic operations on filters
*/
enum rte_filter_op {
RTE_ETH_FILTER_GET, /**< get flow API ops */
};
typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
/**< @internal Take operations to assigned filter type on an Ethernet device */
typedef int (*eth_flow_ops_get_t)(struct rte_eth_dev *dev,
const struct rte_flow_ops **ops);
typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
@ -904,7 +886,7 @@ struct eth_dev_ops {
eth_get_module_eeprom_t get_module_eeprom;
/** Get plugin module eeprom data. */
eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
eth_flow_ops_get_t flow_ops_get; /**< Get flow operations. */
eth_get_dcb_info get_dcb_info; /** Get DCB information. */
@ -1445,6 +1427,18 @@ rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
* Legacy ethdev API used internally by drivers.
*/
enum rte_filter_type {
RTE_ETH_FILTER_NONE = 0,
RTE_ETH_FILTER_ETHERTYPE,
RTE_ETH_FILTER_FLEXIBLE,
RTE_ETH_FILTER_SYN,
RTE_ETH_FILTER_NTUPLE,
RTE_ETH_FILTER_TUNNEL,
RTE_ETH_FILTER_FDIR,
RTE_ETH_FILTER_HASH,
RTE_ETH_FILTER_L2_TUNNEL,
};
/**
* Define all structures for Ethertype Filter type.
*/

View File

@ -339,7 +339,7 @@ struct rte_eth_fdir_action {
};
/**
* A structure used to define the flow director filter entry by filter_ctrl API.
* A structure used to define the flow director filter entry.
*/
struct rte_eth_fdir_filter {
uint32_t soft_id;

View File

@ -255,18 +255,21 @@ rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
code = ENODEV;
else if (unlikely(!dev->dev_ops->filter_ctrl ||
dev->dev_ops->filter_ctrl(dev,
RTE_ETH_FILTER_GENERIC,
RTE_ETH_FILTER_GET,
&ops) ||
!ops))
else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
/* flow API not supported with this driver dev_ops */
code = ENOSYS;
else
return ops;
rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(code));
return NULL;
code = dev->dev_ops->flow_ops_get(dev, &ops);
if (code == 0 && ops == NULL)
/* flow API not supported with this device */
code = ENOSYS;
if (code != 0) {
rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, rte_strerror(code));
return NULL;
}
return ops;
}
/* Check whether a flow rule can be created on a given port. */

View File

@ -28,31 +28,6 @@ extern "C" {
/**
* Generic flow operations structure implemented and returned by PMDs.
*
* To implement this API, PMDs must handle the RTE_ETH_FILTER_GENERIC filter
* type in their .filter_ctrl callback function (struct eth_dev_ops) as well
* as the RTE_ETH_FILTER_GET filter operation.
*
* If successful, this operation must result in a pointer to a PMD-specific
* struct rte_flow_ops written to the argument address as described below:
*
* \code
*
* // PMD filter_ctrl callback
*
* static const struct rte_flow_ops pmd_flow_ops = { ... };
*
* switch (filter_type) {
* case RTE_ETH_FILTER_GENERIC:
* if (filter_op != RTE_ETH_FILTER_GET)
* return -EINVAL;
* *(const void **)arg = &pmd_flow_ops;
* return 0;
* }
*
* \endcode
*
* See also rte_flow_ops_get().
*
* These callback functions are not supposed to be used by applications
* directly, which must rely on the API defined in rte_flow.h.
*