net/mlx5: remove flow director support
Generic flow API should be used for flow steering as it provides a better and
easier way to configure flows.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
parent 33d5cea35e
commit 34bb7d073f
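For reference only (not part of this commit): a rule that used to be programmed
through the flow director filter API can be expressed with the generic flow API
instead. The sketch below assumes the rte_flow calls available in DPDK releases
of this period (rte_flow_validate()/rte_flow_create()); the port id, destination
address and queue index are placeholders.

#include <stdint.h>
#include <rte_flow.h>

/* Illustrative sketch: steer ingress IPv4/UDP traffic matching a destination
 * address to a given RX queue, roughly what an RTE_FDIR_MODE_PERFECT rule
 * used to express.  port_id, dst_ip (big-endian) and queue are placeholders. */
static int
steer_ipv4_udp_to_queue(uint16_t port_id, uint32_t dst_ip, uint16_t queue)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ipv4_spec = { .hdr = { .dst_addr = dst_ip } };
        /* All-ones mask: exact match on the destination address. */
        struct rte_flow_item_ipv4 ipv4_mask = { .hdr = { .dst_addr = UINT32_MAX } };
        struct rte_flow_action_queue queue_conf = { .index = queue };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        /* Validate first, then create; both report details through error. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
                return -1;
        return rte_flow_create(port_id, &attr, pattern, actions, &error) != NULL ?
               0 : -1;
}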
@@ -23,7 +23,6 @@ RSS key update = Y
 RSS reta update = Y
 SR-IOV = Y
 VLAN filter = Y
-Flow director = Y
 Flow API = Y
 CRC offload = Y
 VLAN offload = Y
@@ -89,8 +89,6 @@ Features
 - Promiscuous mode.
 - Multicast promiscuous mode.
 - Hardware checksum offloads.
-- Flow director (RTE_FDIR_MODE_PERFECT, RTE_FDIR_MODE_PERFECT_MAC_VLAN and
-  RTE_ETH_FDIR_REJECT).
 - Flow API.
 - Multiple process.
 - KVM and VMware ESX SR-IOV modes are supported.
@@ -49,7 +49,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxmode.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_vlan.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
@@ -201,10 +201,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
 
-	/* Remove flow director elements. */
-	priv_fdir_disable(priv);
-	priv_fdir_delete_filters_list(priv);
-
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
 	dev->tx_pkt_burst = removed_tx_burst;
@@ -844,10 +840,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		claim_zero(priv_mac_addr_add(priv, 0,
 					     (const uint8_t (*)[ETHER_ADDR_LEN])
 					     mac.addr_bytes));
-		/* Initialize FD filters list. */
-		err = fdir_init_filters_list(priv);
-		if (err)
-			goto port_error;
 #ifndef NDEBUG
 		{
 			char ifname[IF_NAMESIZE];
@@ -145,8 +145,6 @@ struct priv {
 	struct rte_intr_handle intr_handle; /* Interrupt handler. */
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
-	struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
-	struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
 	struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
 	TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
@@ -273,18 +271,10 @@ void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int);
 int mlx5_dev_start(struct rte_eth_dev *);
 void mlx5_dev_stop(struct rte_eth_dev *);
 
-/* mlx5_fdir.c */
-
-void priv_fdir_queue_destroy(struct priv *, struct fdir_queue *);
-int fdir_init_filters_list(struct priv *);
-void priv_fdir_delete_filters_list(struct priv *);
-void priv_fdir_disable(struct priv *);
-void priv_fdir_enable(struct priv *);
-int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
-			 enum rte_filter_op, void *);
-
 /* mlx5_flow.c */
 
+int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
+			 enum rte_filter_op, void *);
 int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
 		       const struct rte_flow_item [],
 		       const struct rte_flow_action [],
File diff suppressed because it is too large.
@@ -298,6 +298,49 @@ struct rte_flow_drop {
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 };
 
+static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
+	.query = NULL,
+	.isolate = mlx5_flow_isolate,
+};
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param filter_type
+ *   Filter type.
+ * @param filter_op
+ *   Operation to perform.
+ * @param arg
+ *   Pointer to operation-specific structure.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = EINVAL;
+
+	if (filter_type == RTE_ETH_FILTER_GENERIC) {
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &mlx5_flow_ops;
+		return 0;
+	}
+	ERROR("%p: filter type (%d) not supported",
+	      (void *)dev, filter_type);
+	return -ret;
+}
+
 /**
  * Check support for a given item.
  *
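Not part of the commit, but useful context for the hunk above: in the DPDK
layering of this period, the generic rte_flow layer obtains a PMD's rte_flow_ops
by calling the driver's filter_ctrl() callback with RTE_ETH_FILTER_GENERIC and
RTE_ETH_FILTER_GET. The helper below is an illustrative sketch of that lookup
(port_id is a placeholder); for mlx5 the call lands in the new
mlx5_dev_filter_ctrl() and returns &mlx5_flow_ops.

#include <rte_ethdev.h>
#include <rte_flow_driver.h>

/* Illustrative sketch: fetch a port's rte_flow_ops the way the rte_flow
 * layer does, via the PMD's filter_ctrl() callback. */
static const struct rte_flow_ops *
port_flow_ops(uint16_t port_id)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = NULL;

        if (dev->dev_ops->filter_ctrl == NULL)
                return NULL;
        if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                      RTE_ETH_FILTER_GET, &ops) != 0)
                return NULL;
        return ops;
}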
@@ -760,8 +760,6 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
 {
 	DEBUG("cleaning up %p", (void *)rxq_ctrl);
 	rxq_free_elts(rxq_ctrl);
-	if (rxq_ctrl->fdir_queue != NULL)
-		priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);
 	if (rxq_ctrl->wq != NULL)
 		claim_zero(ibv_destroy_wq(rxq_ctrl->wq));
 	if (rxq_ctrl->cq != NULL)
@@ -78,14 +78,6 @@ struct mlx5_txq_stats {
 	uint64_t oerrors; /**< Total number of failed transmitted packets. */
 };
 
-/* Flow director queue structure. */
-struct fdir_queue {
-	struct ibv_qp *qp; /* Associated RX QP. */
-	struct ibv_rwq_ind_table *ind_table; /* Indirection table. */
-	struct ibv_wq *wq; /* Work queue. */
-	struct ibv_cq *cq; /* Completion queue. */
-};
-
 struct priv;
 
 /* Compressed CQE context. */
@@ -134,7 +126,6 @@ struct rxq_ctrl {
 	struct priv *priv; /* Back pointer to private data. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_wq *wq; /* Work Queue. */
-	struct fdir_queue *fdir_queue; /* Flow director queue. */
 	struct ibv_mr *mr; /* Memory Region (for mp). */
 	struct ibv_comp_channel *channel;
 	unsigned int socket; /* CPU socket ID for allocations. */
@@ -80,8 +80,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 		      (void *)priv, strerror(err));
 		goto error;
 	}
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
-		priv_fdir_enable(priv);
 	err = priv_flow_start(priv);
 	if (err) {
 		priv->started = 0;
@@ -135,7 +133,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_special_flow_disable_all(priv);
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
-	priv_fdir_disable(priv);
 	priv_flow_stop(priv);
 	priv_rx_intr_vec_disable(priv);
 	priv_dev_interrupt_handler_uninstall(priv, dev);