net/mlx5: refactor TC-flow infrastructure

This commit refactors tc_flow in preparation for coming commits
that send different types of messages and expect different types of replies
while still using the same underlying routines.

Signed-off-by: Moti Haimovsky <motih@mellanox.com>
Author:       Moti Haimovsky <motih@mellanox.com>
Date:         2018-10-18 21:29:20 +03:00
Committed-by: Ferruh Yigit
Parent:       c18feafa19
Commit:       d53180afe3

4 changed files with 99 additions and 39 deletions

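Editor's note: in short, the commit replaces the bare libmnl socket kept in struct priv with a struct mlx5_flow_tcf_context that bundles the socket with a reusable message buffer and a sequence-number counter. A minimal sketch of the resulting lifecycle as wired into device spawn/close in the first file below (error handling abridged; ifindex is the port's network interface index):

	struct mlx5_flow_tcf_context *ctx;
	struct rte_flow_error error;

	/* Device spawn: allocate the context, then initialize the
	 * ingress qdisc of the port's network interface. */
	ctx = mlx5_flow_tcf_context_create();
	if (!ctx || mlx5_flow_tcf_init(ctx, ifindex, &error)) {
		/* Switch offloads unavailable; release what exists. */
		mlx5_flow_tcf_context_destroy(ctx);
		ctx = NULL;
	}

	/* Device close: teardown is NULL-safe and closes the socket,
	 * then frees the buffer and the context itself. */
	mlx5_flow_tcf_context_destroy(ctx);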
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c

@@ -286,8 +286,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 		close(priv->nl_socket_route);
 	if (priv->nl_socket_rdma >= 0)
 		close(priv->nl_socket_rdma);
-	if (priv->mnl_socket)
-		mlx5_flow_tcf_socket_destroy(priv->mnl_socket);
+	if (priv->tcf_context)
+		mlx5_flow_tcf_context_destroy(priv->tcf_context);
 	ret = mlx5_hrxq_ibv_verify(dev);
 	if (ret)
 		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -1138,8 +1138,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
 	if (vf && config.vf_nl_en)
 		mlx5_nl_mac_addr_sync(eth_dev);
-	priv->mnl_socket = mlx5_flow_tcf_socket_create();
-	if (!priv->mnl_socket) {
+	priv->tcf_context = mlx5_flow_tcf_context_create();
+	if (!priv->tcf_context) {
 		err = -rte_errno;
 		DRV_LOG(WARNING,
 			"flow rules relying on switch offloads will not be"
@@ -1154,16 +1154,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			error.message =
 				"cannot retrieve network interface index";
 		} else {
-			err = mlx5_flow_tcf_init(priv->mnl_socket, ifindex,
-						 &error);
+			err = mlx5_flow_tcf_init(priv->tcf_context,
+						 ifindex, &error);
 		}
 		if (err) {
 			DRV_LOG(WARNING,
 				"flow rules relying on switch offloads will"
 				" not be supported: %s: %s",
 				error.message, strerror(rte_errno));
-			mlx5_flow_tcf_socket_destroy(priv->mnl_socket);
-			priv->mnl_socket = NULL;
+			mlx5_flow_tcf_context_destroy(priv->tcf_context);
+			priv->tcf_context = NULL;
 		}
 	}
 	TAILQ_INIT(&priv->flows);
@@ -1218,8 +1218,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		close(priv->nl_socket_route);
 	if (priv->nl_socket_rdma >= 0)
 		close(priv->nl_socket_rdma);
-	if (priv->mnl_socket)
-		mlx5_flow_tcf_socket_destroy(priv->mnl_socket);
+	if (priv->tcf_context)
+		mlx5_flow_tcf_context_destroy(priv->tcf_context);
 	if (own_domain_id)
 		claim_zero(rte_eth_switch_domain_free(priv->domain_id));
 	rte_free(priv);

--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h

@@ -169,7 +169,7 @@ struct mlx5_drop {
 	struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
 };
 
-struct mnl_socket;
+struct mlx5_flow_tcf_context;
 
 struct priv {
 	LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
@@ -236,7 +236,7 @@ struct priv {
 	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
 	/* UAR same-page access control required in 32bit implementations. */
 #endif
-	struct mnl_socket *mnl_socket; /* Libmnl socket. */
+	struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)

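Editor's note: the header change above is the classic opaque-pointer pattern. mlx5.h only forward-declares the type, so holders of struct priv carry a pointer without seeing the member layout, which stays private to mlx5_flow_tcf.c (defined in the last file of this commit). Both fragments below are taken from this commit:

	/* mlx5.h: opaque declaration only. */
	struct mlx5_flow_tcf_context;

	/* mlx5_flow_tcf.c: the sole definition site. */
	struct mlx5_flow_tcf_context {
		struct mnl_socket *nl; /* NETLINK_ROUTE libmnl socket. */
		uint32_t seq;          /* Message sequence number. */
		uint32_t buf_size;     /* Message buffer size. */
		uint8_t *buf;          /* Message buffer. */
	};

The TC-flower internals can therefore evolve without touching the other users of struct priv.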
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h

@@ -243,7 +243,6 @@ struct rte_flow {
 	struct rte_flow_action_rss rss;/**< RSS context. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
-	void *nl_flow; /**< Netlink flow buffer if relevant. */
 	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
 	/**< Device flows that are part of the flow. */
 	uint32_t actions; /**< Bit-fields which mark all detected actions. */
@@ -350,9 +349,9 @@ int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 
 /* mlx5_flow_tcf.c */
 
-int mlx5_flow_tcf_init(struct mnl_socket *nl, unsigned int ifindex,
-		       struct rte_flow_error *error);
-struct mnl_socket *mlx5_flow_tcf_socket_create(void);
-void mlx5_flow_tcf_socket_destroy(struct mnl_socket *nl);
+int mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
+		       unsigned int ifindex, struct rte_flow_error *error);
+struct mlx5_flow_tcf_context *mlx5_flow_tcf_context_create(void);
+void mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx);
 
 #endif /* RTE_PMD_MLX5_FLOW_H_ */

--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c

@@ -235,6 +235,19 @@ struct tc_pedit_sel {
 #define TTL_LEN 1
 #endif
 
+/**
+ * Structure for holding netlink context.
+ * Note the size of the message buffer which is MNL_SOCKET_BUFFER_SIZE.
+ * Using this (8KB) buffer size ensures that netlink messages will never be
+ * truncated.
+ */
+struct mlx5_flow_tcf_context {
+	struct mnl_socket *nl; /* NETLINK_ROUTE libmnl socket. */
+	uint32_t seq; /* Message sequence number. */
+	uint32_t buf_size; /* Message buffer size. */
+	uint8_t *buf; /* Message buffer. */
+};
+
 /** Empty masks for known item types. */
 static const union {
 	struct rte_flow_item_port_id port_id;
@@ -2153,7 +2166,7 @@ flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	       struct rte_flow_error *error)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct mnl_socket *nl = priv->mnl_socket;
+	struct mnl_socket *nl = priv->tcf_context->nl;
 	struct mlx5_flow *dev_flow;
 	struct nlmsghdr *nlh;
@@ -2182,7 +2195,7 @@ static void
 flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct mnl_socket *nl = priv->mnl_socket;
+	struct mnl_socket *nl = priv->tcf_context->nl;
 	struct mlx5_flow *dev_flow;
 	struct nlmsghdr *nlh;
@@ -2234,10 +2247,47 @@ const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops = {
 };
 
 /**
- * Initialize ingress qdisc of a given network interface.
+ * Create and configure a libmnl socket for Netlink flow rules.
+ *
+ * @return
+ *   A valid libmnl socket object pointer on success, NULL otherwise and
+ *   rte_errno is set.
+ */
+static struct mnl_socket *
+flow_tcf_mnl_socket_create(void)
+{
+	struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
+
+	if (nl) {
+		mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
+				      sizeof(int));
+		if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
+			return nl;
+	}
+	rte_errno = errno;
+	if (nl)
+		mnl_socket_close(nl);
+	return NULL;
+}
+
+/**
+ * Destroy a libmnl socket.
+ *
+ * @param nl
+ *   Libmnl socket of the @p NETLINK_ROUTE kind.
+ */
+static void
+flow_tcf_mnl_socket_destroy(struct mnl_socket *nl)
+{
+	if (nl)
+		mnl_socket_close(nl);
+}
+
+/**
+ * Initialize ingress qdisc of a given network interface.
  *
+ * @param ctx
+ *   Pointer to tc-flower context to use.
  * @param ifindex
  *   Index of network interface to initialize.
  * @param[out] error
@@ -2247,11 +2297,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops = {
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_tcf_init(struct mnl_socket *nl, unsigned int ifindex,
-		   struct rte_flow_error *error)
+mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
+		   unsigned int ifindex, struct rte_flow_error *error)
 {
 	struct nlmsghdr *nlh;
 	struct tcmsg *tcm;
+	struct mnl_socket *nl = ctx->nl;
 	alignas(struct nlmsghdr)
 	uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
@@ -2290,37 +2341,47 @@ mlx5_flow_tcf_init(struct mnl_socket *nl, unsigned int ifindex,
 }
 
 /**
- * Create and configure a libmnl socket for Netlink flow rules.
+ * Create libmnl context for Netlink flow rules.
  *
  * @return
  *   A valid libmnl socket object pointer on success, NULL otherwise and
  *   rte_errno is set.
  */
-struct mnl_socket *
-mlx5_flow_tcf_socket_create(void)
+struct mlx5_flow_tcf_context *
+mlx5_flow_tcf_context_create(void)
 {
-	struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
-
-	if (nl) {
-		mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
-				      sizeof(int));
-		if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
-			return nl;
-	}
-	rte_errno = errno;
-	if (nl)
-		mnl_socket_close(nl);
+	struct mlx5_flow_tcf_context *ctx = rte_zmalloc(__func__,
+							sizeof(*ctx),
+							sizeof(uint32_t));
+
+	if (!ctx)
+		goto error;
+	ctx->nl = flow_tcf_mnl_socket_create();
+	if (!ctx->nl)
+		goto error;
+	ctx->buf_size = MNL_SOCKET_BUFFER_SIZE;
+	ctx->buf = rte_zmalloc(__func__,
+			       ctx->buf_size, sizeof(uint32_t));
+	if (!ctx->buf)
+		goto error;
+	ctx->seq = random();
+	return ctx;
+error:
+	mlx5_flow_tcf_context_destroy(ctx);
 	return NULL;
 }
 
 /**
- * Destroy a libmnl socket.
+ * Destroy a libmnl context.
  *
  * @param nl
  *   Libmnl socket of the @p NETLINK_ROUTE kind.
  */
 void
-mlx5_flow_tcf_socket_destroy(struct mnl_socket *nl)
+mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx)
 {
-	mnl_socket_close(nl);
+	if (!ctx)
+		return;
+	flow_tcf_mnl_socket_destroy(ctx->nl);
+	rte_free(ctx->buf);
+	rte_free(ctx);
 }
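
Editor's note: the seq, buf and buf_size fields are allocated here but not yet consumed; per the commit message they serve the coming commits that send different types of messages and expect different types of replies over the same socket. As a hedged illustration only (flow_tcf_nl_ack is a hypothetical helper name, not part of this commit; the libmnl calls are the library's standard API), such a follow-up routine could look like:

	/* Hypothetical helper: send one Netlink message on the shared
	 * socket and run the kernel's reply/ACK through mnl_cb_run().
	 * The context buffer is MNL_SOCKET_BUFFER_SIZE bytes, so
	 * replies are never truncated. */
	static int
	flow_tcf_nl_ack(struct mlx5_flow_tcf_context *ctx,
			struct nlmsghdr *nlh)
	{
		struct mnl_socket *nl = ctx->nl;
		uint32_t seq = ctx->seq++;
		int ret;

		nlh->nlmsg_flags |= NLM_F_ACK;
		nlh->nlmsg_seq = seq;
		ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
		if (ret != -1)
			ret = mnl_socket_recvfrom(nl, ctx->buf, ctx->buf_size);
		if (ret != -1)
			ret = mnl_cb_run(ctx->buf, ret, seq,
					 mnl_socket_get_portid(nl),
					 NULL, NULL);
		if (ret == -1) {
			rte_errno = errno;
			return -rte_errno;
		}
		return 0;
	}

Bumping ctx->seq per message lets the caller match each reply to its request via mnl_cb_run(), which is what the shared sequence counter in the context is for.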