net/mlx5: refactor TC-flow infrastructure

This commit refactors tc_flow as a preparation for coming commits
that send different types of messages and expect different types of replies
while still using the same underlying routines.

Signed-off-by: Moti Haimovsky <motih@mellanox.com>
This commit is contained in:
Moti Haimovsky 2018-10-18 21:29:20 +03:00 committed by Ferruh Yigit
parent c18feafa19
commit d53180afe3
4 changed files with 99 additions and 39 deletions

View File

@ -286,8 +286,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
close(priv->nl_socket_route); close(priv->nl_socket_route);
if (priv->nl_socket_rdma >= 0) if (priv->nl_socket_rdma >= 0)
close(priv->nl_socket_rdma); close(priv->nl_socket_rdma);
if (priv->mnl_socket) if (priv->tcf_context)
mlx5_flow_tcf_socket_destroy(priv->mnl_socket); mlx5_flow_tcf_context_destroy(priv->tcf_context);
ret = mlx5_hrxq_ibv_verify(dev); ret = mlx5_hrxq_ibv_verify(dev);
if (ret) if (ret)
DRV_LOG(WARNING, "port %u some hash Rx queue still remain", DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@ -1138,8 +1138,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
if (vf && config.vf_nl_en) if (vf && config.vf_nl_en)
mlx5_nl_mac_addr_sync(eth_dev); mlx5_nl_mac_addr_sync(eth_dev);
priv->mnl_socket = mlx5_flow_tcf_socket_create(); priv->tcf_context = mlx5_flow_tcf_context_create();
if (!priv->mnl_socket) { if (!priv->tcf_context) {
err = -rte_errno; err = -rte_errno;
DRV_LOG(WARNING, DRV_LOG(WARNING,
"flow rules relying on switch offloads will not be" "flow rules relying on switch offloads will not be"
@ -1154,16 +1154,16 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
error.message = error.message =
"cannot retrieve network interface index"; "cannot retrieve network interface index";
} else { } else {
err = mlx5_flow_tcf_init(priv->mnl_socket, ifindex, err = mlx5_flow_tcf_init(priv->tcf_context,
&error); ifindex, &error);
} }
if (err) { if (err) {
DRV_LOG(WARNING, DRV_LOG(WARNING,
"flow rules relying on switch offloads will" "flow rules relying on switch offloads will"
" not be supported: %s: %s", " not be supported: %s: %s",
error.message, strerror(rte_errno)); error.message, strerror(rte_errno));
mlx5_flow_tcf_socket_destroy(priv->mnl_socket); mlx5_flow_tcf_context_destroy(priv->tcf_context);
priv->mnl_socket = NULL; priv->tcf_context = NULL;
} }
} }
TAILQ_INIT(&priv->flows); TAILQ_INIT(&priv->flows);
@ -1218,8 +1218,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
close(priv->nl_socket_route); close(priv->nl_socket_route);
if (priv->nl_socket_rdma >= 0) if (priv->nl_socket_rdma >= 0)
close(priv->nl_socket_rdma); close(priv->nl_socket_rdma);
if (priv->mnl_socket) if (priv->tcf_context)
mlx5_flow_tcf_socket_destroy(priv->mnl_socket); mlx5_flow_tcf_context_destroy(priv->tcf_context);
if (own_domain_id) if (own_domain_id)
claim_zero(rte_eth_switch_domain_free(priv->domain_id)); claim_zero(rte_eth_switch_domain_free(priv->domain_id));
rte_free(priv); rte_free(priv);

View File

@ -169,7 +169,7 @@ struct mlx5_drop {
struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */ struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
}; };
struct mnl_socket; struct mlx5_flow_tcf_context;
struct priv { struct priv {
LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */ LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
@ -236,7 +236,7 @@ struct priv {
rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX]; rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
/* UAR same-page access control required in 32bit implementations. */ /* UAR same-page access control required in 32bit implementations. */
#endif #endif
struct mnl_socket *mnl_socket; /* Libmnl socket. */ struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
}; };
#define PORT_ID(priv) ((priv)->dev_data->port_id) #define PORT_ID(priv) ((priv)->dev_data->port_id)

View File

@ -243,7 +243,6 @@ struct rte_flow {
struct rte_flow_action_rss rss;/**< RSS context. */ struct rte_flow_action_rss rss;/**< RSS context. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
void *nl_flow; /**< Netlink flow buffer if relevant. */
LIST_HEAD(dev_flows, mlx5_flow) dev_flows; LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
/**< Device flows that are part of the flow. */ /**< Device flows that are part of the flow. */
uint32_t actions; /**< Bit-fields which mark all detected actions. */ uint32_t actions; /**< Bit-fields which mark all detected actions. */
@ -350,9 +349,9 @@ int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
/* mlx5_flow_tcf.c */ /* mlx5_flow_tcf.c */
int mlx5_flow_tcf_init(struct mnl_socket *nl, unsigned int ifindex, int mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
struct rte_flow_error *error); unsigned int ifindex, struct rte_flow_error *error);
struct mnl_socket *mlx5_flow_tcf_socket_create(void); struct mlx5_flow_tcf_context *mlx5_flow_tcf_context_create(void);
void mlx5_flow_tcf_socket_destroy(struct mnl_socket *nl); void mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx);
#endif /* RTE_PMD_MLX5_FLOW_H_ */ #endif /* RTE_PMD_MLX5_FLOW_H_ */

View File

@ -235,6 +235,19 @@ struct tc_pedit_sel {
#define TTL_LEN 1 #define TTL_LEN 1
#endif #endif
/**
 * Structure for holding netlink context.
 * Note the size of the message buffer which is MNL_SOCKET_BUFFER_SIZE.
 * Using this (8KB) buffer size ensures that netlink messages will never be
 * truncated.
 */
struct mlx5_flow_tcf_context {
	struct mnl_socket *nl; /* NETLINK_ROUTE libmnl socket. */
	uint32_t seq; /* Message sequence number, incremented per request. */
	uint32_t buf_size; /* Message buffer size (MNL_SOCKET_BUFFER_SIZE). */
	uint8_t *buf; /* Message buffer, owned by this context. */
};
/** Empty masks for known item types. */ /** Empty masks for known item types. */
static const union { static const union {
struct rte_flow_item_port_id port_id; struct rte_flow_item_port_id port_id;
@ -2153,7 +2166,7 @@ flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error) struct rte_flow_error *error)
{ {
struct priv *priv = dev->data->dev_private; struct priv *priv = dev->data->dev_private;
struct mnl_socket *nl = priv->mnl_socket; struct mnl_socket *nl = priv->tcf_context->nl;
struct mlx5_flow *dev_flow; struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
@ -2182,7 +2195,7 @@ static void
flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow) flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{ {
struct priv *priv = dev->data->dev_private; struct priv *priv = dev->data->dev_private;
struct mnl_socket *nl = priv->mnl_socket; struct mnl_socket *nl = priv->tcf_context->nl;
struct mlx5_flow *dev_flow; struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
@ -2234,10 +2247,47 @@ const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops = {
}; };
/** /**
* Initialize ingress qdisc of a given network interface. * Create and configure a libmnl socket for Netlink flow rules.
*
* @return
* A valid libmnl socket object pointer on success, NULL otherwise and
* rte_errno is set.
*/
/**
 * Create and configure a libmnl socket for Netlink flow rules.
 *
 * Opens a NETLINK_ROUTE socket, enables NETLINK_CAP_ACK (best effort,
 * failure to set the option is not fatal) and binds it with an
 * automatically assigned port ID.
 *
 * @return
 *   A valid libmnl socket object pointer on success, NULL otherwise and
 *   rte_errno is set.
 */
static struct mnl_socket *
flow_tcf_mnl_socket_create(void)
{
	struct mnl_socket *sock = mnl_socket_open(NETLINK_ROUTE);

	if (!sock) {
		rte_errno = errno;
		return NULL;
	}
	mnl_socket_setsockopt(sock, NETLINK_CAP_ACK, &(int){ 1 },
			      sizeof(int));
	if (mnl_socket_bind(sock, 0, MNL_SOCKET_AUTOPID)) {
		rte_errno = errno;
		mnl_socket_close(sock);
		return NULL;
	}
	return sock;
}
/**
* Destroy a libmnl socket.
* *
* @param nl * @param nl
* Libmnl socket of the @p NETLINK_ROUTE kind. * Libmnl socket of the @p NETLINK_ROUTE kind.
*/
/**
 * Destroy a libmnl socket.
 *
 * @param nl
 *   Libmnl socket of the @p NETLINK_ROUTE kind; may be NULL, in which
 *   case the call is a no-op.
 */
static void
flow_tcf_mnl_socket_destroy(struct mnl_socket *nl)
{
	if (!nl)
		return;
	mnl_socket_close(nl);
}
/**
* Initialize ingress qdisc of a given network interface.
*
* @param ctx
* Pointer to tc-flower context to use.
* @param ifindex * @param ifindex
* Index of network interface to initialize. * Index of network interface to initialize.
* @param[out] error * @param[out] error
@ -2247,11 +2297,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops = {
* 0 on success, a negative errno value otherwise and rte_errno is set. * 0 on success, a negative errno value otherwise and rte_errno is set.
*/ */
int int
mlx5_flow_tcf_init(struct mnl_socket *nl, unsigned int ifindex, mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
struct rte_flow_error *error) unsigned int ifindex, struct rte_flow_error *error)
{ {
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
struct tcmsg *tcm; struct tcmsg *tcm;
struct mnl_socket *nl = ctx->nl;
alignas(struct nlmsghdr) alignas(struct nlmsghdr)
uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)]; uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
@ -2290,37 +2341,47 @@ mlx5_flow_tcf_init(struct mnl_socket *nl, unsigned int ifindex,
} }
/** /**
* Create and configure a libmnl socket for Netlink flow rules. * Create libmnl context for Netlink flow rules.
* *
* @return * @return
* A valid libmnl socket object pointer on success, NULL otherwise and * A valid libmnl socket object pointer on success, NULL otherwise and
* rte_errno is set. * rte_errno is set.
*/ */
struct mnl_socket * struct mlx5_flow_tcf_context *
mlx5_flow_tcf_socket_create(void) mlx5_flow_tcf_context_create(void)
{ {
struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE); struct mlx5_flow_tcf_context *ctx = rte_zmalloc(__func__,
sizeof(*ctx),
if (nl) { sizeof(uint32_t));
mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 }, if (!ctx)
sizeof(int)); goto error;
if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID)) ctx->nl = flow_tcf_mnl_socket_create();
return nl; if (!ctx->nl)
} goto error;
rte_errno = errno; ctx->buf_size = MNL_SOCKET_BUFFER_SIZE;
if (nl) ctx->buf = rte_zmalloc(__func__,
mnl_socket_close(nl); ctx->buf_size, sizeof(uint32_t));
if (!ctx->buf)
goto error;
ctx->seq = random();
return ctx;
error:
mlx5_flow_tcf_context_destroy(ctx);
return NULL; return NULL;
} }
/** /**
* Destroy a libmnl socket. * Destroy a libmnl context.
* *
* @param nl * @param nl
* Libmnl socket of the @p NETLINK_ROUTE kind. * Libmnl socket of the @p NETLINK_ROUTE kind.
*/ */
void void
mlx5_flow_tcf_socket_destroy(struct mnl_socket *nl) mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx)
{ {
mnl_socket_close(nl); if (!ctx)
return;
flow_tcf_mnl_socket_destroy(ctx->nl);
rte_free(ctx->buf);
rte_free(ctx);
} }