common/mlx5: share interrupt management

There is a lot of duplicated code for creating and initializing
rte_intr_handle. Add a new mlx5_os API to do this and replace all
related PMD code with this API.

Signed-off-by: Spike Du <spiked@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Spike Du 2022-06-16 11:41:50 +03:00 committed by Raslan Darawsheh
parent 7158e46cb9
commit 72d7efe464
10 changed files with 210 additions and 266 deletions
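
For orientation before the per-file changes, here is a minimal usage sketch of the two new helpers. It is illustrative only: example_priv, example_event_handler, example_intr_setup, example_intr_teardown, event_fd and the include path are hypothetical and not part of this patch; only mlx5_os_interrupt_handler_create() and mlx5_os_interrupt_handler_destroy() come from it.

#include <rte_errno.h>
#include <rte_interrupts.h>
#include "mlx5_common_os.h"     /* assumed include path for the new helpers */

struct example_priv {
        struct rte_intr_handle *intr_handle;    /* hypothetical private context */
};

static void
example_event_handler(void *cb_arg)
{
        struct example_priv *priv = cb_arg;

        /* Drain and dispatch the events signalled on the registered fd. */
        (void)priv;
}

static int
example_intr_setup(struct example_priv *priv, int event_fd)
{
        /* Shared instance, make the fd non-blocking, register the callback. */
        priv->intr_handle = mlx5_os_interrupt_handler_create
                            (RTE_INTR_INSTANCE_F_SHARED, true, event_fd,
                             example_event_handler, priv);
        if (priv->intr_handle == NULL)
                return -rte_errno;      /* rte_errno is set by the helper */
        return 0;
}

static void
example_intr_teardown(struct example_priv *priv)
{
        /* Unregisters the callback (retrying on -EAGAIN) and frees the handle. */
        mlx5_os_interrupt_handler_destroy(priv->intr_handle,
                                          example_event_handler, priv);
}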


@@ -11,6 +11,7 @@
#endif
#include <dirent.h>
#include <net/if.h>
#include <fcntl.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
@@ -964,3 +965,133 @@ mlx5_os_wrapped_mkey_destroy(struct mlx5_pmd_wrapped_mr *pmd_mr)
	claim_zero(mlx5_glue->dereg_mr(pmd_mr->obj));
	memset(pmd_mr, 0, sizeof(*pmd_mr));
}

/**
 * Rte_intr_handle create and init helper.
 *
 * @param[in] mode
 *   Interrupt instance can be shared between primary and secondary
 *   processes or not.
 * @param[in] set_fd_nonblock
 *   Whether to set fd to O_NONBLOCK.
 * @param[in] fd
 *   Fd to set in created intr_handle.
 * @param[in] cb
 *   Callback to register for intr_handle.
 * @param[in] cb_arg
 *   Callback argument for cb.
 *
 * @return
 *   - Interrupt handle on success.
 *   - NULL on failure, with rte_errno set.
 */
struct rte_intr_handle *
mlx5_os_interrupt_handler_create(int mode, bool set_fd_nonblock, int fd,
				 rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_handle *tmp_intr_handle;
	int ret, flags;

	tmp_intr_handle = rte_intr_instance_alloc(mode);
	if (!tmp_intr_handle) {
		rte_errno = ENOMEM;
		goto err;
	}
	if (set_fd_nonblock) {
		flags = fcntl(fd, F_GETFL);
		ret = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	ret = rte_intr_fd_set(tmp_intr_handle, fd);
	if (ret)
		goto err;
	ret = rte_intr_type_set(tmp_intr_handle, RTE_INTR_HANDLE_EXT);
	if (ret)
		goto err;
	ret = rte_intr_callback_register(tmp_intr_handle, cb, cb_arg);
	if (ret) {
		rte_errno = -ret;
		goto err;
	}
	return tmp_intr_handle;
err:
	if (tmp_intr_handle)
		rte_intr_instance_free(tmp_intr_handle);
	return NULL;
}
/* Safe unregistration for interrupt callback. */
static void
mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
			      rte_intr_callback_fn cb_fn, void *cb_arg)
{
	uint64_t twait = 0;
	uint64_t start = 0;

	do {
		int ret;

		ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
		if (ret >= 0)
			return;
		if (ret != -EAGAIN) {
			DRV_LOG(INFO, "failed to unregister interrupt"
				" handler (error: %d)", ret);
			MLX5_ASSERT(false);
			return;
		}
		if (twait) {
			struct timespec onems;

			/* Wait one millisecond and try again. */
			onems.tv_sec = 0;
			onems.tv_nsec = NS_PER_S / MS_PER_S;
			nanosleep(&onems, 0);
			/* Check whether one second elapsed. */
			if ((rte_get_timer_cycles() - start) <= twait)
				continue;
		} else {
			/*
			 * We get the amount of timer ticks for one second.
			 * If this amount elapsed it means we spent one
			 * second in waiting. This branch is executed once
			 * on first iteration.
			 */
			twait = rte_get_timer_hz();
			MLX5_ASSERT(twait);
		}
		/*
		 * Timeout elapsed, show message (once a second) and retry.
		 * We have no other acceptable option here: if we ignored
		 * the unregistering return code the handler would not be
		 * unregistered, the fd would be closed and we might get a
		 * crash. Hanging and messaging in the loop seems not to be
		 * the worst choice.
		 */
		DRV_LOG(INFO, "Retrying to unregister interrupt handler");
		start = rte_get_timer_cycles();
	} while (true);
}
/**
 * Rte_intr_handle destroy helper.
 *
 * @param[in] intr_handle
 *   Rte_intr_handle to destroy.
 * @param[in] cb
 *   Callback which is registered to intr_handle.
 * @param[in] cb_arg
 *   Callback argument for cb.
 */
void
mlx5_os_interrupt_handler_destroy(struct rte_intr_handle *intr_handle,
				  rte_intr_callback_fn cb, void *cb_arg)
{
	if (rte_intr_fd_get(intr_handle) >= 0)
		mlx5_intr_callback_unregister(intr_handle, cb, cb_arg);
	rte_intr_instance_free(intr_handle);
}


@@ -15,6 +15,7 @@
#include <rte_log.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>
#include <rte_interrupts.h>
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
@@ -299,4 +300,14 @@ __rte_internal
int
mlx5_get_device_guid(const struct rte_pci_addr *dev, uint8_t *guid, size_t len);
__rte_internal
struct rte_intr_handle *
mlx5_os_interrupt_handler_create(int mode, bool set_fd_nonblock, int fd,
				 rte_intr_callback_fn cb, void *cb_arg);
__rte_internal
void
mlx5_os_interrupt_handler_destroy(struct rte_intr_handle *intr_handle,
				  rte_intr_callback_fn cb, void *cb_arg);

#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */


@@ -153,5 +153,7 @@ INTERNAL {
	mlx5_mr_mempool2mr_bh;
	mlx5_mr_mempool_populate_cache;
	mlx5_os_interrupt_handler_create; # WINDOWS_NO_EXPORT
	mlx5_os_interrupt_handler_destroy; # WINDOWS_NO_EXPORT

	local: *;
};


@@ -9,6 +9,7 @@
#include <sys/types.h>
#include <rte_errno.h>
#include <rte_interrupts.h>
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
@@ -253,4 +254,27 @@ void *mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access);
__rte_internal
int mlx5_os_umem_dereg(void *pumem);
static inline struct rte_intr_handle *
mlx5_os_interrupt_handler_create(int mode, bool set_fd_nonblock, int fd,
				 rte_intr_callback_fn cb, void *cb_arg)
{
	(void)mode;
	(void)set_fd_nonblock;
	(void)fd;
	(void)cb;
	(void)cb_arg;
	rte_errno = ENOTSUP;
	return NULL;
}

static inline void
mlx5_os_interrupt_handler_destroy(struct rte_intr_handle *intr_handle,
				  rte_intr_callback_fn cb, void *cb_arg)
{
	(void)intr_handle;
	(void)cb;
	(void)cb_arg;
}

#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */


@@ -881,77 +881,6 @@ mlx5_dev_interrupt_handler(void *cb_arg)
}
}
/*
* Unregister callback handler safely. The handler may be active
* while we are trying to unregister it, in this case code -EAGAIN
* is returned by rte_intr_callback_unregister(). This routine checks
* the return code and tries to unregister handler again.
*
* @param handle
* interrupt handle
* @param cb_fn
* pointer to callback routine
* @cb_arg
* opaque callback parameter
*/
void
mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
rte_intr_callback_fn cb_fn, void *cb_arg)
{
/*
* Try to reduce timeout management overhead by not calling
* the timer related routines on the first iteration. If the
* unregistering succeeds on first call there will be no
* timer calls at all.
*/
uint64_t twait = 0;
uint64_t start = 0;
do {
int ret;
ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
if (ret >= 0)
return;
if (ret != -EAGAIN) {
DRV_LOG(INFO, "failed to unregister interrupt"
" handler (error: %d)", ret);
MLX5_ASSERT(false);
return;
}
if (twait) {
struct timespec onems;
/* Wait one millisecond and try again. */
onems.tv_sec = 0;
onems.tv_nsec = NS_PER_S / MS_PER_S;
nanosleep(&onems, 0);
/* Check whether one second elapsed. */
if ((rte_get_timer_cycles() - start) <= twait)
continue;
} else {
/*
* We get the amount of timer ticks for one second.
* If this amount elapsed it means we spent one
* second in waiting. This branch is executed once
* on first iteration.
*/
twait = rte_get_timer_hz();
MLX5_ASSERT(twait);
}
/*
* Timeout elapsed, show message (once a second) and retry.
* We have no other acceptable option here, if we ignore
* the unregistering return code the handler will not
* be unregistered, fd will be closed and we may get the
* crush. Hanging and messaging in the loop seems not to be
* the worst choice.
*/
DRV_LOG(INFO, "Retrying to unregister interrupt handler");
start = rte_get_timer_cycles();
} while (true);
}
/**
* Handle DEVX interrupts from the NIC.
* This function is probably called from the DPDK host thread.


@@ -2494,40 +2494,6 @@ mlx5_os_net_cleanup(void)
mlx5_pmd_socket_uninit();
}
static int
mlx5_os_dev_shared_handler_install_lsc(struct mlx5_dev_ctx_shared *sh)
{
int nlsk_fd, flags, ret;
nlsk_fd = mlx5_nl_init(NETLINK_ROUTE, RTMGRP_LINK);
if (nlsk_fd < 0) {
DRV_LOG(ERR, "Failed to create a socket for Netlink events: %s",
rte_strerror(rte_errno));
return -1;
}
flags = fcntl(nlsk_fd, F_GETFL);
ret = fcntl(nlsk_fd, F_SETFL, flags | O_NONBLOCK);
if (ret != 0) {
DRV_LOG(ERR, "Failed to make Netlink event socket non-blocking: %s",
strerror(errno));
rte_errno = errno;
goto error;
}
rte_intr_type_set(sh->intr_handle_nl, RTE_INTR_HANDLE_EXT);
rte_intr_fd_set(sh->intr_handle_nl, nlsk_fd);
if (rte_intr_callback_register(sh->intr_handle_nl,
mlx5_dev_interrupt_handler_nl,
sh) != 0) {
DRV_LOG(ERR, "Failed to register Netlink events interrupt");
rte_intr_fd_set(sh->intr_handle_nl, -1);
goto error;
}
return 0;
error:
close(nlsk_fd);
return -1;
}
/**
* Install shared asynchronous device events handler.
* This function is implemented to support event sharing
@@ -2539,76 +2505,47 @@ mlx5_os_dev_shared_handler_install_lsc(struct mlx5_dev_ctx_shared *sh)
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
int ret;
int flags;
struct ibv_context *ctx = sh->cdev->ctx;
int nlsk_fd;
sh->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
if (sh->intr_handle == NULL) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
rte_errno = ENOMEM;
sh->intr_handle = mlx5_os_interrupt_handler_create
(RTE_INTR_INSTANCE_F_SHARED, true,
ctx->async_fd, mlx5_dev_interrupt_handler, sh);
if (!sh->intr_handle) {
DRV_LOG(ERR, "Failed to allocate intr_handle.");
return;
}
rte_intr_fd_set(sh->intr_handle, -1);
flags = fcntl(ctx->async_fd, F_GETFL);
ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
DRV_LOG(INFO, "failed to change file descriptor async event"
" queue");
} else {
rte_intr_fd_set(sh->intr_handle, ctx->async_fd);
rte_intr_type_set(sh->intr_handle, RTE_INTR_HANDLE_EXT);
if (rte_intr_callback_register(sh->intr_handle,
mlx5_dev_interrupt_handler, sh)) {
DRV_LOG(INFO, "Fail to install the shared interrupt.");
rte_intr_fd_set(sh->intr_handle, -1);
}
nlsk_fd = mlx5_nl_init(NETLINK_ROUTE, RTMGRP_LINK);
if (nlsk_fd < 0) {
DRV_LOG(ERR, "Failed to create a socket for Netlink events: %s",
rte_strerror(rte_errno));
return;
}
sh->intr_handle_nl = rte_intr_instance_alloc
(RTE_INTR_INSTANCE_F_SHARED);
sh->intr_handle_nl = mlx5_os_interrupt_handler_create
(RTE_INTR_INSTANCE_F_SHARED, true,
nlsk_fd, mlx5_dev_interrupt_handler_nl, sh);
if (sh->intr_handle_nl == NULL) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
rte_errno = ENOMEM;
return;
}
rte_intr_fd_set(sh->intr_handle_nl, -1);
if (mlx5_os_dev_shared_handler_install_lsc(sh) < 0) {
DRV_LOG(INFO, "Fail to install the shared Netlink event handler.");
rte_intr_fd_set(sh->intr_handle_nl, -1);
}
if (sh->cdev->config.devx) {
#ifdef HAVE_IBV_DEVX_ASYNC
sh->intr_handle_devx =
rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
if (!sh->intr_handle_devx) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
rte_errno = ENOMEM;
return;
}
rte_intr_fd_set(sh->intr_handle_devx, -1);
struct mlx5dv_devx_cmd_comp *devx_comp;
sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
devx_comp = sh->devx_comp;
if (!devx_comp) {
DRV_LOG(INFO, "failed to allocate devx_comp.");
return;
}
flags = fcntl(devx_comp->fd, F_GETFL);
ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
DRV_LOG(INFO, "failed to change file descriptor"
" devx comp");
sh->intr_handle_devx = mlx5_os_interrupt_handler_create
(RTE_INTR_INSTANCE_F_SHARED, true,
devx_comp->fd,
mlx5_dev_interrupt_handler_devx, sh);
if (!sh->intr_handle_devx) {
DRV_LOG(ERR, "Failed to allocate intr_handle.");
return;
}
rte_intr_fd_set(sh->intr_handle_devx, devx_comp->fd);
rte_intr_type_set(sh->intr_handle_devx,
RTE_INTR_HANDLE_EXT);
if (rte_intr_callback_register(sh->intr_handle_devx,
mlx5_dev_interrupt_handler_devx, sh)) {
DRV_LOG(INFO, "Fail to install the devx shared"
" interrupt.");
rte_intr_fd_set(sh->intr_handle_devx, -1);
}
#endif /* HAVE_IBV_DEVX_ASYNC */
}
}
@@ -2624,24 +2561,13 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
int nlsk_fd;
if (rte_intr_fd_get(sh->intr_handle) >= 0)
mlx5_intr_callback_unregister(sh->intr_handle,
mlx5_dev_interrupt_handler, sh);
rte_intr_instance_free(sh->intr_handle);
nlsk_fd = rte_intr_fd_get(sh->intr_handle_nl);
if (nlsk_fd >= 0) {
mlx5_intr_callback_unregister
(sh->intr_handle_nl, mlx5_dev_interrupt_handler_nl, sh);
close(nlsk_fd);
}
rte_intr_instance_free(sh->intr_handle_nl);
mlx5_os_interrupt_handler_destroy(sh->intr_handle,
mlx5_dev_interrupt_handler, sh);
mlx5_os_interrupt_handler_destroy(sh->intr_handle_nl,
mlx5_dev_interrupt_handler_nl, sh);
#ifdef HAVE_IBV_DEVX_ASYNC
if (rte_intr_fd_get(sh->intr_handle_devx) >= 0)
rte_intr_callback_unregister(sh->intr_handle_devx,
mlx5_dev_interrupt_handler_devx, sh);
rte_intr_instance_free(sh->intr_handle_devx);
mlx5_os_interrupt_handler_destroy(sh->intr_handle_devx,
mlx5_dev_interrupt_handler_devx, sh);
if (sh->devx_comp)
mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
#endif


@@ -133,51 +133,6 @@ mlx5_pmd_socket_handle(void *cb __rte_unused)
fclose(file);
}
/**
* Install interrupt handler.
*
* @param dev
* Pointer to Ethernet device.
* @return
* 0 on success, a negative errno value otherwise.
*/
static int
mlx5_pmd_interrupt_handler_install(void)
{
MLX5_ASSERT(server_socket != -1);
server_intr_handle =
rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
if (server_intr_handle == NULL) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
return -ENOMEM;
}
if (rte_intr_fd_set(server_intr_handle, server_socket))
return -rte_errno;
if (rte_intr_type_set(server_intr_handle, RTE_INTR_HANDLE_EXT))
return -rte_errno;
return rte_intr_callback_register(server_intr_handle,
mlx5_pmd_socket_handle, NULL);
}
/**
* Uninstall interrupt handler.
*/
static void
mlx5_pmd_interrupt_handler_uninstall(void)
{
if (server_socket != -1) {
mlx5_intr_callback_unregister(server_intr_handle,
mlx5_pmd_socket_handle,
NULL);
}
rte_intr_fd_set(server_intr_handle, 0);
rte_intr_type_set(server_intr_handle, RTE_INTR_HANDLE_UNKNOWN);
rte_intr_instance_free(server_intr_handle);
}
/**
* Initialise the socket to communicate with external tools.
*
@@ -224,7 +179,10 @@ mlx5_pmd_socket_init(void)
strerror(errno));
goto remove;
}
if (mlx5_pmd_interrupt_handler_install()) {
server_intr_handle = mlx5_os_interrupt_handler_create
(RTE_INTR_INSTANCE_F_PRIVATE, false,
server_socket, mlx5_pmd_socket_handle, NULL);
if (server_intr_handle == NULL) {
DRV_LOG(WARNING, "cannot register interrupt handler for mlx5 socket: %s",
strerror(errno));
goto remove;
@@ -248,7 +206,8 @@ mlx5_pmd_socket_uninit(void)
{
if (server_socket == -1)
return;
mlx5_pmd_interrupt_handler_uninstall();
mlx5_os_interrupt_handler_destroy(server_intr_handle,
mlx5_pmd_socket_handle, NULL);
claim_zero(close(server_socket));
server_socket = -1;
MKSTR(path, MLX5_SOCKET_PATH, getpid());


@@ -1682,8 +1682,6 @@ int mlx5_sysfs_switch_info(unsigned int ifindex,
struct mlx5_switch_info *info);
void mlx5_translate_port_name(const char *port_name_in,
struct mlx5_switch_info *port_info_out);
void mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
rte_intr_callback_fn cb_fn, void *cb_arg);
int mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
char *ifname);
int mlx5_get_module_info(struct rte_eth_dev *dev,


@@ -741,11 +741,8 @@ mlx5_txpp_interrupt_handler(void *cb_arg)
static void
mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)
{
if (!rte_intr_fd_get(sh->txpp.intr_handle))
return;
mlx5_intr_callback_unregister(sh->txpp.intr_handle,
mlx5_txpp_interrupt_handler, sh);
rte_intr_instance_free(sh->txpp.intr_handle);
mlx5_os_interrupt_handler_destroy(sh->txpp.intr_handle,
mlx5_txpp_interrupt_handler, sh);
}
/* Attach interrupt handler and fires first request to Rearm Queue. */
@@ -769,23 +766,12 @@ mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
rte_errno = errno;
return -rte_errno;
}
sh->txpp.intr_handle =
rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
if (sh->txpp.intr_handle == NULL) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
return -ENOMEM;
}
fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
if (rte_intr_fd_set(sh->txpp.intr_handle, fd))
return -rte_errno;
if (rte_intr_type_set(sh->txpp.intr_handle, RTE_INTR_HANDLE_EXT))
return -rte_errno;
if (rte_intr_callback_register(sh->txpp.intr_handle,
mlx5_txpp_interrupt_handler, sh)) {
rte_intr_fd_set(sh->txpp.intr_handle, 0);
DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
sh->txpp.intr_handle = mlx5_os_interrupt_handler_create
(RTE_INTR_INSTANCE_F_SHARED, false,
fd, mlx5_txpp_interrupt_handler, sh);
if (!sh->txpp.intr_handle) {
DRV_LOG(ERR, "Fail to allocate intr_handle");
return -rte_errno;
}
/* Subscribe CQ event to the event channel controlled by the driver. */


@@ -140,28 +140,6 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
/*
* Unregister callback handler safely. The handler may be active
* while we are trying to unregister it, in this case code -EAGAIN
* is returned by rte_intr_callback_unregister(). This routine checks
* the return code and tries to unregister handler again.
*
* @param handle
* interrupt handle
* @param cb_fn
* pointer to callback routine
* @cb_arg
* opaque callback parameter
*/
void
mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
rte_intr_callback_fn cb_fn, void *cb_arg)
{
RTE_SET_USED(handle);
RTE_SET_USED(cb_fn);
RTE_SET_USED(cb_arg);
}
/**
* DPDK callback to get flow control status.
*