net/mlx5: support user space Rx interrupt event

Implement the rxq interrupt enable/disable callbacks. Each Rx completion
queue is created with a Verbs completion channel whose file descriptor is
exposed through the EAL interrupt handle, so applications can wait for Rx
events instead of busy polling.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Shahaf Shuler 2017-03-14 15:03:09 +02:00 committed by Ferruh Yigit
parent 49e2f374e4
commit 3c7d44af25
8 changed files with 227 additions and 1 deletion

doc/guides/nics/features/mlx5.ini

@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y

doc/guides/rel_notes/release_17_05.rst

@@ -84,6 +84,11 @@ New Features
Added support for Hardware TSO for tunneled and non-tunneled packets.
Tunneling protocols supported are GRE and VXLAN.
* **Added support for Rx interrupts on mlx5 driver.**
Rx queues can be armed with an interrupt which will trigger on the
next packet arrival.
* **Updated the sfc_efx driver.**
* Generic flow API support for Ethernet, VLAN, IPv4, IPv6, UDP and TCP
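
As a usage illustration, here is a minimal polling-loop sketch of how an application might consume this feature, assuming the generic ethdev Rx interrupt API (rte_eth_dev_rx_intr_ctl_q(), rte_eth_dev_rx_intr_enable()/rte_eth_dev_rx_intr_disable(), rte_epoll_wait()); the helper name and structure below are illustrative and not part of this patch:

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>
    #include <rte_mbuf.h>

    /* Sleep on the Rx interrupt while the queue is idle instead of busy
     * polling. Assumes the port was configured with
     * dev_conf.intr_conf.rxq = 1 and that the queue was registered after
     * rte_eth_dev_start() with something like
     * rte_eth_dev_rx_intr_ctl_q(port, queue, RTE_EPOLL_PER_THREAD,
     *                           RTE_INTR_EVENT_ADD, NULL). */
    static uint16_t
    rx_burst_or_sleep(uint8_t port, uint16_t queue,
                      struct rte_mbuf **pkts, uint16_t n)
    {
        struct rte_epoll_event event;
        uint16_t nb_rx = rte_eth_rx_burst(port, queue, pkts, n);

        if (nb_rx > 0)
            return nb_rx;
        /* Queue looks empty: arm the interrupt (mlx5_rx_intr_enable below). */
        rte_eth_dev_rx_intr_enable(port, queue);
        /* Re-check to close the race between the empty poll and arming. */
        nb_rx = rte_eth_rx_burst(port, queue, pkts, n);
        if (nb_rx == 0)
            rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1 /* no timeout */);
        /* Drain and ack the completion event (mlx5_rx_intr_disable below). */
        rte_eth_dev_rx_intr_disable(port, queue);
        return rte_eth_rx_burst(port, queue, pkts, n);
    }

The second rte_eth_rx_burst() call after arming matters: since the interrupt only fires on the next packet arrival, a packet landing between the empty poll and the arming would otherwise go unnoticed until the following packet arrives.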

drivers/net/mlx5/Makefile

@@ -130,6 +130,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
/usr/include/linux/ethtool.h \
enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_UPDATE_CQ_CI \
infiniband/mlx5_hw.h \
func ibv_mlx5_exp_update_cq_ci \
$(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
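
The probe added above looks for ibv_mlx5_exp_update_cq_ci() in infiniband/mlx5_hw.h; when it is found, mlx5_autoconf.h is expected to end up with a define along the following lines (a sketch of the generated guard, not the literal file), which the new Rx interrupt callbacks in mlx5_rxtx.c test with #ifdef HAVE_UPDATE_CQ_CI:

    /* Sketch of the guard generated into mlx5_autoconf.h when the probe succeeds. */
    #define HAVE_UPDATE_CQ_CI 1

Without it, mlx5_rx_intr_enable() and mlx5_rx_intr_disable() compile to stubs that simply return -1.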

drivers/net/mlx5/mlx5.c

@@ -227,6 +227,8 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
};
static struct {

drivers/net/mlx5/mlx5_rxq.c

@@ -36,6 +36,7 @@
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -57,6 +58,7 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -773,6 +775,8 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
if (rxq_ctrl->cq != NULL)
claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
if (rxq_ctrl->channel != NULL)
claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
if (rxq_ctrl->rd != NULL) {
struct ibv_exp_destroy_res_domain_attr attr = {
.comp_mask = 0,
@@ -1014,6 +1018,16 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
(void *)dev, strerror(ret));
goto error;
}
if (dev->data->dev_conf.intr_conf.rxq) {
tmpl.channel = ibv_create_comp_channel(priv->ctx);
if (tmpl.channel == NULL) {
dev->data->dev_conf.intr_conf.rxq = 0;
ret = ENOMEM;
ERROR("%p: Comp Channel creation failure: %s",
(void *)dev, strerror(ret));
goto error;
}
}
attr.cq = (struct ibv_exp_cq_init_attr){
.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
.res_domain = tmpl.rd,
@@ -1023,7 +1037,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
}
- tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, NULL, 0,
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
&attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
@@ -1347,3 +1361,113 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
rxq = (*priv->rxqs)[index];
return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
}
/**
* Fill epoll fd list for rxq interrupts.
*
* @param priv
* Private structure.
*
* @return
* 0 on success, negative on failure.
*/
int
priv_intr_efd_enable(struct priv *priv)
{
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
if (n == 0)
return 0;
if (n < rxqs_n) {
WARN("rxqs num is larger than EAL max interrupt vector "
"%u > %u unable to supprt rxq interrupts",
rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
return -EINVAL;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
struct rxq_ctrl *rxq_ctrl =
container_of(rxq, struct rxq_ctrl, rxq);
int fd = rxq_ctrl->channel->fd;
int flags;
int rc;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
WARN("failed to change rxq interrupt file "
"descriptor %d for queue index %d", fd, i);
return -1;
}
intr_handle->efds[i] = fd;
}
intr_handle->nb_efd = n;
return 0;
}
/**
* Clean epoll fd list for rxq interrupts.
*
* @param priv
* Private structure.
*/
void
priv_intr_efd_disable(struct priv *priv)
{
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
rte_intr_free_epoll_fd(intr_handle);
}
/**
* Create and init interrupt vector array.
*
* @param priv
* Private structure.
*
* @return
* 0 on success, negative on failure.
*/
int
priv_create_intr_vec(struct priv *priv)
{
unsigned int rxqs_n = priv->rxqs_n;
unsigned int i;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
if (rxqs_n == 0)
return 0;
intr_handle->intr_vec = (int *)
rte_malloc("intr_vec", rxqs_n * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
WARN("Failed to allocate memory for intr_vec "
"rxq interrupt will not be supported");
return -ENOMEM;
}
for (i = 0; i != rxqs_n; ++i) {
/* 1:1 mapping between rxq and interrupt. */
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
}
return 0;
}
/**
* Destroy the interrupt vector array.
*
* @param priv
* Private structure.
*/
void
priv_destroy_intr_vec(struct priv *priv)
{
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
rte_free(intr_handle->intr_vec);
}
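
For context, here is a rough sketch of how the efds[] and intr_vec[] filled above are consumed on the EAL side when a queue is added to an epoll set; this approximates what rte_eth_dev_rx_intr_ctl_q() does internally, and the helper name is illustrative rather than part of this patch:

    #include <stdint.h>
    #include <rte_interrupts.h>

    /* Map Rx queue i to vector RTE_INTR_VEC_RXTX_OFFSET + i; the EAL then
     * adds the matching efds[i] (the completion channel fd made non-blocking
     * above) to the calling lcore's epoll instance. */
    static int
    add_rxq_to_epoll(struct rte_intr_handle *intr_handle, uint16_t queue_id)
    {
        unsigned int vec = RTE_INTR_VEC_RXTX_OFFSET + queue_id;

        return rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD,
                               RTE_INTR_EVENT_ADD, vec, NULL);
    }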

drivers/net/mlx5/mlx5_rxtx.c

@@ -1749,3 +1749,76 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
(void)pkts_n;
return 0;
}
/**
* DPDK callback for rx queue interrupt enable.
*
* @param dev
* Pointer to Ethernet device structure.
* @param rx_queue_id
* RX queue number
*
* @return
* 0 on success, negative on failure.
*/
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#ifdef HAVE_UPDATE_CQ_CI
struct priv *priv = mlx5_get_priv(dev);
struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
struct ibv_cq *cq = rxq_ctrl->cq;
uint16_t ci = rxq->cq_ci;
int ret = 0;
ibv_mlx5_exp_update_cq_ci(cq, ci);
ret = ibv_req_notify_cq(cq, 0);
#else
int ret = -1;
(void)dev;
(void)rx_queue_id;
#endif
if (ret)
WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
return ret;
}
/**
* DPDK callback for rx queue interrupt disable.
*
* @param dev
* Pointer to Ethernet device structure.
* @param rx_queue_id
* RX queue number
*
* @return
* 0 on success, negative on failure.
*/
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#ifdef HAVE_UPDATE_CQ_CI
struct priv *priv = mlx5_get_priv(dev);
struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
struct ibv_cq *cq = rxq_ctrl->cq;
struct ibv_cq *ev_cq;
void *ev_ctx;
int ret = 0;
ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != cq)
ret = -1;
else
ibv_ack_cq_events(cq, 1);
#else
int ret = -1;
(void)dev;
(void)rx_queue_id;
#endif
if (ret)
WARN("unable to disable interrupt on rx queue %d",
rx_queue_id);
return ret;
}
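
The two callbacks above split the standard libibverbs completion-channel sequence across enable (arm) and disable (drain and acknowledge). For reference, the same sequence as one condensed standalone sketch, with error handling trimmed; the mlx5 enable path additionally refreshes the CQ consumer index via ibv_mlx5_exp_update_cq_ci() before arming:

    #include <infiniband/verbs.h>

    /* Arm the CQ, wait for its next completion event, then acknowledge it. */
    static int
    wait_for_rx_event(struct ibv_comp_channel *channel, struct ibv_cq *cq)
    {
        struct ibv_cq *ev_cq;
        void *ev_ctx;

        if (ibv_req_notify_cq(cq, 0))                    /* arm the CQ */
            return -1;
        if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx))  /* blocks on channel->fd */
            return -1;
        if (ev_cq != cq)
            return -1;
        ibv_ack_cq_events(ev_cq, 1);                     /* every event must be acked */
        return 0;
    }

In the driver, the blocking ibv_get_cq_event() step is effectively replaced by the application waiting in rte_epoll_wait() on the channel fd (made non-blocking in priv_intr_efd_enable()), which is why arming and acknowledging live in separate callbacks.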

drivers/net/mlx5/mlx5_rxtx.h

@@ -138,6 +138,7 @@ struct rxq_ctrl {
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
struct ibv_comp_channel *channel;
unsigned int socket; /* CPU socket ID for allocations. */
struct rxq rxq; /* Data path structure. */
};
@@ -299,6 +300,10 @@ int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
int priv_intr_efd_enable(struct priv *priv);
void priv_intr_efd_disable(struct priv *priv);
int priv_create_intr_vec(struct priv *priv);
void priv_destroy_intr_vec(struct priv *priv);
void rxq_cleanup(struct rxq_ctrl *);
int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
@@ -329,6 +334,8 @@ uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* mlx5_mr.c */

drivers/net/mlx5/mlx5_trigger.c

@@ -95,6 +95,11 @@ mlx5_dev_start(struct rte_eth_dev *dev)
goto error;
}
priv_dev_interrupt_handler_install(priv, dev);
if (dev->data->dev_conf.intr_conf.rxq) {
err = priv_intr_efd_enable(priv);
if (!err)
err = priv_create_intr_vec(priv);
}
priv_xstats_init(priv);
priv_unlock(priv);
return 0;
@@ -135,6 +140,10 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
priv_fdir_disable(priv);
priv_flow_stop(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
if (priv->dev->data->dev_conf.intr_conf.rxq) {
priv_destroy_intr_vec(priv);
priv_intr_efd_disable(priv);
}
priv->started = 0;
priv_unlock(priv);
}