net/mlx4: separate Rx configuration functions
Private functions are now prefixed with "mlx4_" to prevent them from conflicting with their mlx5 PMD counterparts at link time. No impact on functionality.

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
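The conflict the rename avoids is a plain linker-level one: once these helpers move out of mlx4.c they can no longer stay `static`, and an unprefixed symbol such as `rxq_cleanup` would clash with an identically named symbol in the mlx5 PMD when both drivers are linked into one static DPDK binary. A minimal illustration with hypothetical file and function names (two translation units shown together, not the actual PMD sources):

```c
/* a_rxq.c -- hypothetical PMD "a" */
void a_rxq_cleanup(void) { /* driver-specific cleanup */ }

/* b_rxq.c -- hypothetical PMD "b" */
void b_rxq_cleanup(void) { /* driver-specific cleanup */ }

/*
 * Had both files defined a non-static rxq_cleanup(), linking a_rxq.o and
 * b_rxq.o into the same binary would fail with a "multiple definition"
 * error; a per-driver prefix keeps every exported helper unique.
 */
```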
parent a2ce2121c0
commit 5b4c63bdae
drivers/net/mlx4/Makefile
@ -39,6 +39,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_intr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c
drivers/net/mlx4/mlx4.c
@ -78,17 +78,6 @@ const char *pmd_mlx4_init_params[] = {

/* Device configuration. */

static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_rxconf *conf,
	  struct rte_mempool *mp);

static void
rxq_cleanup(struct rxq *rxq);

static void
priv_mac_addr_del(struct priv *priv);

/**
 * DPDK callback for Ethernet device configuration.
 *
@ -235,524 +224,6 @@ mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
	return mr;
}

/* RX queues handling. */

/**
 * Allocate RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n)
{
	unsigned int i;
	struct rxq_elt (*elts)[elts_n] =
		rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
				  rxq->socket);

	if (elts == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: can't allocate packets array", (void *)rxq);
		goto error;
	}
	/* For each WR (packet). */
	for (i = 0; (i != elts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[i];
		struct ibv_recv_wr *wr = &elt->wr;
		struct ibv_sge *sge = &(*elts)[i].sge;
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (buf == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: empty mbuf pool", (void *)rxq);
			goto error;
		}
		elt->buf = buf;
		wr->next = &(*elts)[(i + 1)].wr;
		wr->sg_list = sge;
		wr->num_sge = 1;
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* sge->addr must be able to store a pointer. */
		assert(sizeof(sge->addr) >= sizeof(uintptr_t));
		/* SGE keeps its headroom. */
		sge->addr = (uintptr_t)
			((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
		sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
		sge->lkey = rxq->mr->lkey;
		/* Redundant check for tailroom. */
		assert(sge->length == rte_pktmbuf_tailroom(buf));
	}
	/* The last WR pointer must be NULL. */
	(*elts)[(i - 1)].wr.next = NULL;
	DEBUG("%p: allocated and configured %u single-segment WRs",
	      (void *)rxq, elts_n);
	rxq->elts_n = elts_n;
	rxq->elts_head = 0;
	rxq->elts = elts;
	return 0;
error:
	if (elts != NULL) {
		for (i = 0; (i != RTE_DIM(*elts)); ++i)
			rte_pktmbuf_free_seg((*elts)[i].buf);
		rte_free(elts);
	}
	DEBUG("%p: failed, freed everything", (void *)rxq);
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * Free RX queue elements.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct rxq *rxq)
{
	unsigned int i;
	unsigned int elts_n = rxq->elts_n;
	struct rxq_elt (*elts)[elts_n] = rxq->elts;

	DEBUG("%p: freeing WRs", (void *)rxq);
	rxq->elts_n = 0;
	rxq->elts = NULL;
	if (elts == NULL)
		return;
	for (i = 0; (i != RTE_DIM(*elts)); ++i)
		rte_pktmbuf_free_seg((*elts)[i].buf);
	rte_free(elts);
}

/**
 * Unregister a MAC address.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_mac_addr_del(struct priv *priv)
{
#ifndef NDEBUG
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
#endif

	if (!priv->mac_flow)
		return;
	DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	claim_zero(ibv_destroy_flow(priv->mac_flow));
	priv->mac_flow = NULL;
}

/**
 * Register a MAC address.
 *
 * The MAC address is registered in queue 0.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
priv_mac_addr_add(struct priv *priv)
{
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
	struct rxq *rxq;
	struct ibv_flow *flow;

	/* If device isn't started, this is all we need to do. */
	if (!priv->started)
		return 0;
	if (priv->isolated)
		return 0;
	if (*priv->rxqs && (*priv->rxqs)[0])
		rxq = (*priv->rxqs)[0];
	else
		return 0;

	/* Allocate flow specification on the stack. */
	struct __attribute__((packed)) {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth spec;
	} data;
	struct ibv_flow_attr *attr = &data.attr;
	struct ibv_flow_spec_eth *spec = &data.spec;

	if (priv->mac_flow)
		priv_mac_addr_del(priv);
	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	*attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 3,
		.num_of_specs = 1,
		.port = priv->port,
		.flags = 0
	};
	*spec = (struct ibv_flow_spec_eth){
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				(*mac)[0], (*mac)[1], (*mac)[2],
				(*mac)[3], (*mac)[4], (*mac)[5]
			},
		},
		.mask = {
			.dst_mac = "\xff\xff\xff\xff\xff\xff",
		}
	};
	DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	/* Create related flow. */
	flow = ibv_create_flow(rxq->qp, attr);
	if (flow == NULL) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)rxq, rte_errno, strerror(errno));
		return -rte_errno;
	}
	assert(priv->mac_flow == NULL);
	priv->mac_flow = flow;
	return 0;
}

/**
 * Clean up a RX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static void
rxq_cleanup(struct rxq *rxq)
{
	DEBUG("cleaning up %p", (void *)rxq);
	rxq_free_elts(rxq);
	if (rxq->qp != NULL)
		claim_zero(ibv_destroy_qp(rxq->qp));
	if (rxq->cq != NULL)
		claim_zero(ibv_destroy_cq(rxq->cq));
	if (rxq->channel != NULL)
		claim_zero(ibv_destroy_comp_channel(rxq->channel));
	if (rxq->mr != NULL)
		claim_zero(ibv_dereg_mr(rxq->mr));
	memset(rxq, 0, sizeof(*rxq));
}

/**
 * Allocate a Queue Pair.
 * Optionally setup inline receive if supported.
 *
 * @param priv
 *   Pointer to private structure.
 * @param cq
 *   Completion queue to associate with QP.
 * @param desc
 *   Number of descriptors in QP (hint only).
 *
 * @return
 *   QP pointer or NULL in case of error and rte_errno is set.
 */
static struct ibv_qp *
rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
{
	struct ibv_qp *qp;
	struct ibv_qp_init_attr attr = {
		/* CQ to be associated with the send queue. */
		.send_cq = cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_recv_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
	};

	qp = ibv_create_qp(priv->pd, &attr);
	if (!qp)
		rte_errno = errno ? errno : EINVAL;
	return qp;
}

/**
 * Configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   Pointer to RX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
	  unsigned int socket, const struct rte_eth_rxconf *conf,
	  struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq tmpl = {
		.priv = priv,
		.mp = mp,
		.socket = socket
	};
	struct ibv_qp_attr mod;
	struct ibv_recv_wr *bad_wr;
	unsigned int mb_len;
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	mb_len = rte_pktmbuf_data_room_size(mp);
	if (desc == 0) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		goto error;
	}
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		WARN("%p: scattered mode has been requested but is"
		     " not supported, this may lead to packet loss",
		     (void *)dev);
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	/* Use the entire RX mempool as the memory region. */
	tmpl.mr = mlx4_mp2mr(priv->pd, mp);
	if (tmpl.mr == NULL) {
		rte_errno = EINVAL;
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		tmpl.channel = ibv_create_comp_channel(priv->ctx);
		if (tmpl.channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
	if (tmpl.cq == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc);
	if (tmpl.qp == NULL) {
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = rxq_alloc_elts(&tmpl, desc);
	if (ret) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: ibv_post_recv() failed for WR %p: %s",
		      (void *)dev,
		      (void *)bad_wr,
		      strerror(rte_errno));
		goto error;
	}
	mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* Save port ID. */
	tmpl.port_id = dev->data->port_id;
	DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
	/* Clean up rxq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
	rxq_cleanup(rxq);
	*rxq = tmpl;
	DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
	return 0;
error:
	ret = rte_errno;
	rxq_cleanup(&tmpl);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq *rxq = (*priv->rxqs)[idx];
	int ret;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->rxqs_n);
		return -rte_errno;
	}
	if (rxq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)rxq);
		if (priv->started) {
			rte_errno = EEXIST;
			return -rte_errno;
		}
		(*priv->rxqs)[idx] = NULL;
		if (idx == 0)
			priv_mac_addr_del(priv);
		rxq_cleanup(rxq);
	} else {
		rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
		if (rxq == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			return -rte_errno;
		}
	}
	ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
	if (ret)
		rte_free(rxq);
	else {
		rxq->stats.idx = idx;
		DEBUG("%p: adding RX queue %p to list",
		      (void *)dev, (void *)rxq);
		(*priv->rxqs)[idx] = rxq;
		/* Update receive callback. */
		dev->rx_pkt_burst = mlx4_rx_burst;
	}
	return ret;
}

/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
static void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; (i != priv->rxqs_n); ++i)
		if ((*priv->rxqs)[i] == rxq) {
			DEBUG("%p: removing RX queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			(*priv->rxqs)[i] = NULL;
			if (i == 0)
				priv_mac_addr_del(priv);
			break;
		}
	rxq_cleanup(rxq);
	rte_free(rxq);
}

/**
 * DPDK callback to start the device.
 *
@ -774,7 +245,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
		return 0;
	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
	priv->started = 1;
-	ret = priv_mac_addr_add(priv);
+	ret = mlx4_mac_addr_add(priv);
	if (ret)
		goto err;
	ret = mlx4_intr_install(priv);
@ -792,7 +263,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
	return 0;
err:
	/* Rollback. */
-	priv_mac_addr_del(priv);
+	mlx4_mac_addr_del(priv);
	priv->started = 0;
	return ret;
}
@ -816,7 +287,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
	priv->started = 0;
	mlx4_priv_flow_stop(priv);
	mlx4_intr_uninstall(priv);
-	priv_mac_addr_del(priv);
+	mlx4_mac_addr_del(priv);
}

/**
@ -839,7 +310,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
-	priv_mac_addr_del(priv);
+	mlx4_mac_addr_del(priv);
	/*
	 * Prevent crashes when queues are still in use. This is unfortunately
	 * still required for DPDK 1.3 because some programs (such as testpmd)
@ -855,7 +326,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
		if (tmp == NULL)
			continue;
		(*priv->rxqs)[i] = NULL;
-		rxq_cleanup(tmp);
+		mlx4_rxq_cleanup(tmp);
		rte_free(tmp);
	}
	priv->rxqs_n = 0;
@ -1270,7 +741,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
		      mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		priv->mac = mac;
-		if (priv_mac_addr_add(priv))
+		if (mlx4_mac_addr_add(priv))
			goto port_error;
#ifndef NDEBUG
		{
drivers/net/mlx4/mlx4_rxq.c (new file, 579 lines)
@ -0,0 +1,579 @@
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Rx queues configuration for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/**
 * Allocate Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n)
{
	unsigned int i;
	struct rxq_elt (*elts)[elts_n] =
		rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
				  rxq->socket);

	if (elts == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: can't allocate packets array", (void *)rxq);
		goto error;
	}
	/* For each WR (packet). */
	for (i = 0; (i != elts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[i];
		struct ibv_recv_wr *wr = &elt->wr;
		struct ibv_sge *sge = &(*elts)[i].sge;
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (buf == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: empty mbuf pool", (void *)rxq);
			goto error;
		}
		elt->buf = buf;
		wr->next = &(*elts)[(i + 1)].wr;
		wr->sg_list = sge;
		wr->num_sge = 1;
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* sge->addr must be able to store a pointer. */
		assert(sizeof(sge->addr) >= sizeof(uintptr_t));
		/* SGE keeps its headroom. */
		sge->addr = (uintptr_t)
			((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
		sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
		sge->lkey = rxq->mr->lkey;
		/* Redundant check for tailroom. */
		assert(sge->length == rte_pktmbuf_tailroom(buf));
	}
	/* The last WR pointer must be NULL. */
	(*elts)[(i - 1)].wr.next = NULL;
	DEBUG("%p: allocated and configured %u single-segment WRs",
	      (void *)rxq, elts_n);
	rxq->elts_n = elts_n;
	rxq->elts_head = 0;
	rxq->elts = elts;
	return 0;
error:
	if (elts != NULL) {
		for (i = 0; (i != RTE_DIM(*elts)); ++i)
			rte_pktmbuf_free_seg((*elts)[i].buf);
		rte_free(elts);
	}
	DEBUG("%p: failed, freed everything", (void *)rxq);
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * Free Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
static void
mlx4_rxq_free_elts(struct rxq *rxq)
{
	unsigned int i;
	unsigned int elts_n = rxq->elts_n;
	struct rxq_elt (*elts)[elts_n] = rxq->elts;

	DEBUG("%p: freeing WRs", (void *)rxq);
	rxq->elts_n = 0;
	rxq->elts = NULL;
	if (elts == NULL)
		return;
	for (i = 0; (i != RTE_DIM(*elts)); ++i)
		rte_pktmbuf_free_seg((*elts)[i].buf);
	rte_free(elts);
}

/**
 * Clean up a Rx queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
void
mlx4_rxq_cleanup(struct rxq *rxq)
{
	DEBUG("cleaning up %p", (void *)rxq);
	mlx4_rxq_free_elts(rxq);
	if (rxq->qp != NULL)
		claim_zero(ibv_destroy_qp(rxq->qp));
	if (rxq->cq != NULL)
		claim_zero(ibv_destroy_cq(rxq->cq));
	if (rxq->channel != NULL)
		claim_zero(ibv_destroy_comp_channel(rxq->channel));
	if (rxq->mr != NULL)
		claim_zero(ibv_dereg_mr(rxq->mr));
	memset(rxq, 0, sizeof(*rxq));
}

/**
 * Allocate a Queue Pair.
 * Optionally setup inline receive if supported.
 *
 * @param priv
 *   Pointer to private structure.
 * @param cq
 *   Completion queue to associate with QP.
 * @param desc
 *   Number of descriptors in QP (hint only).
 *
 * @return
 *   QP pointer or NULL in case of error and rte_errno is set.
 */
static struct ibv_qp *
mlx4_rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
{
	struct ibv_qp *qp;
	struct ibv_qp_init_attr attr = {
		/* CQ to be associated with the send queue. */
		.send_cq = cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_recv_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
	};

	qp = ibv_create_qp(priv->pd, &attr);
	if (!qp)
		rte_errno = errno ? errno : EINVAL;
	return qp;
}

/**
 * Configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
	       unsigned int socket, const struct rte_eth_rxconf *conf,
	       struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq tmpl = {
		.priv = priv,
		.mp = mp,
		.socket = socket
	};
	struct ibv_qp_attr mod;
	struct ibv_recv_wr *bad_wr;
	unsigned int mb_len;
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	mb_len = rte_pktmbuf_data_room_size(mp);
	if (desc == 0) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		goto error;
	}
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		WARN("%p: scattered mode has been requested but is"
		     " not supported, this may lead to packet loss",
		     (void *)dev);
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	/* Use the entire Rx mempool as the memory region. */
	tmpl.mr = mlx4_mp2mr(priv->pd, mp);
	if (tmpl.mr == NULL) {
		rte_errno = EINVAL;
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		tmpl.channel = ibv_create_comp_channel(priv->ctx);
		if (tmpl.channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
	if (tmpl.cq == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	tmpl.qp = mlx4_rxq_setup_qp(priv, tmpl.cq, desc);
	if (tmpl.qp == NULL) {
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = mlx4_rxq_alloc_elts(&tmpl, desc);
	if (ret) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: ibv_post_recv() failed for WR %p: %s",
		      (void *)dev,
		      (void *)bad_wr,
		      strerror(rte_errno));
		goto error;
	}
	mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* Save port ID. */
	tmpl.port_id = dev->data->port_id;
	DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
	/* Clean up rxq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
	mlx4_rxq_cleanup(rxq);
	*rxq = tmpl;
	DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
	return 0;
error:
	ret = rte_errno;
	mlx4_rxq_cleanup(&tmpl);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq *rxq = (*priv->rxqs)[idx];
	int ret;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->rxqs_n);
		return -rte_errno;
	}
	if (rxq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)rxq);
		if (priv->started) {
			rte_errno = EEXIST;
			return -rte_errno;
		}
		(*priv->rxqs)[idx] = NULL;
		if (idx == 0)
			mlx4_mac_addr_del(priv);
		mlx4_rxq_cleanup(rxq);
	} else {
		rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
		if (rxq == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			return -rte_errno;
		}
	}
	ret = mlx4_rxq_setup(dev, rxq, desc, socket, conf, mp);
	if (ret) {
		rte_free(rxq);
	} else {
		rxq->stats.idx = idx;
		DEBUG("%p: adding Rx queue %p to list",
		      (void *)dev, (void *)rxq);
		(*priv->rxqs)[idx] = rxq;
		/* Update receive callback. */
		dev->rx_pkt_burst = mlx4_rx_burst;
	}
	return ret;
}

/**
 * DPDK callback to release a Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; (i != priv->rxqs_n); ++i)
		if ((*priv->rxqs)[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			(*priv->rxqs)[i] = NULL;
			if (i == 0)
				mlx4_mac_addr_del(priv);
			break;
		}
	mlx4_rxq_cleanup(rxq);
	rte_free(rxq);
}

/**
 * Unregister a MAC address.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_mac_addr_del(struct priv *priv)
{
#ifndef NDEBUG
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
#endif

	if (!priv->mac_flow)
		return;
	DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	claim_zero(ibv_destroy_flow(priv->mac_flow));
	priv->mac_flow = NULL;
}

/**
 * Register a MAC address.
 *
 * The MAC address is registered in queue 0.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_mac_addr_add(struct priv *priv)
{
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
	struct rxq *rxq;
	struct ibv_flow *flow;

	/* If device isn't started, this is all we need to do. */
	if (!priv->started)
		return 0;
	if (priv->isolated)
		return 0;
	if (*priv->rxqs && (*priv->rxqs)[0])
		rxq = (*priv->rxqs)[0];
	else
		return 0;

	/* Allocate flow specification on the stack. */
	struct __attribute__((packed)) {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth spec;
	} data;
	struct ibv_flow_attr *attr = &data.attr;
	struct ibv_flow_spec_eth *spec = &data.spec;

	if (priv->mac_flow)
		mlx4_mac_addr_del(priv);
	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	*attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 3,
		.num_of_specs = 1,
		.port = priv->port,
		.flags = 0
	};
	*spec = (struct ibv_flow_spec_eth){
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				(*mac)[0], (*mac)[1], (*mac)[2],
				(*mac)[3], (*mac)[4], (*mac)[5]
			},
		},
		.mask = {
			.dst_mac = "\xff\xff\xff\xff\xff\xff",
		}
	};
	DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	/* Create related flow. */
	flow = ibv_create_flow(rxq->qp, attr);
	if (flow == NULL) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)rxq, rte_errno, strerror(errno));
		return -rte_errno;
	}
	assert(priv->mac_flow == NULL);
	priv->mac_flow = flow;
	return 0;
}
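For reference, the exported mlx4_rx_queue_setup() above is reached through the generic ethdev API rather than called directly. A minimal application-side sketch (port number, queue index and descriptor count are illustrative, not taken from this commit):

```c
#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative only: set up one Rx queue on an already configured port.
 * rte_eth_rx_queue_setup() dispatches to the PMD's rx_queue_setup callback,
 * i.e. mlx4_rx_queue_setup() when the port is driven by this PMD. */
static int
setup_one_rxq(struct rte_mempool *mb_pool)
{
	const uint16_t port_id = 0;   /* assumed mlx4 port */
	const uint16_t queue_id = 0;
	const uint16_t nb_desc = 512;

	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      NULL /* default struct rte_eth_rxconf */,
				      mb_pool);
}
```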
drivers/net/mlx4/mlx4_rxtx.h
@ -120,6 +120,17 @@ struct txq {
	unsigned int socket; /**< CPU socket ID for allocations. */
};

/* mlx4_rxq.c */

void mlx4_rxq_cleanup(struct rxq *rxq);
int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t desc, unsigned int socket,
			const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx4_rx_queue_release(void *dpdk_rxq);
void mlx4_mac_addr_del(struct priv *priv);
int mlx4_mac_addr_add(struct priv *priv);

/* mlx4_rxtx.c */

uint32_t mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp);
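These non-static prototypes are what mlx4.c now links against. As a minimal sketch of how such callbacks are typically plugged into a PMD's operation table (field names come from the DPDK ethdev API; the table name is illustrative and the actual mlx4_dev_ops initializer in mlx4.c is not part of this excerpt):

```c
#include <rte_ethdev.h>
#include "mlx4_rxtx.h"

/* Sketch: the ethdev layer dispatches queue setup/release through this table. */
static const struct eth_dev_ops example_mlx4_dev_ops = {
	.rx_queue_setup = mlx4_rx_queue_setup,
	.rx_queue_release = mlx4_rx_queue_release,
	/* ...other callbacks (dev_configure, dev_start, tx_queue_setup, ...) */
};
```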