net/mlx5: move Rx queue DevX resource

To support shared Rx queues, move the DevX RQ, which is a per-queue
resource, into the Rx queue private data.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Author: Xueming Li <xuemingl@nvidia.com>
Date: 2021-11-04 20:33:17 +08:00
Committed by: Raslan Darawsheh
Commit: 5ceb3a02b0 (parent: 5db77fef78)
8 changed files with 240 additions and 221 deletions
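In short: the DevX RQ handle moves out of struct mlx5_rxq_obj (one per rxq_ctrl, which queues of a shared Rx queue group have in common) into struct mlx5_rxq_priv (one per queue). Below is a minimal sketch of the resulting ownership model; the stand-in type bodies are ours so the fragment compiles on its own, and only the fields this commit touches are spelled out:

#include <stdint.h>

/* Stand-ins (not the real definitions) so the sketch is self-contained. */
struct mlx5_devx_rq { void *rq; };       /* DevX RQ handle. */
struct mlx5_rxq_ctrl { void *obj; };     /* Shared control; obj holds the CQ. */
struct mlx5_priv { uint16_t port_id; };  /* Port private data. */

/*
 * After this commit each Rx queue private struct owns its DevX RQ,
 * while ctrl (and the CQ in ctrl->obj) can be shared by every queue
 * of a shared Rx queue group.
 */
struct mlx5_rxq_priv {
	uint16_t idx;                 /* Queue index. */
	struct mlx5_rxq_ctrl *ctrl;   /* Shared Rx queue control. */
	struct mlx5_priv *priv;       /* Back pointer to port private data. */
	struct mlx5_devx_rq devx_rq;  /* DevX RQ, now a per-queue member. */
};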


@@ -29,13 +29,13 @@
/**
* Modify Rx WQ vlan stripping offload
*
* @param rxq_obj
* Rx queue object.
* @param rxq
* Rx queue.
*
* @return 0 on success, non-0 otherwise
*/
static int
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
{
uint16_t vlan_offloads =
(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
@@ -47,14 +47,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
.flags = vlan_offloads,
};
return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
}
/**
* Modifies the attributes for the specified WQ.
*
* @param rxq_obj
* Verbs Rx queue object.
* @param rxq
* Verbs Rx queue.
* @param type
* Type of change queue state.
*
@@ -62,14 +62,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)
{
struct ibv_wq_attr mod = {
.attr_mask = IBV_WQ_ATTR_STATE,
.wq_state = (enum ibv_wq_state)type,
};
return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
}
/**
@@ -139,21 +139,18 @@ mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
/**
* Create a CQ Verbs object.
*
* @param dev
* Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array.
* @param rxq
* Pointer to Rx queue.
*
* @return
* The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
*/
static struct ibv_cq *
mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_priv *priv = rxq->priv;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
struct {
@@ -199,7 +196,7 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
DRV_LOG(DEBUG,
"Port %u Rx CQE compression is disabled for HW"
" timestamp.",
dev->data->port_id);
priv->dev_data->port_id);
}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
if (RTE_CACHE_LINE_SIZE == 128) {
@@ -216,21 +213,18 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
/**
* Create a WQ Verbs object.
*
* @param dev
* Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array.
* @param rxq
* Pointer to Rx queue.
*
* @return
* The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
*/
static struct ibv_wq *
mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_priv *priv = rxq->priv;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
unsigned int wqe_n = 1 << rxq_data->elts_n;
struct {
@@ -297,7 +291,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
DRV_LOG(ERR,
"Port %u Rx queue %u requested %u*%u but got"
" %u*%u WRs*SGEs.",
dev->data->port_id, idx,
priv->dev_data->port_id, rxq->idx,
wqe_n >> rxq_data->sges_n,
(1 << rxq_data->sges_n),
wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
@@ -312,21 +306,20 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
/**
* Create the Rx queue Verbs object.
*
* @param dev
* Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array.
* @param rxq
* Pointer to Rx queue.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
uint16_t idx = rxq->idx;
struct mlx5_priv *priv = rxq->priv;
uint16_t port_id = priv->dev_data->port_id;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
struct mlx5dv_cq cq_info;
struct mlx5dv_rwq rwq;
@@ -341,17 +334,17 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
if (!tmpl->ibv_channel) {
DRV_LOG(ERR, "Port %u: comp channel creation failure.",
dev->data->port_id);
port_id);
rte_errno = ENOMEM;
goto error;
}
tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
}
/* Create CQ using Verbs API. */
tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);
tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);
if (!tmpl->ibv_cq) {
DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
dev->data->port_id, idx);
port_id, idx);
rte_errno = ENOMEM;
goto error;
}
@@ -366,7 +359,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
DRV_LOG(ERR,
"Port %u wrong MLX5_CQE_SIZE environment "
"variable value: it should be set to %u.",
dev->data->port_id, RTE_CACHE_LINE_SIZE);
port_id, RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
@@ -377,19 +370,19 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
rxq_data->cq_uar = cq_info.cq_uar;
rxq_data->cqn = cq_info.cqn;
/* Create WQ (RQ) using Verbs API. */
tmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);
tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
if (!tmpl->wq) {
DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
dev->data->port_id, idx);
port_id, idx);
rte_errno = ENOMEM;
goto error;
}
/* Change queue state to ready. */
ret = mlx5_ibv_modify_wq(tmpl, IBV_WQS_RDY);
ret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);
if (ret) {
DRV_LOG(ERR,
"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
dev->data->port_id, idx);
port_id, idx);
rte_errno = ret;
goto error;
}
@@ -405,7 +398,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
rxq_data->cq_arm_sn = 0;
mlx5_rxq_initialize(rxq_data);
rxq_data->cq_ci = 0;
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
return 0;
error:
@@ -423,12 +416,14 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
/**
* Release an Rx verbs queue object.
*
* @param rxq_obj
* Verbs Rx queue object.
* @param rxq
* Pointer to Rx queue.
*/
static void
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
{
struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
MLX5_ASSERT(rxq_obj);
MLX5_ASSERT(rxq_obj->wq);
MLX5_ASSERT(rxq_obj->ibv_cq);
@@ -652,12 +647,24 @@ static void
mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
struct mlx5_rxq_obj *rxq_obj;
if (rxq->wq)
claim_zero(mlx5_glue->destroy_wq(rxq->wq));
if (rxq->ibv_cq)
claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
if (rxq == NULL)
return;
if (rxq->ctrl == NULL)
goto free_priv;
rxq_obj = rxq->ctrl->obj;
if (rxq_obj == NULL)
goto free_ctrl;
if (rxq_obj->wq)
claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
if (rxq_obj->ibv_cq)
claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
mlx5_free(rxq_obj);
free_ctrl:
mlx5_free(rxq->ctrl);
free_priv:
mlx5_free(rxq);
priv->drop_queue.rxq = NULL;
}
@@ -676,39 +683,58 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_context *ctx = priv->sh->cdev->ctx;
struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
struct mlx5_rxq_obj *rxq_obj = NULL;
if (rxq)
if (rxq != NULL)
return 0;
rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
if (!rxq) {
if (rxq == NULL) {
DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
priv->drop_queue.rxq = rxq;
rxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
if (!rxq->ibv_cq) {
rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,
SOCKET_ID_ANY);
if (rxq_ctrl == NULL) {
DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue control memory.",
dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
rxq->ctrl = rxq_ctrl;
rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,
SOCKET_ID_ANY);
if (rxq_obj == NULL) {
DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
rxq_ctrl->obj = rxq_obj;
rxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
if (!rxq_obj->ibv_cq) {
DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
dev->data->port_id);
rte_errno = errno;
goto error;
}
rxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
rxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
.pd = priv->sh->cdev->pd,
.cq = rxq->ibv_cq,
.cq = rxq_obj->ibv_cq,
});
if (!rxq->wq) {
if (!rxq_obj->wq) {
DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
dev->data->port_id);
rte_errno = errno;
goto error;
}
priv->drop_queue.rxq = rxq;
return 0;
error:
mlx5_rxq_ibv_obj_drop_release(dev);
@@ -737,7 +763,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
ret = mlx5_rxq_ibv_obj_drop_create(dev);
if (ret < 0)
goto error;
rxq = priv->drop_queue.rxq;
rxq = priv->drop_queue.rxq->ctrl->obj;
ind_tbl = mlx5_glue->create_rwq_ind_table
(priv->sh->cdev->ctx,
&(struct ibv_rwq_ind_table_init_attr){


@@ -300,7 +300,7 @@ struct mlx5_vf_vlan {
/* Flow drop context necessary due to Verbs API. */
struct mlx5_drop {
struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */
struct mlx5_rxq_obj *rxq; /* Rx queue object. */
struct mlx5_rxq_priv *rxq; /* Rx queue. */
};
/* Loopback dummy queue resources required due to Verbs API. */
@@ -1267,7 +1267,6 @@ struct mlx5_rxq_obj {
};
struct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */
struct {
struct mlx5_devx_rq rq_obj; /* DevX RQ object. */
struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
void *devx_channel;
};
@@ -1349,11 +1348,11 @@ struct mlx5_rxq_priv;
/* HW objects operations structure. */
struct mlx5_obj_ops {
int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
int (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_priv *rxq, int on);
int (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);
int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, uint8_t type);
void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
int (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);
void (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);
int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
struct mlx5_ind_table_obj *ind_tbl);
int (*ind_table_modify)(struct rte_eth_dev *dev,
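The signature changes above ripple to every caller: code that used to resolve the queue through priv->rxqs and pass rxq_ctrl->obj now fetches the queue private struct with mlx5_rxq_get() and passes it directly. A sketch of the new calling pattern, condensed from mlx5_rx_queue_stop_primary() later in this diff (the wrapper name rxq_stop_sketch is hypothetical):

static int
rxq_stop_sketch(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
	/* The callback reaches shared state via rxq->ctrl->obj and the
	 * per-queue RQ via rxq->devx_rq. */
	return priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
}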


@@ -30,14 +30,16 @@
/**
* Modify RQ vlan stripping offload
*
* @param rxq_obj
* Rx queue object.
* @param rxq
* Rx queue.
* @param on
* Enable/disable VLAN stripping.
*
* @return
* 0 on success, non-0 otherwise
*/
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
{
struct mlx5_devx_modify_rq_attr rq_attr;
@@ -46,14 +48,14 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
rq_attr.state = MLX5_RQC_STATE_RDY;
rq_attr.vsd = (on ? 0 : 1);
rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}
/**
* Modify RQ using DevX API.
*
* @param rxq_obj
* DevX Rx queue object.
* @param rxq
* DevX rx queue.
* @param type
* Type of change queue state.
*
@@ -61,7 +63,7 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
{
struct mlx5_devx_modify_rq_attr rq_attr;
@@ -86,7 +88,7 @@ mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
default:
break;
}
return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}
/**
@@ -145,42 +147,34 @@ mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
return 0;
}
/**
* Destroy the Rx queue DevX object.
*
* @param rxq_obj
* Rxq object to destroy.
*/
static void
mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
{
mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
}
/**
* Release an Rx DevX queue object.
*
* @param rxq_obj
* DevX Rx queue object.
* @param rxq
* DevX Rx queue.
*/
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
{
MLX5_ASSERT(rxq_obj);
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
MLX5_ASSERT(rxq != NULL);
MLX5_ASSERT(rxq_ctrl != NULL);
if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
MLX5_ASSERT(rxq_obj->rq);
mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
} else {
MLX5_ASSERT(rxq_obj->cq_obj.cq);
MLX5_ASSERT(rxq_obj->rq_obj.rq);
mlx5_rxq_release_devx_resources(rxq_obj);
if (rxq_obj->devx_channel)
mlx5_devx_rq_destroy(&rxq->devx_rq);
memset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));
mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
if (rxq_obj->devx_channel) {
mlx5_os_devx_destroy_event_channel
(rxq_obj->devx_channel);
rxq_obj->devx_channel = NULL;
}
}
}
@@ -224,22 +218,19 @@ mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
/**
* Create a RQ object using DevX.
*
* @param dev
* Pointer to Ethernet device.
* @param rxq_data
* RX queue data.
* @param rxq
* Pointer to Rx queue.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
struct mlx5_rxq_data *rxq_data)
mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_priv *priv = rxq->priv;
struct mlx5_common_device *cdev = priv->sh->cdev;
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
struct mlx5_devx_create_rq_attr rq_attr = { 0 };
uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
uint32_t wqe_size, log_wqe_size;
@@ -281,31 +272,29 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
rq_attr.wq_attr.pd = cdev->pdn;
rq_attr.counter_set_id = priv->counter_set_id;
/* Create RQ using DevX API. */
return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
log_desc_n, &rq_attr, rxq_ctrl->socket);
}
/**
* Create a DevX CQ object for an Rx queue.
*
* @param dev
* Pointer to Ethernet device.
* @param rxq_data
* RX queue data.
* @param rxq
* Pointer to Rx queue.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
struct mlx5_rxq_data *rxq_data)
mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
{
struct mlx5_devx_cq *cq_obj = 0;
struct mlx5_devx_cq_attr cq_attr = { 0 };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_priv *priv = rxq->priv;
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
uint16_t port_id = priv->dev_data->port_id;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
uint32_t log_cqe_n;
uint16_t event_nums[1] = { 0 };
@@ -346,7 +335,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
}
DRV_LOG(DEBUG,
"Port %u Rx CQE compression is enabled, format %d.",
dev->data->port_id, priv->config.cqe_comp_fmt);
port_id, priv->config.cqe_comp_fmt);
/*
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
@@ -355,13 +344,12 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
cqe_n *= 2;
} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
DRV_LOG(DEBUG,
"Port %u Rx CQE compression is disabled for HW"
" timestamp.",
dev->data->port_id);
"Port %u Rx CQE compression is disabled for HW timestamp.",
port_id);
} else if (priv->config.cqe_comp && rxq_data->lro) {
DRV_LOG(DEBUG,
"Port %u Rx CQE compression is disabled for LRO.",
dev->data->port_id);
port_id);
}
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
log_cqe_n = log2above(cqe_n);
@@ -399,27 +387,23 @@
/**
* Create the Rx hairpin queue object.
*
* @param dev
* Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array.
* @param rxq
* Pointer to Rx queue.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
uint16_t idx = rxq->idx;
struct mlx5_priv *priv = rxq->priv;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_devx_create_rq_attr attr = { 0 };
struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
uint32_t max_wq_data;
MLX5_ASSERT(rxq_data);
MLX5_ASSERT(tmpl);
MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
tmpl->rxq_ctrl = rxq_ctrl;
attr.hairpin = 1;
max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
@@ -448,39 +432,36 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
if (!tmpl->rq) {
DRV_LOG(ERR,
"Port %u Rx hairpin queue %u can't create rq object.",
dev->data->port_id, idx);
priv->dev_data->port_id, idx);
rte_errno = errno;
return -rte_errno;
}
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
return 0;
}
/**
* Create the Rx queue DevX object.
*
* @param dev
* Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array.
* @param rxq
* Pointer to Rx queue.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_priv *priv = rxq->priv;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
int ret = 0;
MLX5_ASSERT(rxq_data);
MLX5_ASSERT(tmpl);
if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
return mlx5_rxq_obj_hairpin_new(dev, idx);
return mlx5_rxq_obj_hairpin_new(rxq);
tmpl->rxq_ctrl = rxq_ctrl;
if (rxq_ctrl->irq) {
int devx_ev_flag =
@@ -498,34 +479,32 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
}
/* Create CQ using DevX API. */
ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
ret = mlx5_rxq_create_devx_cq_resources(rxq);
if (ret) {
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
/* Create RQ using DevX API. */
ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
ret = mlx5_rxq_create_devx_rq_resources(rxq);
if (ret) {
DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
dev->data->port_id, idx);
priv->dev_data->port_id, rxq->idx);
rte_errno = ENOMEM;
goto error;
}
/* Change queue state to ready. */
ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
if (ret)
goto error;
rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.wq.umem_buf;
rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.wq.db_rec;
rxq_data->cq_arm_sn = 0;
rxq_data->cq_ci = 0;
rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
mlx5_rxq_initialize(rxq_data);
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
rxq_ctrl->wqn = rxq->devx_rq.rq->id;
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_rxq_devx_obj_release(tmpl);
mlx5_rxq_devx_obj_release(rxq);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -571,15 +550,15 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
rqt_attr->rqt_actual_size = rqt_n;
if (queues == NULL) {
for (i = 0; i < rqt_n; i++)
rqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;
rqt_attr->rq_list[i] =
priv->drop_queue.rxq->devx_rq.rq->id;
return rqt_attr;
}
for (i = 0; i != queues_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
MLX5_ASSERT(rxq != NULL);
rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
}
MLX5_ASSERT(i > 0);
for (j = 0; i != rqt_n; ++j, ++i)
@@ -719,7 +698,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
}
}
} else {
rxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;
rxq_obj_type = priv->drop_queue.rxq->ctrl->type;
}
memset(tir_attr, 0, sizeof(*tir_attr));
tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
@@ -891,9 +870,9 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
int socket_id = dev->device->numa_node;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_obj *rxq = NULL;
struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
struct mlx5_rxq_obj *rxq_obj = NULL;
int ret;
/*
@@ -901,6 +880,13 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
* They are required to hold pointers for cleanup
* and are only accessible via drop queue DevX objects.
*/
rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
if (rxq == NULL) {
DRV_LOG(ERR, "Port %u could not allocate drop queue private",
dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
0, socket_id);
if (rxq_ctrl == NULL) {
@@ -909,27 +895,29 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
rte_errno = ENOMEM;
goto error;
}
rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
if (rxq == NULL) {
rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
if (rxq_obj == NULL) {
DRV_LOG(ERR, "Port %u could not allocate drop queue object",
dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
rxq->rxq_ctrl = rxq_ctrl;
rxq_obj->rxq_ctrl = rxq_ctrl;
rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
rxq_ctrl->sh = priv->sh;
rxq_ctrl->obj = rxq;
rxq_data = &rxq_ctrl->rxq;
rxq_ctrl->obj = rxq_obj;
rxq->ctrl = rxq_ctrl;
rxq->priv = priv;
LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
/* Create CQ using DevX API. */
ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
ret = mlx5_rxq_create_devx_cq_resources(rxq);
if (ret != 0) {
DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
dev->data->port_id);
goto error;
}
/* Create RQ using DevX API. */
ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
ret = mlx5_rxq_create_devx_rq_resources(rxq);
if (ret != 0) {
DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
dev->data->port_id);
@@ -945,18 +933,20 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
if (rxq != NULL) {
if (rxq->rq_obj.rq != NULL)
mlx5_devx_rq_destroy(&rxq->rq_obj);
if (rxq->cq_obj.cq != NULL)
mlx5_devx_cq_destroy(&rxq->cq_obj);
if (rxq->devx_channel)
if (rxq != NULL && rxq->devx_rq.rq != NULL)
mlx5_devx_rq_destroy(&rxq->devx_rq);
if (rxq_obj != NULL) {
if (rxq_obj->cq_obj.cq != NULL)
mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
if (rxq_obj->devx_channel)
mlx5_os_devx_destroy_event_channel
(rxq->devx_channel);
mlx5_free(rxq);
(rxq_obj->devx_channel);
mlx5_free(rxq_obj);
}
if (rxq_ctrl != NULL)
mlx5_free(rxq_ctrl);
if (rxq != NULL)
mlx5_free(rxq);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -971,12 +961,13 @@ static void
mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;
struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
mlx5_rxq_devx_obj_release(rxq);
mlx5_free(rxq);
mlx5_free(rxq_ctrl->obj);
mlx5_free(rxq_ctrl);
mlx5_free(rxq);
priv->drop_queue.rxq = NULL;
}
@@ -996,7 +987,7 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
mlx5_devx_tir_destroy(hrxq);
if (hrxq->ind_table->ind_table != NULL)
mlx5_devx_ind_table_destroy(hrxq->ind_table);
if (priv->drop_queue.rxq->rq != NULL)
if (priv->drop_queue.rxq->devx_rq.rq != NULL)
mlx5_rxq_devx_obj_drop_release(dev);
}


@@ -174,6 +174,7 @@ struct mlx5_rxq_priv {
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
struct mlx5_devx_rq devx_rq;
struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
uint32_t hairpin_status; /* Hairpin binding status. */
};
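With devx_rq embedded here, consumers of the RQ no longer chase rxq_data -> container_of() -> rxq_ctrl -> obj; they go through the queue private struct. A sketch of the new access path, condensed from mlx5_devx_ind_table_create_rqt_attr() earlier in this diff:

	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);

		MLX5_ASSERT(rxq != NULL);
		/* Previously: rxq_ctrl->obj->rq_obj.rq->id. */
		rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
	}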


@@ -471,13 +471,13 @@ int
mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
int ret;
MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
strerror(errno));
@@ -485,7 +485,7 @@ mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
return ret;
}
/* Remove all processes CQEs. */
rxq_sync_cq(rxq);
rxq_sync_cq(&rxq_ctrl->rxq);
/* Free all involved mbufs. */
rxq_free_elts(rxq_ctrl);
/* Set the actual queue state. */
@@ -557,26 +557,26 @@ int
mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
int ret;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* Allocate needed buffers. */
ret = rxq_alloc_elts(rxq_ctrl);
ret = rxq_alloc_elts(rxq->ctrl);
if (ret) {
DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
rte_errno = errno;
return ret;
}
rte_io_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
*rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
rte_io_wmb();
/* Reset RQ consumer before moving queue to READY state. */
*rxq->rq_db = rte_cpu_to_be_32(0);
*rxq_data->rq_db = rte_cpu_to_be_32(0);
rte_io_wmb();
ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
strerror(errno));
@@ -584,8 +584,8 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
return ret;
}
/* Reinitialize RQ - set WQEs. */
mlx5_rxq_initialize(rxq);
rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
mlx5_rxq_initialize(rxq_data);
rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
/* Set actual queue state. */
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
@@ -1835,15 +1835,19 @@ int
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
if (priv->rxq_privs == NULL)
return 0;
rxq = mlx5_rxq_get(dev, idx);
if (rxq == NULL)
return 0;
if (mlx5_rxq_deref(dev, idx) > 1)
return 1;
if (rxq_ctrl->obj) {
priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->obj != NULL) {
priv->obj_ops.rxq_obj_release(rxq);
LIST_REMOVE(rxq_ctrl->obj, next);
mlx5_free(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;


@@ -373,11 +373,9 @@ mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
struct mlx5_priv *priv = dev->data->dev_private;
if (sm->is_wq) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, sm->queue_id);
ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
ret = priv->obj_ops.rxq_obj_modify(rxq, sm->state);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
sm->state, strerror(errno));


@@ -231,7 +231,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
rte_errno = ENOMEM;
goto error;
}
ret = priv->obj_ops.rxq_obj_new(dev, i);
ret = priv->obj_ops.rxq_obj_new(rxq);
if (ret) {
mlx5_free(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;


@@ -91,11 +91,11 @@ void
mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queue);
struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
int ret = 0;
MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
/* Validate hw support */
if (!priv->config.hw_vlan_strip) {
DRV_LOG(ERR, "port %u VLAN stripping is not supported",
@@ -109,20 +109,20 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
return;
}
DRV_LOG(DEBUG, "port %u set VLAN stripping offloads %d for port %uqueue %d",
dev->data->port_id, on, rxq->port_id, queue);
if (!rxq_ctrl->obj) {
dev->data->port_id, on, rxq_data->port_id, queue);
if (rxq->ctrl->obj == NULL) {
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
rxq_data->vlan_strip = !!on;
return;
}
ret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);
ret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq, on);
if (ret) {
DRV_LOG(ERR, "Port %u failed to modify object stripping mode:"
" %s", dev->data->port_id, strerror(rte_errno));
return;
}
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
rxq_data->vlan_strip = !!on;
}
/**