net/mlx5: add hash Rx queue object

A hash Rx queue is a high-level queue providing the RSS hash algorithm,
key, and indirection table used to spread packets.  These objects can
easily be shared between several Verbs flows.  This commit brings this
capability to the PMD.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Author: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
Date:   2017-10-09 16:44:51 +02:00
Committer: Ferruh Yigit
commit f5479b6848, parent 4c7a0f5ff8
5 changed files with 311 additions and 103 deletions
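
The core of the change is a reference-counted get-or-create lookup: a flow
first tries to reuse an existing hash Rx queue whose RSS key, hash fields
and indirection table all match, and only allocates new Verbs objects on a
miss. A minimal sketch of how callers are expected to drive the new API
(the helper name hrxq_get_or_create() is hypothetical; priv, the RSS key
and the queue list come from the caller's context, as in the diff below):

	/* Sketch: mirrors the lookup/create sequence used by
	 * priv_flow_create_action_queue() and priv_flow_start(). */
	static struct mlx5_hrxq *
	hrxq_get_or_create(struct priv *priv, uint8_t *rss_key,
			   uint8_t rss_key_len, uint64_t hash_fields,
			   uint16_t queues[], uint16_t queues_n)
	{
		struct mlx5_hrxq *hrxq;

		/* Reuse a queue with identical key, fields and queue list;
		 * on a hit the reference counter is already incremented. */
		hrxq = mlx5_priv_hrxq_get(priv, rss_key, rss_key_len,
					  hash_fields, queues, queues_n);
		if (hrxq)
			return hrxq;
		/* Miss: create the Verbs hash QP plus indirection table. */
		return mlx5_priv_hrxq_new(priv, rss_key, rss_key_len,
					  hash_fields, queues, queues_n);
	}

Each successful get/new is balanced by one mlx5_priv_hrxq_release(), which
destroys the Verbs objects only when the reference counter reaches zero.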

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c

@@ -235,6 +235,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	if (priv->reta_idx != NULL)
 		rte_free(priv->reta_idx);
 	priv_socket_uninit(priv);
+	ret = mlx5_priv_hrxq_ibv_verify(priv);
+	if (ret)
+		WARN("%p: some Hash Rx queue still remain", (void *)priv);
 	ret = mlx5_priv_ind_table_ibv_verify(priv);
 	if (ret)
 		WARN("%p: some Indirection table still remain", (void *)priv);

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h

@@ -144,11 +144,12 @@ struct priv {
 	struct rte_intr_handle intr_handle; /* Interrupt handler. */
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
-	struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
+	struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
 	TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
+	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
 	/* Verbs Indirection tables. */

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c

@@ -87,17 +87,37 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
			       const void *default_mask,
			       void *data);
 
-struct rte_flow {
-	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
-	struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
-	struct mlx5_ind_table_ibv *ind_table; /**< Indirection table. */
+/** Structure for Drop queue. */
+struct mlx5_hrxq_drop {
+	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
 	struct ibv_qp *qp; /**< Verbs queue pair. */
-	struct ibv_flow *ibv_flow; /**< Verbs flow. */
 	struct ibv_wq *wq; /**< Verbs work queue. */
 	struct ibv_cq *cq; /**< Verbs completion queue. */
+};
+
+/* Flows structures. */
+struct mlx5_flow {
+	uint64_t hash_fields; /**< Fields that participate in the hash. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+};
+
+/* Drop flows structures. */
+struct mlx5_flow_drop {
+	struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	uint32_t mark:1; /**< Set if the flow is marked. */
 	uint32_t drop:1; /**< Drop queue. */
-	uint64_t hash_fields; /**< Fields that participate in the hash. */
+	struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+	struct ibv_flow *ibv_flow; /**< Verbs flow. */
+	uint16_t queues_n; /**< Number of entries in queue[]. */
+	uint16_t (*queues)[]; /**< Queues indexes to use. */
+	union {
+		struct mlx5_flow frxq; /**< Flow with Rx queue. */
+		struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
+	};
 };
 
 /** Static initializer for items. */
@@ -288,14 +308,6 @@ struct mlx5_flow_parse {
 	struct mlx5_flow_action actions; /**< Parsed action result. */
 };
 
-/** Structure for Drop queue. */
-struct rte_flow_drop {
-	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
-	struct ibv_qp *qp; /**< Verbs queue pair. */
-	struct ibv_wq *wq; /**< Verbs work queue. */
-	struct ibv_cq *cq; /**< Verbs completion queue. */
-};
-
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
 	.create = mlx5_flow_create,
@@ -1052,8 +1064,8 @@ priv_flow_create_action_queue_drop(struct priv *priv,
 	rte_flow->ibv_attr = flow->ibv_attr;
 	if (!priv->dev->data->dev_started)
 		return rte_flow;
-	rte_flow->qp = priv->flow_drop_queue->qp;
-	rte_flow->ibv_flow = ibv_create_flow(rte_flow->qp,
+	rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
+	rte_flow->ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
					     rte_flow->ibv_attr);
 	if (!rte_flow->ibv_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1091,62 +1103,52 @@ priv_flow_create_action_queue(struct priv *priv,
 	assert(priv->pd);
 	assert(priv->ctx);
 	assert(!flow->actions.drop);
-	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+	rte_flow =
+		rte_calloc(__func__, 1,
+			   sizeof(*flow) +
+			   flow->actions.queues_n * sizeof(uint16_t),
+			   0);
 	if (!rte_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
 		return NULL;
 	}
+	rte_flow->mark = flow->actions.mark;
+	rte_flow->ibv_attr = flow->ibv_attr;
+	rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
+	memcpy(rte_flow->queues, flow->actions.queues,
+	       flow->actions.queues_n * sizeof(uint16_t));
+	rte_flow->queues_n = flow->actions.queues_n;
+	rte_flow->frxq.hash_fields = flow->hash_fields;
+	rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+						 rss_hash_default_key_len,
+						 flow->hash_fields,
+						 (*rte_flow->queues),
+						 rte_flow->queues_n);
+	if (rte_flow->frxq.hrxq) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "duplicated flow");
+		goto error;
+	}
+	rte_flow->frxq.hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+						 rss_hash_default_key_len,
+						 flow->hash_fields,
+						 (*rte_flow->queues),
+						 rte_flow->queues_n);
+	if (!rte_flow->frxq.hrxq) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot create hash rxq");
+		goto error;
+	}
 	for (i = 0; i != flow->actions.queues_n; ++i) {
 		struct mlx5_rxq_data *q =
			(*priv->rxqs)[flow->actions.queues[i]];
 
 		q->mark |= flow->actions.mark;
 	}
-	rte_flow->mark = flow->actions.mark;
-	rte_flow->ibv_attr = flow->ibv_attr;
-	rte_flow->hash_fields = flow->hash_fields;
-	rte_flow->ind_table =
-		mlx5_priv_ind_table_ibv_get(priv, flow->actions.queues,
-					    flow->actions.queues_n);
-	if (!rte_flow->ind_table) {
-		rte_flow->ind_table =
-			mlx5_priv_ind_table_ibv_new(priv, flow->actions.queues,
-						    flow->actions.queues_n);
-		if (!rte_flow->ind_table) {
-			rte_flow_error_set(error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_HANDLE,
-					   NULL,
-					   "cannot allocate indirection table");
-			goto error;
-		}
-	}
-	rte_flow->qp = ibv_create_qp_ex(
-		priv->ctx,
-		&(struct ibv_qp_init_attr_ex){
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask =
-				IBV_QP_INIT_ATTR_PD |
-				IBV_QP_INIT_ATTR_IND_TABLE |
-				IBV_QP_INIT_ATTR_RX_HASH,
-			.rx_hash_conf = (struct ibv_rx_hash_conf){
-				.rx_hash_function =
-					IBV_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = rss_hash_default_key_len,
-				.rx_hash_key = rss_hash_default_key,
-				.rx_hash_fields_mask = rte_flow->hash_fields,
-			},
-			.rwq_ind_tbl = rte_flow->ind_table->ind_table,
-			.pd = priv->pd
-		});
-	if (!rte_flow->qp) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate QP");
-		goto error;
-	}
 	if (!priv->dev->data->dev_started)
 		return rte_flow;
-	rte_flow->ibv_flow = ibv_create_flow(rte_flow->qp,
+	rte_flow->ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
					     rte_flow->ibv_attr);
 	if (!rte_flow->ibv_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1156,10 +1158,8 @@ priv_flow_create_action_queue(struct priv *priv,
 	return rte_flow;
 error:
 	assert(rte_flow);
-	if (rte_flow->qp)
-		ibv_destroy_qp(rte_flow->qp);
-	if (rte_flow->ind_table)
-		mlx5_priv_ind_table_ibv_release(priv, rte_flow->ind_table);
+	if (rte_flow->frxq.hrxq)
+		mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
 	rte_free(rte_flow);
 	return NULL;
 }
@@ -1277,45 +1277,43 @@ priv_flow_destroy(struct priv *priv,
		  struct rte_flow *flow)
 {
 	unsigned int i;
+	uint16_t *queues;
+	uint16_t queues_n;
 
-	TAILQ_REMOVE(&priv->flows, flow, next);
-	if (flow->ibv_flow)
-		claim_zero(ibv_destroy_flow(flow->ibv_flow));
-	if (flow->drop)
+	if (flow->drop || !flow->mark)
 		goto free;
-	if (flow->qp)
-		claim_zero(ibv_destroy_qp(flow->qp));
-	for (i = 0; i != flow->ind_table->queues_n; ++i) {
+	queues = flow->frxq.hrxq->ind_table->queues;
+	queues_n = flow->frxq.hrxq->ind_table->queues_n;
+	for (i = 0; i != queues_n; ++i) {
 		struct rte_flow *tmp;
-		struct mlx5_rxq_data *rxq_data =
-			(*priv->rxqs)[flow->ind_table->queues[i]];
+		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
+		int mark = 0;
 
 		/*
		 * To remove the mark from the queue, the queue must not be
		 * present in any other marked flow (RSS or not).
		 */
-		if (flow->mark) {
-			int mark = 0;
-
-			TAILQ_FOREACH(tmp, &priv->flows, next) {
-				unsigned int j;
-
-				if (tmp->drop)
-					continue;
-				if (!tmp->mark)
-					continue;
-				for (j = 0;
-				     (j != tmp->ind_table->queues_n) && !mark;
-				     j++)
-					if (tmp->ind_table->queues[j] ==
-					    flow->ind_table->queues[i])
-						mark = 1;
-			}
-			rxq_data->mark = mark;
-		}
+		TAILQ_FOREACH(tmp, &priv->flows, next) {
+			unsigned int j;
+
+			if (!tmp->mark)
+				continue;
+			for (j = 0;
+			     (j != tmp->frxq.hrxq->ind_table->queues_n) &&
+			     !mark;
+			     j++)
+				if (tmp->frxq.hrxq->ind_table->queues[j] ==
+				    queues[i])
+					mark = 1;
+		}
+		rxq_data->mark = mark;
 	}
-	mlx5_priv_ind_table_ibv_release(priv, flow->ind_table);
 free:
+	if (flow->ibv_flow)
+		claim_zero(ibv_destroy_flow(flow->ibv_flow));
+	if (!flow->drop)
+		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+	TAILQ_REMOVE(&priv->flows, flow, next);
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1389,7 +1387,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 static int
 priv_flow_create_drop_queue(struct priv *priv)
 {
-	struct rte_flow_drop *fdq = NULL;
+	struct mlx5_hrxq_drop *fdq = NULL;
 
 	assert(priv->pd);
 	assert(priv->ctx);
@@ -1472,7 +1470,7 @@ error:
 static void
 priv_flow_delete_drop_queue(struct priv *priv)
 {
-	struct rte_flow_drop *fdq = priv->flow_drop_queue;
+	struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
 
 	if (!fdq)
 		return;
@@ -1504,9 +1502,12 @@ priv_flow_stop(struct priv *priv)
 	TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
 		claim_zero(ibv_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
+		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+		flow->frxq.hrxq = NULL;
 		if (flow->mark) {
 			unsigned int n;
-			struct mlx5_ind_table_ibv *ind_tbl = flow->ind_table;
+			struct mlx5_ind_table_ibv *ind_tbl =
+				flow->frxq.hrxq->ind_table;
 
 			for (n = 0; n < ind_tbl->queues_n; ++n)
				(*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
@@ -1535,13 +1536,31 @@ priv_flow_start(struct priv *priv)
 	if (ret)
 		return -1;
 	TAILQ_FOREACH(flow, &priv->flows, next) {
-		struct ibv_qp *qp;
-
-		if (flow->drop)
-			qp = priv->flow_drop_queue->qp;
-		else
-			qp = flow->qp;
-		flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+		if (flow->frxq.hrxq)
+			goto flow_create;
+		flow->frxq.hrxq =
+			mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+					   rss_hash_default_key_len,
+					   flow->frxq.hash_fields,
+					   (*flow->queues),
+					   flow->queues_n);
+		if (flow->frxq.hrxq)
+			goto flow_create;
+		flow->frxq.hrxq =
+			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+					   rss_hash_default_key_len,
+					   flow->frxq.hash_fields,
+					   (*flow->queues),
+					   flow->queues_n);
+		if (!flow->frxq.hrxq) {
+			DEBUG("Flow %p cannot be applied",
+			      (void *)flow);
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+flow_create:
+		flow->ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
+						 flow->ibv_attr);
 		if (!flow->ibv_flow) {
 			DEBUG("Flow %p cannot be applied", (void *)flow);
 			rte_errno = EINVAL;
@@ -1551,8 +1570,11 @@ priv_flow_start(struct priv *priv)
 		if (flow->mark) {
 			unsigned int n;
 
-			for (n = 0; n < flow->ind_table->queues_n; ++n) {
-				uint16_t idx = flow->ind_table->queues[n];
+			for (n = 0;
+			     n < flow->frxq.hrxq->ind_table->queues_n;
+			     ++n) {
+				uint16_t idx =
+					flow->frxq.hrxq->ind_table->queues[n];
				(*priv->rxqs)[idx]->mark = 1;
			}
		}
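
The rte_flow object now embeds its queue list in the same allocation: the
rte_calloc() call in priv_flow_create_action_queue() reserves room for
queues_n extra uint16_t entries, and rte_flow->queues is pointed at the
first byte past the structure. A self-contained toy sketch of that layout
trick (toy_flow and toy_flow_alloc are illustrative names, not driver code):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct toy_flow {
		uint16_t queues_n;    /* Number of entries in queues[]. */
		uint16_t (*queues)[]; /* Points into this same allocation. */
	};

	static struct toy_flow *
	toy_flow_alloc(const uint16_t *queues, uint16_t queues_n)
	{
		struct toy_flow *f =
			calloc(1, sizeof(*f) + queues_n * sizeof(uint16_t));

		if (!f)
			return NULL;
		/* The array starts right after the header struct. */
		f->queues = (uint16_t (*)[])(f + 1);
		memcpy(f->queues, queues, queues_n * sizeof(uint16_t));
		f->queues_n = queues_n;
		return f; /* A single free() releases header and array. */
	}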

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c

@@ -1775,3 +1775,168 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
 	}
 	return ret;
 }
+
+/**
+ * Create a hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering in the hash queue.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   A hash Rx queue on success, NULL otherwise.
+ */
+struct mlx5_hrxq*
+mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	struct ibv_qp *qp;
+
+	ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+	if (!ind_tbl)
+		ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+	if (!ind_tbl)
+		return NULL;
+	qp = ibv_create_qp_ex(
+		priv->ctx,
+		&(struct ibv_qp_init_attr_ex){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_QP_INIT_ATTR_PD |
+				IBV_QP_INIT_ATTR_IND_TABLE |
+				IBV_QP_INIT_ATTR_RX_HASH,
+			.rx_hash_conf = (struct ibv_rx_hash_conf){
+				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_key_len,
+				.rx_hash_key = rss_key,
+				.rx_hash_fields_mask = hash_fields,
+			},
+			.rwq_ind_tbl = ind_tbl->ind_table,
+			.pd = priv->pd,
+		});
+	if (!qp)
+		goto error;
+	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+	if (!hrxq)
+		goto error;
+	hrxq->ind_table = ind_tbl;
+	hrxq->qp = qp;
+	hrxq->rss_key_len = rss_key_len;
+	hrxq->hash_fields = hash_fields;
+	memcpy(hrxq->rss_key, rss_key, rss_key_len);
+	rte_atomic32_inc(&hrxq->refcnt);
+	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
+	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	return hrxq;
+error:
+	mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+	if (qp)
+		claim_zero(ibv_destroy_qp(qp));
+	return NULL;
+}
+
+/**
+ * Get a hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering in the hash queue.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   A matching hash Rx queue on success, NULL otherwise.
+ */
+struct mlx5_hrxq*
+mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+	struct mlx5_hrxq *hrxq;
+
+	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+		struct mlx5_ind_table_ibv *ind_tbl;
+
+		if (hrxq->rss_key_len != rss_key_len)
+			continue;
+		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
+			continue;
+		if (hrxq->hash_fields != hash_fields)
+			continue;
+		ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+		if (!ind_tbl)
+			continue;
+		if (ind_tbl != hrxq->ind_table) {
+			mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+			continue;
+		}
+		rte_atomic32_inc(&hrxq->refcnt);
+		DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+		      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+		return hrxq;
+	}
+	return NULL;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param hrxq
+ *   Pointer to the hash Rx queue to release.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+int
+mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+{
+	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+		claim_zero(ibv_destroy_qp(hrxq->qp));
+		mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+		LIST_REMOVE(hrxq, next);
+		rte_free(hrxq);
+		return 0;
+	}
+	claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
+	return EBUSY;
+}
+
+/**
+ * Verify the hash Rx queue list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+{
+	struct mlx5_hrxq *hrxq;
+	int ret = 0;
+
+	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+		DEBUG("%p: Verbs Hash Rx queue %p still referenced",
+		      (void *)priv, (void *)hrxq);
+		++ret;
+	}
+	return ret;
+}
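
Release is strictly reference-counted: mlx5_priv_hrxq_release() returns 0
once the last reference is gone (QP destroyed, indirection table released,
object unlinked and freed) and EBUSY while other flows still share the
queue; mlx5_priv_hrxq_ibv_verify() then counts any leftovers at close
time. A usage sketch of that contract (driver context assumed; the helper
name below is illustrative, not part of the commit):

	/* Sketch: one release per successful get/new. */
	static void
	flow_hrxq_put(struct priv *priv, struct mlx5_hrxq *hrxq)
	{
		if (mlx5_priv_hrxq_release(priv, hrxq) == EBUSY) {
			/* Other flows still hold references; only this
			 * reference (plus one indirection-table
			 * reference) has been dropped. */
			return;
		}
		/* Returned 0: Verbs objects destroyed, hrxq freed. */
	}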

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h

@@ -165,6 +165,17 @@ struct mlx5_ind_table_ibv {
 	uint16_t queues[]; /**< Queue list. */
 };
 
+/* Hash Rx queue. */
+struct mlx5_hrxq {
+	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+	struct ibv_qp *qp; /* Verbs queue pair. */
+	uint64_t hash_fields; /* Verbs Hash fields. */
+	uint8_t rss_key_len; /* Hash key length in bytes. */
+	uint8_t rss_key[]; /* Hash key. */
+};
+
 /* Hash RX queue types. */
 enum hash_rxq_type {
 	HASH_RXQ_TCPV4,
@@ -362,6 +373,12 @@ struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
						       uint16_t);
 int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
 int mlx5_priv_ind_table_ibv_verify(struct priv *);
+struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t,
+				     uint64_t, uint16_t [], uint16_t);
+struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
+				     uint64_t, uint16_t [], uint16_t);
+int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
+int mlx5_priv_hrxq_ibv_verify(struct priv *);
 
 /* mlx5_txq.c */