mlx4: merge Rx queue setup functions

Make rxq_setup_qp() handle inline support like rxq_setup_qp_rss() instead of
having two separate functions.

Signed-off-by: Alex Rosenbaum <alexr@mellanox.com>
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
commit decb879408
parent c3f2341582
Author: Alex Rosenbaum
Date: 2015-06-30 11:27:51 +02:00
Committer: Thomas Monjalon

@@ -2653,10 +2653,9 @@ repost:
 	return ret;
 }
 
-#ifdef INLINE_RECV
-
 /**
- * Allocate a Queue Pair in case inline receive is supported.
+ * Allocate a Queue Pair.
+ * Optionally setup inline receive if supported.
  *
  * @param priv
  *   Pointer to private structure.
@@ -2676,7 +2675,6 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
 		.send_cq = cq,
 		/* CQ to be associated with the receive queue. */
 		.recv_cq = cq,
-		.max_inl_recv = priv->inl_recv_size,
 		.cap = {
 			/* Max number of outstanding WRs. */
 			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
@@ -2689,61 +2687,22 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
 					 MLX4_PMD_SGE_WR_N),
 		},
 		.qp_type = IBV_QPT_RAW_PACKET,
-		.pd = priv->pd
+		.comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
+		.pd = priv->pd,
 	};
 
-	attr.comp_mask = IBV_EXP_QP_INIT_ATTR_PD;
+#ifdef INLINE_RECV
+	attr.max_inl_recv = priv->inl_recv_size;
 	attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
+#endif
 	return ibv_exp_create_qp(priv->ctx, &attr);
 }
 
-#else /* INLINE_RECV */
-
-/**
- * Allocate a Queue Pair.
- *
- * @param priv
- *   Pointer to private structure.
- * @param cq
- *   Completion queue to associate with QP.
- * @param desc
- *   Number of descriptors in QP (hint only).
- *
- * @return
- *   QP pointer or NULL in case of error.
- */
-static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
-{
-	struct ibv_qp_init_attr attr = {
-		/* CQ to be associated with the send queue. */
-		.send_cq = cq,
-		/* CQ to be associated with the receive queue. */
-		.recv_cq = cq,
-		.cap = {
-			/* Max number of outstanding WRs. */
-			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
-					priv->device_attr.max_qp_wr :
-					desc),
-			/* Max number of scatter/gather elements in a WR. */
-			.max_recv_sge = ((priv->device_attr.max_sge <
					  MLX4_PMD_SGE_WR_N) ?
-					 priv->device_attr.max_sge :
-					 MLX4_PMD_SGE_WR_N),
-		},
-		.qp_type = IBV_QPT_RAW_PACKET
-	};
-
-	return ibv_create_qp(priv->pd, &attr);
-}
-
-#endif /* INLINE_RECV */
-
 #ifdef RSS_SUPPORT
 
 /**
  * Allocate a RSS Queue Pair.
+ * Optionally setup inline receive if supported.
  *
  * @param priv
  *   Pointer to private structure.
@@ -2766,9 +2725,6 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
 		.send_cq = cq,
 		/* CQ to be associated with the receive queue. */
 		.recv_cq = cq,
-#ifdef INLINE_RECV
-		.max_inl_recv = priv->inl_recv_size,
-#endif
 		.cap = {
 			/* Max number of outstanding WRs. */
 			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
@@ -2787,6 +2743,7 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
 	};
 
 #ifdef INLINE_RECV
+	attr.max_inl_recv = priv->inl_recv_size,
 	attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
 #endif
 	if (parent) {
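
For reference, the merged rxq_setup_qp() resulting from the hunks above reads roughly as follows. This is a sketch reassembled from the diff, not a verbatim copy of the file: the struct ibv_exp_qp_init_attr declaration is assumed from the ibv_exp_create_qp() call and the IBV_EXP_QP_INIT_ATTR_* flags, since the unchanged declaration line falls outside the hunks shown.

/* Sketch of rxq_setup_qp() after the merge, reassembled from the diff above. */
static struct ibv_qp *
rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
{
	/* Assumed type: extended attributes, implied by ibv_exp_create_qp(). */
	struct ibv_exp_qp_init_attr attr = {
		/* CQ to be associated with the send queue. */
		.send_cq = cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_recv_sge = ((priv->device_attr.max_sge <
					  MLX4_PMD_SGE_WR_N) ?
					 priv->device_attr.max_sge :
					 MLX4_PMD_SGE_WR_N),
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		.comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
		.pd = priv->pd,
	};

#ifdef INLINE_RECV
	/* Inline receive is enabled by extending comp_mask at run time. */
	attr.max_inl_recv = priv->inl_recv_size;
	attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
#endif
	return ibv_exp_create_qp(priv->ctx, &attr);
}

With this shape, builds without INLINE_RECV simply skip the two attribute assignments; the separate ibv_create_qp()-based variant is no longer needed, and both rxq_setup_qp() and rxq_setup_qp_rss() follow the same pattern.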