net/mlx5: switch to the shared protection domain

The PMD code is updated to use the Protection Domain from the
shared IB device context. The PD is shared between all ethernet
devices belonging to the same multiport Infiniband device. If the
IB device has only one port, the PD is not actually shared, because
only one ethernet device is created over it.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
Author:    Viacheslav Ovsiienko
Date:      2019-03-27 13:15:41 +00:00
Committer: Ferruh Yigit
Parent:    9c0a9eed37
Commit:    1b782252cb

5 changed files with 8 additions and 10 deletions
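
For context, the scheme this patch completes can be sketched as follows:
the Protection Domain lives in the shared IB context (reached from each
port's private data as priv->sh), is allocated once per multiport device,
and the verbs objects of every port are created against it. The snippet
below is a minimal illustration only, not the driver's code: the struct
and helper names (shared_ctx_sketch, priv_sketch, shared_pd_init,
port_reg_mr) are invented for this note, and the real driver routes
verbs calls through its glue layer (mlx5_glue->reg_mr, as visible in
the hunks below).

#include <stddef.h>
#include <infiniband/verbs.h>

/* Shared IB device context: one instance per multiport IB device. */
struct shared_ctx_sketch {
	struct ibv_context *ctx; /* Verbs device context. */
	struct ibv_pd *pd;       /* PD shared by all ports of the device. */
};

/* Per-port private data reaches the PD only through the shared context. */
struct priv_sketch {
	struct shared_ctx_sketch *sh;
};

/* Allocate the PD once, when the shared context is created. */
static int
shared_pd_init(struct shared_ctx_sketch *sh)
{
	sh->pd = ibv_alloc_pd(sh->ctx);
	return sh->pd == NULL ? -1 : 0;
}

/* Any port then registers memory against the shared PD. */
static struct ibv_mr *
port_reg_mr(struct priv_sketch *priv, void *addr, size_t len)
{
	return ibv_reg_mr(priv->sh->pd, addr, len, IBV_ACCESS_LOCAL_WRITE);
}

Because the PD is owned by the shared context, per-port teardown no
longer frees it; it is released together with the shared context when
the last port referencing that context is closed.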

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c

@@ -1098,7 +1098,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	priv->ctx = sh->ctx;
 	priv->ibv_port = spawn->ibv_port;
 	priv->device_attr = sh->device_attr;
-	priv->pd = sh->pd;
 	priv->mtu = ETHER_MTU;
 #ifndef RTE_ARCH_64
 	/* Initialize UAR access locks for 32bit implementations. */

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h

@@ -225,7 +225,6 @@ struct mlx5_priv {
 	uint32_t ibv_port; /* IB device port number. */
 	struct ibv_context *ctx; /* Verbs context. */
 	struct ibv_device_attr_ex device_attr; /* Device properties. */
-	struct ibv_pd *pd; /* Protection Domain. */
 	struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
 	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
 	/* Bit-field of MAC addresses owned by the PMD. */

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c

@@ -720,7 +720,7 @@ mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
 	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
 	 * through mlx5_alloc_verbs_buf().
 	 */
-	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+	mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,
 				       IBV_ACCESS_LOCAL_WRITE);
 	if (mr->ibv_mr == NULL) {
 		DEBUG("port %u fail to create a verbs MR for address (%p)",
@@ -1138,7 +1138,7 @@ mlx5_create_mr_ext(struct rte_eth_dev *dev, uintptr_t addr, size_t len,
 				RTE_CACHE_LINE_SIZE, socket_id);
 	if (mr == NULL)
 		return NULL;
-	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+	mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
 				       IBV_ACCESS_LOCAL_WRITE);
 	if (mr->ibv_mr == NULL) {
 		DRV_LOG(WARNING,

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c

@@ -867,7 +867,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 			.max_wr = wqe_n >> rxq_data->sges_n,
 			/* Max number of scatter/gather elements in a WR. */
 			.max_sge = 1 << rxq_data->sges_n,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 			.cq = tmpl->cq,
 			.comp_mask =
 				IBV_WQ_FLAGS_CVLAN_STRIPPING |
@@ -1831,7 +1831,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 		 },
 		 &qp_init_attr);
 #else
@@ -1850,7 +1850,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 		 });
 #endif
 	if (!qp) {
@@ -2006,7 +2006,7 @@ mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
 			.wq_type = IBV_WQT_RQ,
 			.max_wr = 1,
 			.max_sge = 1,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 			.cq = cq,
 		 });
 	if (!wq) {
@@ -2160,7 +2160,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 				.rx_hash_fields_mask = 0,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->pd
+			.pd = priv->sh->pd
 		 });
 	if (!qp) {
 		DEBUG("port %u cannot allocate QP for drop queue",

diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c

@@ -426,7 +426,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		 * Tx burst.
 		 */
 		.sq_sig_all = 0,
-		.pd = priv->pd,
+		.pd = priv->sh->pd,
 		.comp_mask = IBV_QP_INIT_ATTR_PD,
 	};
 	if (txq_data->max_inline)