net/mlx5: remove more Direct Verbs dependencies
Several DV-based structs of type 'struct mlx5dv_devx_XXX' are replaced
with 'void *' to enable compilation under non-Linux operating systems.
New getter functions were added to retrieve the specific fields that
were previously accessed directly.

Replaced structs:
'struct mlx5dv_pp *'
'struct mlx5dv_devx_event_channel *'
'struct mlx5dv_devx_umem *'
'struct mlx5dv_devx_uar *'

Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
parent 1e577c9e5f
commit 1f66ac5bbe
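The change follows one pattern throughout: a caller that used to
dereference an rdma-core DevX struct now holds only an opaque handle and
reads fields through an OS-specific getter. A minimal caller-side sketch
of that pattern, assuming the getters from the patched mlx5_common_os.h
below are in scope ('umem', 'channel' and 'uar' are hypothetical
stand-ins for real DevX objects):

/* Sketch only: 'void *' handles plus getters keep rdma-core types out
 * of common code, so the same callers compile on non-Linux OSes. */
#include <stdint.h>

static void
devx_handle_example(void *umem, void *channel, void *uar)
{
        /* Before: ((struct mlx5dv_devx_umem *)umem)->umem_id */
        uint32_t umem_id = mlx5_os_get_umem_id(umem);
        /* Before: ((struct mlx5dv_devx_event_channel *)channel)->fd */
        int fd = mlx5_os_get_devx_channel_fd(channel);
        /* Before: ((struct mlx5dv_devx_uar *)uar)->base_addr */
        void *db_base = mlx5_os_get_devx_uar_base_addr(uar);

        (void)umem_id;
        (void)fd;
        (void)db_base;
}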
@@ -90,4 +90,115 @@ mlx5_os_get_umem_id(void *umem)
                 return 0;
         return ((struct mlx5dv_devx_umem *)umem)->umem_id;
 }
+
+/**
+ * Get fd. Given a pointer to DevX channel object of type
+ * 'struct mlx5dv_devx_event_channel*' - return its fd.
+ *
+ * @param[in] channel
+ *   Pointer to channel object.
+ *
+ * @return
+ *   The fd if channel is valid, 0 otherwise.
+ */
+static inline int
+mlx5_os_get_devx_channel_fd(void *channel)
+{
+        if (!channel)
+                return 0;
+        return ((struct mlx5dv_devx_event_channel *)channel)->fd;
+}
+
+/**
+ * Get mmap offset. Given a pointer to an DevX UAR object of type
+ * 'struct mlx5dv_devx_uar *' - return its mmap offset.
+ *
+ * @param[in] uar
+ *   Pointer to UAR object.
+ *
+ * @return
+ *   The mmap offset if uar is valid, 0 otherwise.
+ */
+static inline off_t
+mlx5_os_get_devx_uar_mmap_offset(void *uar)
+{
+#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+        if (!uar)
+                return 0;
+        return ((struct mlx5dv_devx_uar *)uar)->mmap_off;
+#else
+        RTE_SET_USED(uar);
+        return 0;
+#endif
+}
+
+/**
+ * Get base addr pointer. Given a pointer to an UAR object of type
+ * 'struct mlx5dv_devx_uar *' - return its base address.
+ *
+ * @param[in] uar
+ *   Pointer to an UAR object.
+ *
+ * @return
+ *   The base address if UAR is valid, 0 otherwise.
+ */
+static inline void *
+mlx5_os_get_devx_uar_base_addr(void *uar)
+{
+#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+        if (!uar)
+                return NULL;
+        return ((struct mlx5dv_devx_uar *)uar)->base_addr;
+#else
+        RTE_SET_USED(uar);
+        return NULL;
+#endif
+}
+
+/**
+ * Get reg addr pointer. Given a pointer to an UAR object of type
+ * 'struct mlx5dv_devx_uar *' - return its reg address.
+ *
+ * @param[in] uar
+ *   Pointer to an UAR object.
+ *
+ * @return
+ *   The reg address if UAR is valid, 0 otherwise.
+ */
+static inline void *
+mlx5_os_get_devx_uar_reg_addr(void *uar)
+{
+#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+        if (!uar)
+                return NULL;
+        return ((struct mlx5dv_devx_uar *)uar)->reg_addr;
+#else
+        RTE_SET_USED(uar);
+        return NULL;
+#endif
+}
+
+/**
+ * Get page id. Given a pointer to an UAR object of type
+ * 'struct mlx5dv_devx_uar *' - return its page id.
+ *
+ * @param[in] uar
+ *   Pointer to an UAR object.
+ *
+ * @return
+ *   The page id if UAR is valid, 0 otherwise.
+ */
+static inline uint32_t
+mlx5_os_get_devx_uar_page_id(void *uar)
+{
+#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+        if (!uar)
+                return 0;
+        return ((struct mlx5dv_devx_uar *)uar)->page_id;
+#else
+        RTE_SET_USED(uar);
+        return 0;
+#endif
+}
+
 #endif /* RTE_PMD_MLX5_COMMON_OS_H_ */
@@ -723,6 +723,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
 {
         uint32_t uar_mapping, retry;
         int err = 0;
+        void *base_addr;
 
         for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
@@ -781,7 +782,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                         err = ENOMEM;
                         goto exit;
                 }
-                if (sh->tx_uar->base_addr)
+                base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+                if (base_addr)
                         break;
                 /*
                  * The UARs are allocated by rdma_core within the
@@ -820,7 +822,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                         err = ENOMEM;
                         goto exit;
                 }
-                if (sh->devx_rx_uar->base_addr)
+                base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
+                if (base_addr)
                         break;
                 /*
                  * The UARs are allocated by rdma_core within the
@@ -943,8 +946,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                 err = mlx5_alloc_rxtx_uars(sh, config);
                 if (err)
                         goto error;
-                MLX5_ASSERT(sh->tx_uar && sh->tx_uar->base_addr);
-                MLX5_ASSERT(sh->devx_rx_uar && sh->devx_rx_uar->base_addr);
+                MLX5_ASSERT(sh->tx_uar);
+                MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
+
+                MLX5_ASSERT(sh->devx_rx_uar);
+                MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
         }
         sh->flow_id_pool = mlx5_flow_id_pool_alloc
                            ((1 << HAIRPIN_FLOW_ID_BITS) - 1);
@@ -527,7 +527,7 @@ struct mlx5_flow_id_pool {
 struct mlx5_txpp_wq {
         /* Completion Queue related data.*/
         struct mlx5_devx_obj *cq;
-        struct mlx5dv_devx_umem *cq_umem;
+        void *cq_umem;
         union {
                 volatile void *cq_buf;
                 volatile struct mlx5_cqe *cqes;
@@ -537,7 +537,7 @@ struct mlx5_txpp_wq {
         uint32_t arm_sn:2;
         /* Send Queue related data.*/
         struct mlx5_devx_obj *sq;
-        struct mlx5dv_devx_umem *sq_umem;
+        void *sq_umem;
         union {
                 volatile void *sq_buf;
                 volatile struct mlx5_wqe *wqes;
@@ -563,10 +563,10 @@ struct mlx5_dev_txpp {
         int32_t skew; /* Scheduling skew. */
         uint32_t eqn; /* Event Queue number. */
         struct rte_intr_handle intr_handle; /* Periodic interrupt. */
-        struct mlx5dv_devx_event_channel *echan; /* Event Channel. */
+        void *echan; /* Event Channel. */
         struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
         struct mlx5_txpp_wq rearm_queue; /* Clock Queue. */
-        struct mlx5dv_pp *pp; /* Packet pacing context. */
+        void *pp; /* Packet pacing context. */
         uint16_t pp_id; /* Packet pacing context index. */
         uint16_t ts_n; /* Number of captured timestamps. */
         uint16_t ts_p; /* Pointer to statisticks timestamp. */
@@ -653,10 +653,10 @@ struct mlx5_dev_ctx_shared {
         struct mlx5_devx_obj *tis; /* TIS object. */
         struct mlx5_devx_obj *td; /* Transport domain. */
         struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
-        struct mlx5dv_devx_uar *tx_uar; /* Tx/packer pacing shared UAR. */
+        void *tx_uar; /* Tx/packet pacing shared UAR. */
         struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
         /* Flex parser profiles information. */
-        struct mlx5dv_devx_uar *devx_rx_uar; /* DevX UAR for Rx. */
+        void *devx_rx_uar; /* DevX UAR for Rx. */
         struct mlx5_dev_shared_port port[]; /* per device port data array. */
 };
@@ -1444,7 +1444,7 @@ mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
         wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
         wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
         wq_attr->dbr_umem_valid = 1;
-        wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
+        wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
         wq_attr->wq_umem_valid = 1;
 }
@@ -1620,8 +1620,9 @@ mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
                 DRV_LOG(ERR, "Failed to register umem for CQ.");
                 goto error;
         }
-        cq_attr.uar_page_id = priv->sh->devx_rx_uar->page_id;
-        cq_attr.q_umem_id = rxq_ctrl->cq_umem->umem_id;
+        cq_attr.uar_page_id =
+                mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
+        cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
         cq_attr.q_umem_valid = 1;
         cq_attr.log_cq_size = log_cqe_n;
         cq_attr.log_page_size = rte_log2_u32(page_size);
@@ -1805,7 +1806,8 @@ mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                                         rte_errno);
                                 goto error;
                         }
-                        tmpl->fd = tmpl->devx_channel->fd;
+                        tmpl->fd =
+                                mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
                 }
         }
         if (mlx5_rxq_mprq_enabled(rxq_data))
@@ -1897,7 +1899,8 @@ mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                 rxq_data->cq_db =
                         (uint32_t *)((uintptr_t)dbr_page->dbrs +
                                      (uintptr_t)rxq_ctrl->cq_dbr_offset);
-                rxq_data->cq_uar = priv->sh->devx_rx_uar->base_addr;
+                rxq_data->cq_uar =
+                        mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
                 /* Create CQ using DevX API. */
                 tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
                 if (!tmpl->devx_cq) {
@@ -185,7 +185,7 @@ struct mlx5_rxq_obj {
                 struct {
                         struct mlx5_devx_obj *rq; /* DevX Rx Queue object. */
                         struct mlx5_devx_obj *devx_cq; /* DevX CQ object. */
-                        struct mlx5dv_devx_event_channel *devx_channel;
+                        void *devx_channel;
                 };
         };
 };
@@ -212,8 +212,8 @@ struct mlx5_rxq_ctrl {
         uint32_t cq_dbr_umem_id;
         uint64_t cq_dbr_offset;
         /* Storing CQ door-bell information, needed when freeing door-bell. */
-        struct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. */
-        struct mlx5dv_devx_umem *cq_umem; /* CQ buffer registration info. */
+        void *wq_umem; /* WQ buffer registration info. */
+        void *cq_umem; /* CQ buffer registration info. */
         struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 };
@@ -361,12 +361,12 @@ struct mlx5_txq_obj {
                 struct {
                         struct rte_eth_dev *dev;
                         struct mlx5_devx_obj *cq_devx;
-                        struct mlx5dv_devx_umem *cq_umem;
+                        void *cq_umem;
                         void *cq_buf;
                         int64_t cq_dbrec_offset;
                         struct mlx5_devx_dbr_page *cq_dbrec_page;
                         struct mlx5_devx_obj *sq_devx;
-                        struct mlx5dv_devx_umem *sq_umem;
+                        void *sq_umem;
                         void *sq_buf;
                         int64_t sq_dbrec_offset;
                         struct mlx5_devx_dbr_page *sq_dbrec_page;
@@ -113,13 +113,13 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
                 rte_errno = errno;
                 return -errno;
         }
-        if (!sh->txpp.pp->index) {
+        if (!((struct mlx5dv_pp *)sh->txpp.pp)->index) {
                 DRV_LOG(ERR, "Zero packet pacing index allocated.");
                 mlx5_txpp_free_pp_index(sh);
                 rte_errno = ENOTSUP;
                 return -ENOTSUP;
         }
-        sh->txpp.pp_id = sh->txpp.pp->index;
+        sh->txpp.pp_id = ((struct mlx5dv_pp *)(sh->txpp.pp))->index;
         return 0;
 #else
         RTE_SET_USED(sh);
@@ -175,6 +175,7 @@ mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
                 uint32_t w32[2];
                 uint64_t w64;
         } cs;
+        void *reg_addr;
 
         wq->sq_ci = ci + 1;
         cs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32
@@ -186,7 +187,8 @@ mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)
         /* Make sure the doorbell record is updated. */
         rte_wmb();
         /* Write to doorbel register to start processing. */
-        __mlx5_uar_write64_relaxed(cs.w64, sh->tx_uar->reg_addr, NULL);
+        reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+        __mlx5_uar_write64_relaxed(cs.w64, reg_addr, NULL);
         rte_wmb();
 }
@@ -282,7 +284,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         /* Create completion queue object for Rearm Queue. */
         cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
                            MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
-        cq_attr.uar_page_id = sh->tx_uar->page_id;
+        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         cq_attr.eqn = sh->txpp.eqn;
         cq_attr.q_umem_valid = 1;
         cq_attr.q_umem_offset = 0;
@@ -335,7 +337,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
         sq_attr.tis_num = sh->tis->id;
         sq_attr.cqn = wq->cq->id;
         sq_attr.cd_master = 1;
-        sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
         sq_attr.wq_attr.pd = sh->pdn;
         sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
@@ -522,14 +524,14 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
                            MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
         cq_attr.use_first_only = 1;
         cq_attr.overrun_ignore = 1;
-        cq_attr.uar_page_id = sh->tx_uar->page_id;
+        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         cq_attr.eqn = sh->txpp.eqn;
         cq_attr.q_umem_valid = 1;
         cq_attr.q_umem_offset = 0;
-        cq_attr.q_umem_id = wq->cq_umem->umem_id;
+        cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
         cq_attr.db_umem_valid = 1;
         cq_attr.db_umem_offset = umem_dbrec;
-        cq_attr.db_umem_id = wq->cq_umem->umem_id;
+        cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
         cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_CLKQ_SIZE);
         cq_attr.log_page_size = rte_log2_u32(page_size);
         wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
@@ -587,16 +589,16 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
         sq_attr.cqn = wq->cq->id;
         sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
         sq_attr.wq_attr.cd_slave = 1;
-        sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
         sq_attr.wq_attr.pd = sh->pdn;
         sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
         sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
         sq_attr.wq_attr.dbr_umem_valid = 1;
         sq_attr.wq_attr.dbr_addr = umem_dbrec;
-        sq_attr.wq_attr.dbr_umem_id = wq->sq_umem->umem_id;
+        sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
         sq_attr.wq_attr.wq_umem_valid = 1;
-        sq_attr.wq_attr.wq_umem_id = wq->sq_umem->umem_id;
+        sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
         /* umem_offset must be zero for static_sq_wq queue. */
         sq_attr.wq_attr.wq_umem_offset = 0;
         wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
@@ -630,11 +632,14 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
 static inline void
 mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)
 {
+        void *base_addr;
+
         struct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;
         uint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;
         uint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;
         uint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);
-        uint32_t *addr = RTE_PTR_ADD(sh->tx_uar->base_addr, MLX5_CQ_DOORBELL);
+        base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
+        uint32_t *addr = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
 
         rte_compiler_barrier();
         aq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);
@@ -881,8 +886,8 @@ static int
 mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
 {
         uint16_t event_nums[1] = {0};
-        int flags;
         int ret;
+        int fd;
 
         rte_atomic32_set(&sh->txpp.err_miss_int, 0);
         rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
@@ -890,15 +895,16 @@ mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
         rte_atomic32_set(&sh->txpp.err_ts_past, 0);
         rte_atomic32_set(&sh->txpp.err_ts_future, 0);
         /* Attach interrupt handler to process Rearm Queue completions. */
-        flags = fcntl(sh->txpp.echan->fd, F_GETFL);
-        ret = fcntl(sh->txpp.echan->fd, F_SETFL, flags | O_NONBLOCK);
+        fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+        ret = mlx5_os_set_nonblock_channel_fd(fd);
         if (ret) {
                 DRV_LOG(ERR, "Failed to change event channel FD.");
                 rte_errno = errno;
                 return -rte_errno;
         }
         memset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));
-        sh->txpp.intr_handle.fd = sh->txpp.echan->fd;
+        fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
+        sh->txpp.intr_handle.fd = fd;
         sh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;
         if (rte_intr_callback_register(&sh->txpp.intr_handle,
                                        mlx5_txpp_interrupt_handler, sh)) {
@@ -907,6 +907,7 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
         size_t page_size;
         struct mlx5_cqe *cqe;
         uint32_t i, nqe;
+        void *reg_addr;
         size_t alignment = (size_t)-1;
         int ret = 0;
 
@@ -991,11 +992,11 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
         /* Create completion queue object with DevX. */
         cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
                            MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
-        cq_attr.uar_page_id = sh->tx_uar->page_id;
+        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         cq_attr.eqn = sh->txpp.eqn;
         cq_attr.q_umem_valid = 1;
         cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
-        cq_attr.q_umem_id = txq_obj->cq_umem->umem_id;
+        cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
         cq_attr.db_umem_valid = 1;
         cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
         cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
@@ -1069,7 +1070,7 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
         sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
         sq_attr.allow_swp = !!priv->config.swp;
         sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
-        sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
         sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
         sq_attr.wq_attr.pd = sh->pdn;
         sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
@@ -1079,7 +1080,7 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
         sq_attr.wq_attr.dbr_umem_id =
                         mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
         sq_attr.wq_attr.wq_umem_valid = 1;
-        sq_attr.wq_attr.wq_umem_id = txq_obj->sq_umem->umem_id;
+        sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
         sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
         txq_obj->sq_devx = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
         if (!txq_obj->sq_devx) {
@@ -1120,9 +1121,11 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
         priv->sh->tdn = priv->sh->td->id;
 #endif
         MLX5_ASSERT(sh->tx_uar);
-        MLX5_ASSERT(sh->tx_uar->reg_addr);
-        txq_ctrl->bf_reg = sh->tx_uar->reg_addr;
-        txq_ctrl->uar_mmap_offset = sh->tx_uar->mmap_off;
+        reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
+        MLX5_ASSERT(reg_addr);
+        txq_ctrl->bf_reg = reg_addr;
+        txq_ctrl->uar_mmap_offset =
+                                mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
         rte_atomic32_set(&txq_obj->refcnt, 1);
         txq_uar_init(txq_ctrl);
         LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
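
For context on why the getters exist at all: with the struct fields
hidden behind 'void *', a non-Linux OS layer only has to provide its own
getter implementations; no 'struct mlx5dv_*' definition is needed outside
the Linux code. A hypothetical stub set for such a port might look like
this (illustration only, not part of this patch):

/* Hypothetical non-Linux stubs: return the same invalid-handle values
 * as the Linux getters until DevX objects are supported on this OS. */
static inline uint32_t
mlx5_os_get_umem_id(void *umem)
{
        (void)umem; /* no DevX umem support here yet */
        return 0;
}

static inline int
mlx5_os_get_devx_channel_fd(void *channel)
{
        (void)channel; /* no DevX event channels here yet */
        return 0;
}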