common/mlx5: refactor memory management
Refactor the common memory B-tree and MR cache management into the common driver. Replace some input parameters of the MR APIs with more generic data structures (PD, port_id, share_cache, ...) so that multiple PMDs can use these MR APIs. Modify the mlx5 net PMD to use the MR management APIs from the common driver.

Signed-off-by: Vu Pham <vuhuong@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
commit b8dc6b0e29
parent a4de9586ac
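For orientation (not part of the commit), here is a minimal sketch of how a PMD data path could resolve an LKey through the refactored common API: a linear search in the per-queue top-half cache first, falling back to the shared-cache bottom half. The helper name example_addr2mr is hypothetical; the caller is assumed to already hold the Verbs PD, the per-process mp_id, the device share_cache and the mr_ext_memseg_en setting.

/*
 * Illustrative sketch only: resolve an LKey with the common MR API.
 * Mirrors the mlx5_rx_addr2mr()/mlx5_tx_mb2mr() pattern further below.
 */
#include <mlx5_common_mr.h>

static uint32_t
example_addr2mr(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
		struct mlx5_mr_share_cache *share_cache,
		struct mlx5_mr_ctrl *mr_ctrl, uintptr_t addr,
		unsigned int mr_ext_memseg_en)
{
	uint32_t lkey;

	/* Top half: linear search in the per-queue cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (lkey != UINT32_MAX)
		return lkey;
	/* Bottom half: shared-cache lookup, registering the MR on miss. */
	return mlx5_mr_addr2mr_bh(pd, mp_id, share_cache, mr_ctrl, addr,
				  mr_ext_memseg_en);
}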
@@ -18,6 +18,7 @@ SRCS-y += mlx5_devx_cmds.c
 SRCS-y += mlx5_common.c
 SRCS-y += mlx5_nl.c
 SRCS-y += mlx5_common_mp.c
+SRCS-y += mlx5_common_mr.c
 ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
 INSTALL-y-lib += $(LIB_GLUE)
 endif
@@ -55,6 +55,7 @@ sources = files(
 	'mlx5_common.c',
 	'mlx5_nl.c',
 	'mlx5_common_mp.c',
+	'mlx5_common_mr.c',
 )
 if not dlopen_ibverbs
 	sources += files('mlx5_glue.c')
drivers/common/mlx5/mlx5_common_mr.c (new file, 1108 lines; diff too large, not shown)
drivers/common/mlx5/mlx5_common_mr.h (new file, 160 lines)
@@ -0,0 +1,160 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 6WIND S.A.
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_COMMON_MR_H_
#define RTE_PMD_MLX5_COMMON_MR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_rwlock.h>
#include <rte_bitmap.h>
#include <rte_memory.h>

#include "mlx5_common_mp.h"

/* Size of per-queue MR cache array for linear search. */
#define MLX5_MR_CACHE_N 8
#define MLX5_MR_BTREE_CACHE_N 256

/* Memory Region object. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
	struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
	const struct rte_memseg_list *msl;
	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
	int ms_n; /* Number of memsegs in use. */
	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */
};

/* Cache entry for Memory Region. */
struct mr_cache_entry {
	uintptr_t start; /* Start address of MR. */
	uintptr_t end; /* End address of MR. */
	uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
} __rte_packed;

/* MR Cache table for Binary search. */
struct mlx5_mr_btree {
	uint16_t len; /* Number of entries. */
	uint16_t size; /* Total number of entries. */
	int overflow; /* Mark failure of table expansion. */
	struct mr_cache_entry (*table)[];
} __rte_packed;

/* Per-queue MR control descriptor. */
struct mlx5_mr_ctrl {
	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
	uint32_t cur_gen; /* Generation number saved to flush caches. */
	uint16_t mru; /* Index of last hit entry in top-half cache. */
	uint16_t head; /* Index of the oldest entry in top-half cache. */
	struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
} __rte_packed;

LIST_HEAD(mlx5_mr_list, mlx5_mr);

/* Global per-device MR cache. */
struct mlx5_mr_share_cache {
	uint32_t dev_gen; /* Generation number to flush local caches. */
	rte_rwlock_t rwlock; /* MR cache Lock. */
	struct mlx5_mr_btree cache; /* Global MR cache table. */
	struct mlx5_mr_list mr_list; /* Registered MR list. */
	struct mlx5_mr_list mr_free_list; /* Freed MR list. */
} __rte_packed;

/**
 * Look up LKey from given lookup table by linear search. Firstly look up the
 * last-hit entry. If miss, the entire array is searched. If found, update the
 * last-hit index and return LKey.
 *
 * @param lkp_tbl
 *   Pointer to lookup table.
 * @param[in,out] cached_idx
 *   Pointer to last-hit index.
 * @param n
 *   Size of lookup table.
 * @param addr
 *   Search key.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
		    uint16_t n, uintptr_t addr)
{
	uint16_t idx;

	if (likely(addr >= lkp_tbl[*cached_idx].start &&
		   addr < lkp_tbl[*cached_idx].end))
		return lkp_tbl[*cached_idx].lkey;
	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
		if (addr >= lkp_tbl[idx].start &&
		    addr < lkp_tbl[idx].end) {
			/* Found. */
			*cached_idx = idx;
			return lkp_tbl[idx].lkey;
		}
	}
	return UINT32_MAX;
}

__rte_experimental
int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
__rte_experimental
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
__rte_experimental
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_experimental
uint32_t mlx5_mr_addr2mr_bh(struct ibv_pd *pd, struct mlx5_mp_id *mp_id,
			    struct mlx5_mr_share_cache *share_cache,
			    struct mlx5_mr_ctrl *mr_ctrl,
			    uintptr_t addr, unsigned int mr_ext_memseg_en);
__rte_experimental
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
__rte_experimental
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
__rte_experimental
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
__rte_experimental
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
__rte_experimental
int
mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mlx5_mr *mr);
__rte_experimental
uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
		     struct mr_cache_entry *entry, uintptr_t addr);
__rte_experimental
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
		    struct mr_cache_entry *entry, uintptr_t addr);
__rte_experimental
struct mlx5_mr *
mlx5_create_mr_ext(struct ibv_pd *pd, uintptr_t addr, size_t len,
		   int socket_id);
__rte_experimental
uint32_t
mlx5_mr_create_primary(struct ibv_pd *pd,
		       struct mlx5_mr_share_cache *share_cache,
		       struct mr_cache_entry *entry, uintptr_t addr,
		       unsigned int mr_ext_memseg_en);

#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */
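For context, a short control-path sketch (again not part of the commit) of how a device-level shared cache could be set up and torn down with the prototypes above. The helper names are hypothetical; the MLX5_MR_BTREE_CACHE_N * 2 sizing and the NUMA node argument follow the mlx5_alloc_shared_ibctx() hunk further down.

/* Illustrative sketch only: set up and tear down a shared MR cache. */
#include <mlx5_common_mr.h>

static int
example_share_cache_init(struct mlx5_mr_share_cache *share_cache,
			 int numa_node)
{
	/* Allocate the bottom-half B-tree of the global cache. */
	return mlx5_mr_btree_init(&share_cache->cache,
				  MLX5_MR_BTREE_CACHE_N * 2, numa_node);
}

static void
example_share_cache_destroy(struct mlx5_mr_share_cache *share_cache)
{
	/* Release the global cache (B-tree and MR lists). */
	mlx5_mr_release_cache(share_cache);
}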
@@ -61,4 +61,18 @@ EXPERIMENTAL {
 	mlx5_mp_req_mr_create;
 	mlx5_mp_req_queue_state_modify;
 	mlx5_mp_req_verbs_cmd_fd;
+
+	mlx5_mr_btree_init;
+	mlx5_mr_btree_free;
+	mlx5_mr_btree_dump;
+	mlx5_mr_addr2mr_bh;
+	mlx5_mr_release_cache;
+	mlx5_mr_dump_cache;
+	mlx5_mr_rebuild_cache;
+	mlx5_mr_insert_cache;
+	mlx5_mr_lookup_cache;
+	mlx5_mr_lookup_list;
+	mlx5_create_mr_ext;
+	mlx5_mr_create_primary;
+	mlx5_mr_flush_local_cache;
 };
@@ -623,7 +623,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
 	 * At this point the device is not added to the memory
 	 * event list yet, context is just being created.
 	 */
-	err = mlx5_mr_btree_init(&sh->mr.cache,
+	err = mlx5_mr_btree_init(&sh->share_cache.cache,
 				 MLX5_MR_BTREE_CACHE_N * 2,
 				 spawn->pci_dev->device.numa_node);
 	if (err) {
@@ -695,7 +695,7 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
 	LIST_REMOVE(sh, mem_event_cb);
 	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 	/* Release created Memory Regions. */
-	mlx5_mr_release(sh);
+	mlx5_mr_release_cache(&sh->share_cache);
 	/* Remove context from the global device list. */
 	LIST_REMOVE(sh, next);
 	/*
@@ -37,10 +37,10 @@
 #include <mlx5_prm.h>
 #include <mlx5_nl.h>
 #include <mlx5_common_mp.h>
+#include <mlx5_common_mr.h>

 #include "mlx5_defs.h"
 #include "mlx5_utils.h"
-#include "mlx5_mr.h"
 #include "mlx5_autoconf.h"

 /** Key string for IPC. */
@@ -199,8 +199,6 @@ struct mlx5_verbs_alloc_ctx {
 	const void *obj; /* Pointer to the DPDK object. */
 };

-LIST_HEAD(mlx5_mr_list, mlx5_mr);
-
 /* Flow drop context necessary due to Verbs API. */
 struct mlx5_drop {
 	struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */
@@ -411,13 +409,7 @@ struct mlx5_ibv_shared {
 	struct ibv_device_attr_ex device_attr; /* Device properties. */
 	LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
 	/**< Called by memory event callback. */
-	struct {
-		uint32_t dev_gen; /* Generation number to flush local caches. */
-		rte_rwlock_t rwlock; /* MR Lock. */
-		struct mlx5_mr_btree cache; /* Global MR cache table. */
-		struct mlx5_mr_list mr_list; /* Registered MR list. */
-		struct mlx5_mr_list mr_free_list; /* Freed MR list. */
-	} mr;
+	struct mlx5_mr_share_cache share_cache;
 	/* Shared DV/DR flow data section. */
 	pthread_mutex_t dv_mutex; /* DV context mutex. */
 	uint32_t dv_meta_mask; /* flow META metadata supported mask. */
@@ -11,6 +11,7 @@
 #include <rte_string_fns.h>

 #include <mlx5_common_mp.h>
+#include <mlx5_common_mr.h>

 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -25,7 +26,7 @@ mlx5_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
 		(const struct mlx5_mp_param *)mp_msg->param;
 	struct rte_eth_dev *dev;
 	struct mlx5_priv *priv;
-	struct mlx5_mr_cache entry;
+	struct mr_cache_entry entry;
 	uint32_t lkey;
 	int ret;

@@ -40,7 +41,10 @@ mlx5_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
 	switch (param->type) {
 	case MLX5_MP_REQ_CREATE_MR:
 		mp_init_msg(&priv->mp_id, &mp_res, param->type);
-		lkey = mlx5_mr_create_primary(dev, &entry, param->args.addr);
+		lkey = mlx5_mr_create_primary(priv->sh->pd,
+					      &priv->sh->share_cache,
+					      &entry, param->args.addr,
+					      priv->config.mr_ext_memseg_en);
 		if (lkey == UINT32_MAX)
 			res->result = -rte_errno;
 		ret = rte_mp_reply(&mp_res, peer);

(diff of one additional large file suppressed; not shown)
@@ -24,99 +24,16 @@
 #include <rte_ethdev.h>
 #include <rte_rwlock.h>
 #include <rte_bitmap.h>
 #include <rte_memory.h>

-/* Memory Region object. */
-struct mlx5_mr {
-	LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
-	struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
-	const struct rte_memseg_list *msl;
-	int ms_base_idx; /* Start index of msl->memseg_arr[]. */
-	int ms_n; /* Number of memsegs in use. */
-	uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
-	struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */
-};
-
-/* Cache entry for Memory Region. */
-struct mlx5_mr_cache {
-	uintptr_t start; /* Start address of MR. */
-	uintptr_t end; /* End address of MR. */
-	uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
-} __rte_packed;
-
-/* MR Cache table for Binary search. */
-struct mlx5_mr_btree {
-	uint16_t len; /* Number of entries. */
-	uint16_t size; /* Total number of entries. */
-	int overflow; /* Mark failure of table expansion. */
-	struct mlx5_mr_cache (*table)[];
-} __rte_packed;
-
-/* Per-queue MR control descriptor. */
-struct mlx5_mr_ctrl {
-	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
-	uint32_t cur_gen; /* Generation number saved to flush caches. */
-	uint16_t mru; /* Index of last hit entry in top-half cache. */
-	uint16_t head; /* Index of the oldest entry in top-half cache. */
-	struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
-	struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
-} __rte_packed;
-
-struct mlx5_ibv_shared;
-extern struct mlx5_dev_list mlx5_mem_event_cb_list;
-extern rte_rwlock_t mlx5_mem_event_rwlock;
+#include <mlx5_common_mr.h>

 /* First entry must be NULL for comparison. */
 #define mlx5_mr_btree_len(bt) ((bt)->len - 1)

-int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
-void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
-uint32_t mlx5_mr_create_primary(struct rte_eth_dev *dev,
-				struct mlx5_mr_cache *entry, uintptr_t addr);
 void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
			   size_t len, void *arg);
 int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
		       struct rte_mempool *mp);
-void mlx5_mr_release(struct mlx5_ibv_shared *sh);
-
-/* Debug purpose functions. */
-void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt);
-void mlx5_mr_dump_dev(struct mlx5_ibv_shared *sh);
-
-/**
- * Look up LKey from given lookup table by linear search. Firstly look up the
- * last-hit entry. If miss, the entire array is searched. If found, update the
- * last-hit index and return LKey.
- *
- * @param lkp_tbl
- *   Pointer to lookup table.
- * @param[in,out] cached_idx
- *   Pointer to last-hit index.
- * @param n
- *   Size of lookup table.
- * @param addr
- *   Search key.
- *
- * @return
- *   Searched LKey on success, UINT32_MAX on no match.
- */
-static __rte_always_inline uint32_t
-mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
-		     uint16_t n, uintptr_t addr)
-{
-	uint16_t idx;
-
-	if (likely(addr >= lkp_tbl[*cached_idx].start &&
-		   addr < lkp_tbl[*cached_idx].end))
-		return lkp_tbl[*cached_idx].lkey;
-	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
-		if (addr >= lkp_tbl[idx].start &&
-		    addr < lkp_tbl[idx].end) {
-			/* Found. */
-			*cached_idx = idx;
-			return lkp_tbl[idx].lkey;
-		}
-	}
-	return UINT32_MAX;
-}
-
 #endif /* RTE_PMD_MLX5_MR_H_ */
@@ -33,6 +33,7 @@

 #include "mlx5_defs.h"
 #include "mlx5.h"
+#include "mlx5_mr.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
@@ -34,11 +34,11 @@
 #include <mlx5_glue.h>
 #include <mlx5_prm.h>
 #include <mlx5_common.h>
+#include <mlx5_common_mr.h>

 #include "mlx5_defs.h"
 #include "mlx5_utils.h"
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_autoconf.h"

 /* Support tunnel matching. */
@@ -598,8 +598,8 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
 	uint32_t lkey;

 	/* Linear search on MR cache array. */
-	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
-				    MLX5_MR_CACHE_N, addr);
+	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
+				   MLX5_MR_CACHE_N, addr);
 	if (likely(lkey != UINT32_MAX))
 		return lkey;
 	/* Take slower bottom-half (Binary Search) on miss. */
@@ -630,8 +630,8 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
 		mlx5_mr_flush_local_cache(mr_ctrl);
 	/* Linear search on MR cache array. */
-	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
-				    MLX5_MR_CACHE_N, addr);
+	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
+				   MLX5_MR_CACHE_N, addr);
 	if (likely(lkey != UINT32_MAX))
 		return lkey;
 	/* Take slower bottom-half on miss. */
@@ -13,6 +13,8 @@

 #include "mlx5_autoconf.h"

+#include "mlx5_mr.h"
+
 /* HW checksum offload capabilities of vectorized Tx. */
 #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
 	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
@@ -11,6 +11,7 @@
 #include <rte_alarm.h>

 #include "mlx5.h"
+#include "mlx5_mr.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
 #include "rte_pmd_mlx5.h"
@@ -30,6 +30,7 @@
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
 #include <mlx5_common.h>
+#include <mlx5_common_mr.h>

 #include "mlx5_defs.h"
 #include "mlx5_utils.h"
|
||||
goto error;
|
||||
}
|
||||
/* Save pointer of global generation number to check memory event. */
|
||||
tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
|
||||
tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
|
||||
MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
|
||||
tmpl->txq.offloads = conf->offloads |
|
||||
dev->data->dev_conf.txmode.offloads;
|
||||
|