rdma: Add a role parameter to memory map creation

Add a parameter that determines the owner of the
map - target or initiator. This allows setting
different access flags when creating Memory Regions;
a usage sketch follows the commit metadata below.

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: I0016847fe116e193d0954db1c8e65066b4ff82bf
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10283
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Authored by Alexey Marchuk on 2021-11-18 22:12:53 +03:00; committed by Tomasz Zawadzki
parent 454561bf89
commit 64fa301f67
6 changed files with 31 additions and 12 deletions
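
For orientation, the sketch below shows how a caller is expected to use the updated spdk_rdma_create_mem_map() signature, mirroring the target-side and initiator-side call sites changed in this commit. The helper name register_translation_maps(), the NULL hooks arguments, and the include path are illustrative assumptions, not part of the change itself.

#include "spdk/stdinc.h"
#include "spdk_internal/rdma.h"	/* assumed header declaring spdk_rdma_create_mem_map() */

/* Illustrative helper (not in the commit): create one memory map per role.
 * The pd pointers are assumed to come from the usual device/qpair setup. */
static int
register_translation_maps(struct ibv_pd *target_pd, struct ibv_pd *initiator_pd)
{
	struct spdk_rdma_mem_map *target_map, *initiator_map;

	/* Target side (as in the NVMe-oF target change below): MRs get local-write access only. */
	target_map = spdk_rdma_create_mem_map(target_pd, NULL, SPDK_RDMA_MEMORY_MAP_ROLE_TARGET);
	if (!target_map) {
		return -ENOMEM;
	}

	/* Initiator side (as in the NVMe driver change below): MRs also get remote read/write access. */
	initiator_map = spdk_rdma_create_mem_map(initiator_pd, NULL, SPDK_RDMA_MEMORY_MAP_ROLE_INITIATOR);
	if (!initiator_map) {
		spdk_rdma_free_mem_map(&target_map);
		return -ENOMEM;
	}

	/* ... perform address -> Memory Region translations, then drop the references ... */
	spdk_rdma_free_mem_map(&initiator_map);
	spdk_rdma_free_mem_map(&target_map);
	return 0;
}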


@@ -111,6 +111,11 @@ struct spdk_rdma_srq {
 	bool shared_stats;
 };
 
+enum spdk_rdma_memory_map_role {
+	SPDK_RDMA_MEMORY_MAP_ROLE_TARGET,
+	SPDK_RDMA_MEMORY_MAP_ROLE_INITIATOR
+};
+
 /**
  * Create RDMA SRQ
  *
@@ -232,10 +237,12 @@ int spdk_rdma_qp_flush_recv_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_re
  *
  * \param pd Protection Domain which will be used to create Memory Regions
  * \param hooks Optional hooks which are used to create Protection Domain or get RKey
+ * \param role Specifies whether this map is used by RDMA target or initiator, determines access flags of registered MRs
  * \return Pointer to memory map or NULL on failure
  */
-struct spdk_rdma_mem_map *spdk_rdma_create_mem_map(struct ibv_pd *pd,
-		struct spdk_nvme_rdma_hooks *hooks);
+struct spdk_rdma_mem_map *
+spdk_rdma_create_mem_map(struct ibv_pd *pd, struct spdk_nvme_rdma_hooks *hooks,
+		enum spdk_rdma_memory_map_role role);
 
 /**
  * Free previously allocated memory map


@@ -1307,7 +1307,8 @@ _nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_q
 	}
 	SPDK_DEBUGLOG(nvme, "RDMA responses registered\n");
 
-	rqpair->mr_map = spdk_rdma_create_mem_map(rqpair->rdma_qp->qp->pd, &g_nvme_hooks);
+	rqpair->mr_map = spdk_rdma_create_mem_map(rqpair->rdma_qp->qp->pd, &g_nvme_hooks,
+			SPDK_RDMA_MEMORY_MAP_ROLE_INITIATOR);
 	if (!rqpair->mr_map) {
 		SPDK_ERRLOG("Unable to register RDMA memory translation map\n");
 		return -1;


@@ -2409,7 +2409,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		assert(device->map == NULL);
 
-		device->map = spdk_rdma_create_mem_map(device->pd, &g_nvmf_hooks);
+		device->map = spdk_rdma_create_mem_map(device->pd, &g_nvmf_hooks, SPDK_RDMA_MEMORY_MAP_ROLE_TARGET);
 		if (!device->map) {
 			SPDK_ERRLOG("Unable to allocate memory map for listen address\n");
 			rc = -ENOMEM;


@@ -34,7 +34,7 @@
 SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
 include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
 
-SO_VER := 2
+SO_VER := 3
 SO_MINOR := 0
 
 SPDK_MAP_FILE = $(abspath $(CURDIR)/spdk_rdma.map)


@@ -46,6 +46,7 @@ struct spdk_rdma_mem_map {
 	struct ibv_pd *pd;
 	struct spdk_nvme_rdma_hooks *hooks;
 	uint32_t ref_count;
+	enum spdk_rdma_memory_map_role role;
 	LIST_ENTRY(spdk_rdma_mem_map) link;
 };
 
@@ -60,6 +61,7 @@ rdma_mem_notify(void *cb_ctx, struct spdk_mem_map *map,
 	struct spdk_rdma_mem_map *rmap = cb_ctx;
 	struct ibv_pd *pd = rmap->pd;
 	struct ibv_mr *mr;
+	uint32_t access_flags = 0;
 	int rc;
 
 	switch (action) {
@@ -68,10 +70,17 @@ rdma_mem_notify(void *cb_ctx, struct spdk_mem_map *map,
 			rc = spdk_mem_map_set_translation(map, (uint64_t)vaddr, size, rmap->hooks->get_rkey(pd, vaddr,
 					size));
 		} else {
-			mr = ibv_reg_mr(pd, vaddr, size,
-					IBV_ACCESS_LOCAL_WRITE |
-					IBV_ACCESS_REMOTE_READ |
-					IBV_ACCESS_REMOTE_WRITE);
+			switch (rmap->role) {
+			case SPDK_RDMA_MEMORY_MAP_ROLE_TARGET:
+				access_flags = IBV_ACCESS_LOCAL_WRITE;
+				break;
+			case SPDK_RDMA_MEMORY_MAP_ROLE_INITIATOR:
+				access_flags = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE;
+				break;
+			default:
+				SPDK_UNREACHABLE();
+			}
+			mr = ibv_reg_mr(pd, vaddr, size, access_flags);
 			if (mr == NULL) {
 				SPDK_ERRLOG("ibv_reg_mr() failed\n");
 				return -1;
@@ -121,14 +130,15 @@ _rdma_free_mem_map(struct spdk_rdma_mem_map *map)
 }
 
 struct spdk_rdma_mem_map *
-spdk_rdma_create_mem_map(struct ibv_pd *pd, struct spdk_nvme_rdma_hooks *hooks)
+spdk_rdma_create_mem_map(struct ibv_pd *pd, struct spdk_nvme_rdma_hooks *hooks,
+		enum spdk_rdma_memory_map_role role)
 {
 	struct spdk_rdma_mem_map *map;
 
 	pthread_mutex_lock(&g_rdma_mr_maps_mutex);
 	/* Look up existing mem map registration for this pd */
 	LIST_FOREACH(map, &g_rdma_mr_maps, link) {
-		if (map->pd == pd) {
+		if (map->pd == pd && map->role == role) {
 			map->ref_count++;
 			pthread_mutex_unlock(&g_rdma_mr_maps_mutex);
 			return map;
@@ -148,6 +158,7 @@ spdk_rdma_create_mem_map(struct ibv_pd *pd, struct spdk_nvme_rdma_hooks *hooks)
 	map->pd = pd;
 	map->ref_count = 1;
 	map->hooks = hooks;
+	map->role = role;
 	map->map = spdk_mem_map_alloc(0, &g_rdma_map_ops, map);
 	if (!map->map) {
 		SPDK_ERRLOG("Unable to create memory map\n");

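The role-to-flags mapping that rdma_mem_notify() now applies is worth a brief note. The rationale is not spelled out in the commit message, but it follows the NVMe-oF data flow: the target issues the RDMA READ/WRITE operations itself, so its registered memory only needs local write access for incoming receives, while initiator buffers are the remote side of those operations and therefore also need remote read/write access. The helper below restates that mapping with explanatory comments; the function name and include path are illustrative assumptions, not code from the commit.

#include <infiniband/verbs.h>
#include "spdk_internal/rdma.h"	/* assumed header declaring enum spdk_rdma_memory_map_role */

/* Illustrative restatement of the switch added in this commit. */
static int
rdma_role_access_flags(enum spdk_rdma_memory_map_role role)
{
	switch (role) {
	case SPDK_RDMA_MEMORY_MAP_ROLE_TARGET:
		/* The target initiates RDMA READ/WRITE itself; peers never access its MRs remotely. */
		return IBV_ACCESS_LOCAL_WRITE;
	case SPDK_RDMA_MEMORY_MAP_ROLE_INITIATOR:
		/* Initiator buffers are read by the target to fetch write-command data and
		 * written by the target to return read-command data. */
		return IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE;
	default:
		return 0;
	}
}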

@@ -63,7 +63,7 @@ DEFINE_STUB(spdk_rdma_qp_queue_recv_wrs, bool, (struct spdk_rdma_qp *spdk_rdma_q
 DEFINE_STUB(spdk_rdma_qp_flush_recv_wrs, int, (struct spdk_rdma_qp *spdk_rdma_qp,
 		struct ibv_recv_wr **bad_wr), 0);
 DEFINE_STUB(spdk_rdma_create_mem_map, struct spdk_rdma_mem_map *, (struct ibv_pd *pd,
-		struct spdk_nvme_rdma_hooks *hooks), NULL);
+		struct spdk_nvme_rdma_hooks *hooks, enum spdk_rdma_memory_map_role role), NULL);
 DEFINE_STUB_V(spdk_rdma_free_mem_map, (struct spdk_rdma_mem_map **map));
 
 /* used to mock out having to split an SGL over a memory region */