nvme: Add API to get SPDK memory domain per nvme controller
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: I6db64c7075b1337b1489b2716fc686a6bed595e3
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7239
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
parent d06b6097e3
commit a422d8b06f
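For context, here is a minimal usage sketch of the new public API (illustrative only, not part of this commit). The helper name is hypothetical, and it assumes `spdk_memory_domain_get_dma_device_id()` from `spdk/dma.h` is available, with `ctrlr` obtained through the usual probe/connect flow:

```c
#include "spdk/nvme.h"
#include "spdk/dma.h"
#include <stdio.h>

/* Hypothetical helper: report the memory domain of an already-attached
 * controller. */
static void
print_ctrlr_memory_domain(struct spdk_nvme_ctrlr *ctrlr)
{
	struct spdk_memory_domain *domain = spdk_nvme_ctrlr_get_memory_domain(ctrlr);

	if (domain == NULL) {
		/* The transport does not implement ctrlr_get_memory_domain,
		 * so plain system memory is used. */
		printf("controller has no memory domain\n");
		return;
	}

	printf("controller memory domain: %s\n",
	       spdk_memory_domain_get_dma_device_id(domain));
}
```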
@@ -15,6 +15,10 @@ the memory domains and request hardware perform DMA transfers between them.
 Added API `spdk_log_to_syslog_level` to return syslog level based on SPDK's
 log level.
 
+### nvme
+
+New API `spdk_nvme_ctrlr_get_memory_domain` has been added; it allows getting the SPDK memory domain used by an NVMe controller.
+
 ## v21.07:
 
 ### accel_fw
@@ -3,6 +3,7 @@
  *
  * Copyright (c) Intel Corporation. All rights reserved.
  * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -3641,6 +3642,14 @@ int spdk_nvme_cuse_register(struct spdk_nvme_ctrlr *ctrlr);
  */
 int spdk_nvme_cuse_unregister(struct spdk_nvme_ctrlr *ctrlr);
 
+/**
+ * Get SPDK memory domain used by the given nvme controller.
+ *
+ * \param ctrlr Opaque handle to the NVMe controller.
+ * \return Pointer to memory domain used by this controller or NULL
+ */
+struct spdk_memory_domain *spdk_nvme_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr);
+
 /**
  * Opaque handle for a transport poll group. Used by the transport function table.
  */
@@ -3745,6 +3754,8 @@ struct spdk_nvme_transport_ops {
 
 	void (*poll_group_free_stats)(struct spdk_nvme_transport_poll_group *tgroup,
				      struct spdk_nvme_transport_poll_group_stat *stats);
+
+	struct spdk_memory_domain *(*ctrlr_get_memory_domain)(const struct spdk_nvme_ctrlr *ctrlr);
 };
 
 /**
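The new member is optional, so existing transports keep working unchanged. As a hedged sketch of how a third-party transport could advertise its domain (the transport name, helper, and global are hypothetical, and a real ops table must also fill in the mandatory callbacks):

```c
#include "spdk/nvme.h"
#include "spdk/dma.h"

/* Hypothetical: a domain the transport created at init time. */
static struct spdk_memory_domain *g_my_domain;

static struct spdk_memory_domain *
my_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
{
	return g_my_domain;
}

static const struct spdk_nvme_transport_ops my_ops = {
	.name = "MYTRANSPORT",
	/* ...mandatory callbacks (ctrlr_construct, qpair handling, etc.) elided... */
	.ctrlr_get_memory_domain = my_ctrlr_get_memory_domain,
};

SPDK_NVME_TRANSPORT_REGISTER(my, &my_ops);
```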
@@ -3,6 +3,7 @@
  *
  * Copyright (c) Intel Corporation. All rights reserved.
  * Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -4587,3 +4588,9 @@ spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
 
 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 }
+
+struct spdk_memory_domain *
+spdk_nvme_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
+{
+	return nvme_transport_ctrlr_get_memory_domain(ctrlr);
+}
@@ -3,6 +3,7 @@
  *
  * Copyright (c) Intel Corporation. All rights reserved.
  * Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -1360,6 +1361,8 @@ int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair);
 void nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair);
+struct spdk_memory_domain *nvme_transport_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr
+		*ctrlr);
 void nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
 int nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair);
 int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
@@ -2784,6 +2784,14 @@ nvme_rdma_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
	free(stats);
 }
 
+static struct spdk_memory_domain *
+nvme_rdma_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
+{
+	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(ctrlr->adminq);
+
+	return rqpair->memory_domain->domain;
+}
+
 void
 spdk_nvme_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
 {
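Design note: the RDMA transport resolves the controller's domain through its admin queue pair. The adminq always exists for a constructed controller, and the queue pairs of a controller share the same RDMA device (and therefore the same nvme_rdma_memory_domain), so the adminq serves as a convenient canonical source without extra per-controller state.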
@@ -2811,6 +2819,8 @@ const struct spdk_nvme_transport_ops rdma_ops = {
	.ctrlr_connect_qpair = nvme_rdma_ctrlr_connect_qpair,
	.ctrlr_disconnect_qpair = nvme_rdma_ctrlr_disconnect_qpair,
 
+	.ctrlr_get_memory_domain = nvme_rdma_ctrlr_get_memory_domain,
+
	.qpair_abort_reqs = nvme_rdma_qpair_abort_reqs,
	.qpair_reset = nvme_rdma_qpair_reset,
	.qpair_submit_request = nvme_rdma_qpair_submit_request,
@@ -4,6 +4,7 @@
  * Copyright (c) Intel Corporation.
  * All rights reserved.
  * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -432,6 +433,19 @@ nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 }
 
+struct spdk_memory_domain *
+nvme_transport_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
+{
+	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+	assert(transport != NULL);
+	if (transport->ops.ctrlr_get_memory_domain) {
+		return transport->ops.ctrlr_get_memory_domain(ctrlr);
+	}
+
+	return NULL;
+}
+
 void
 nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
 {
@@ -103,6 +103,7 @@
	spdk_nvme_ctrlr_alloc_qid;
	spdk_nvme_ctrlr_free_qid;
	spdk_nvme_ctrlr_set_remove_cb;
+	spdk_nvme_ctrlr_get_memory_domain;
 
	spdk_nvme_poll_group_create;
	spdk_nvme_poll_group_add;
@@ -3,6 +3,7 @@
  *
  * Copyright (c) Intel Corporation. All rights reserved.
  * Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -81,6 +82,14 @@ DEFINE_STUB(spdk_nvme_ctrlr_cmd_security_send, int, (struct spdk_nvme_ctrlr *ctr
		uint8_t secp, uint16_t spsp, uint8_t nssf, void *payload,
		uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
 
+DEFINE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domain, struct spdk_memory_domain *);
+struct spdk_memory_domain *
+nvme_transport_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
+{
+	HANDLE_RETURN_MOCK(nvme_transport_ctrlr_get_memory_domain);
+	return NULL;
+}
+
 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
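For readers unfamiliar with SPDK's unit-test mock macros (from `spdk_internal/mock.h`): `DEFINE_RETURN_MOCK` declares a per-function override value, `HANDLE_RETURN_MOCK` returns that value when a test has armed it, `MOCK_SET` arms it, and `MOCK_CLEAR` disarms it so the fallback below (here, `return NULL;`) takes effect again. This is what lets the tests further down inject a fake memory domain without a real transport.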
@@ -3067,6 +3076,21 @@ test_nvme_ctrlr_ana_resize(void)
	nvme_ctrlr_destruct(&ctrlr);
 }
 
+static void
+test_nvme_ctrlr_get_memory_domain(void)
+{
+	struct spdk_nvme_ctrlr ctrlr = {};
+	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xbaadbeef;
+
+	MOCK_SET(nvme_transport_ctrlr_get_memory_domain, domain);
+	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domain(&ctrlr) == domain);
+
+	MOCK_SET(nvme_transport_ctrlr_get_memory_domain, NULL);
+	CU_ASSERT(spdk_nvme_ctrlr_get_memory_domain(&ctrlr) == NULL);
+
+	MOCK_CLEAR(nvme_transport_ctrlr_get_memory_domain);
+}
+
 int main(int argc, char **argv)
 {
	CU_pSuite suite = NULL;
@@ -3119,6 +3143,7 @@ int main(int argc, char **argv)
	CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_log_pages);
	CU_ADD_TEST(suite, test_nvme_ctrlr_parse_ana_log_page);
	CU_ADD_TEST(suite, test_nvme_ctrlr_ana_resize);
+	CU_ADD_TEST(suite, test_nvme_ctrlr_get_memory_domain);
 
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
@@ -1200,6 +1200,21 @@ test_nvme_rdma_memory_domain(void)
	CU_ASSERT(dma_dev_count_start == dma_dev_count_end);
 }
 
+static void
+test_rdma_ctrlr_get_memory_domain(void)
+{
+	struct nvme_rdma_ctrlr rctrlr = {};
+	struct nvme_rdma_qpair rqpair = {};
+	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xbaadbeef;
+	struct nvme_rdma_memory_domain rdma_domain = { .domain = domain };
+
+	rqpair.memory_domain = &rdma_domain;
+	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
+	rctrlr.ctrlr.adminq = &rqpair.qpair;
+
+	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domain(&rctrlr.ctrlr) == domain);
+}
+
 int main(int argc, char **argv)
 {
	CU_pSuite suite = NULL;
@@ -1229,6 +1244,7 @@
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_init);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);
	CU_ADD_TEST(suite, test_nvme_rdma_memory_domain);
+	CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domain);
 
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
@@ -3,6 +3,7 @@
  *
  * Copyright (c) Intel Corporation.
  * All rights reserved.
+ * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -219,6 +220,38 @@ test_nvme_transport_poll_group_add_remove(void)
	CU_ASSERT(rc == -ENOENT);
 }
 
+static struct spdk_memory_domain *g_ut_ctrlr_memory_domain = (struct spdk_memory_domain *)
+		0xfeedbeef;
+
+static struct spdk_memory_domain *
+g_ut_ctrlr_get_memory_domain(const struct spdk_nvme_ctrlr *ctrlr)
+{
+	return g_ut_ctrlr_memory_domain;
+}
+
+static void
+test_ctrlr_get_memory_domain(void)
+{
+	struct spdk_nvme_ctrlr ctrlr = {
+		.trid = {
+			.trstring = "new_transport"
+		}
+	};
+	struct spdk_nvme_transport new_transport = {
+		.ops = { .ctrlr_get_memory_domain = g_ut_ctrlr_get_memory_domain }
+	};
+
+	ut_construct_transport(&new_transport, "new_transport");
+
+	/* transport contains necessary op */
+	CU_ASSERT(nvme_transport_ctrlr_get_memory_domain(&ctrlr) == g_ut_ctrlr_memory_domain);
+
+	/* transport doesn't contain necessary op */
+	new_transport.ops.ctrlr_get_memory_domain = NULL;
+	CU_ASSERT(nvme_transport_ctrlr_get_memory_domain(&ctrlr) == NULL);
+
+	TAILQ_REMOVE(&g_spdk_nvme_transports, &new_transport, link);
+}
+
 int main(int argc, char **argv)
 {
@@ -233,6 +266,7 @@
	CU_ADD_TEST(suite, test_nvme_transport_poll_group_connect_qpair);
	CU_ADD_TEST(suite, test_nvme_transport_poll_group_disconnect_qpair);
	CU_ADD_TEST(suite, test_nvme_transport_poll_group_add_remove);
+	CU_ADD_TEST(suite, test_ctrlr_get_memory_domain);
 
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();