nvme: alloc buffer internally for non-I/O requests

Rather than forcing the NVMe library user to pass a specially-allocated
block of memory (e.g. rte_malloc() in the case of the default
nvme_impl.h), just make the NVMe library allocate a suitable buffer
itself and copy to/from the user buffer as needed.

The fast path I/O functions still require special rte_malloc()
allocations, since we don't want to add an allocation and copy to the
I/O critical path.
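
A quick illustration of what this enables on the caller side (not part of
this patch; it assumes the spdk_nvme_ctrlr_cmd_get_log_page() signature at
this point in the tree, a controller that was probed and attached elsewhere,
and made-up helper/callback names):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include "spdk/nvme.h"

    static void
    health_log_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
    {
        struct spdk_nvme_health_information_page *health = cb_arg;

        if (!spdk_nvme_cpl_is_error(cpl)) {
            /* The library has already copied the log data back into this
             * plain malloc()'d buffer before invoking the callback. */
            printf("composite temperature: %u K\n", health->temperature);
        }
        free(health);
    }

    static int
    request_health_log(struct spdk_nvme_ctrlr *ctrlr)
    {
        /* An ordinary heap buffer is now sufficient for admin-path data;
         * only fast-path I/O buffers still need rte_malloc()/nvme_malloc(). */
        struct spdk_nvme_health_information_page *health = malloc(sizeof(*health));
        int rc;

        if (health == NULL) {
            return -ENOMEM;
        }

        /* Completion (and the copy back into 'health') is delivered from
         * spdk_nvme_ctrlr_process_admin_completions(ctrlr). */
        rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
                                              SPDK_NVME_GLOBAL_NS_TAG, health,
                                              sizeof(*health), health_log_done, health);
        if (rc != 0) {
            free(health);
        }
        return rc;
    }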

Change-Id: I7fe88c0ba60c859a33bbe95b7713f423c6bf1ea8
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Daniel Verkamp committed 2016-09-12 14:30:59 -07:00
commit 5e9d859327 (parent df70bc1559)
6 changed files with 102 additions and 24 deletions


@@ -147,6 +147,67 @@ nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}

static void
nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
{
struct nvme_request *req = arg;
enum spdk_nvme_data_transfer xfer;
if (req->user_buffer && req->payload_size) {
/* Copy back to the user buffer and free the contig buffer */
assert(req->payload.type == NVME_PAYLOAD_TYPE_CONTIG);
xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
if (xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST ||
xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
memcpy(req->user_buffer, req->payload.u.contig, req->payload_size);
}
nvme_free(req->payload.u.contig);
}
/* Call the user's original callback now that the buffer has been copied */
req->user_cb_fn(req->user_cb_arg, cpl);
}

/**
* Allocate a request as well as a physically contiguous buffer to copy to/from the user's buffer.
*
* This is intended for use in non-fast-path functions (admin commands, reservations, etc.)
* where the overhead of a copy is not a problem.
*/
struct nvme_request *
nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, bool host_to_controller)
{
struct nvme_request *req;
void *contig_buffer = NULL;
uint64_t phys_addr;
if (buffer && payload_size) {
contig_buffer = nvme_malloc("nvme_user_copy", payload_size, 4096, &phys_addr);
if (!contig_buffer) {
return NULL;
}
if (host_to_controller) {
memcpy(contig_buffer, buffer, payload_size);
}
}
req = nvme_allocate_request_contig(contig_buffer, payload_size, nvme_user_copy_cmd_complete, NULL);
if (!req) {
nvme_free(contig_buffer);
return NULL;
}
req->user_cb_fn = cb_fn;
req->user_cb_arg = cb_arg;
req->user_buffer = buffer;
req->cb_arg = req;
return req;
}

void
nvme_free_request(struct nvme_request *req)
{


@@ -84,9 +84,8 @@ nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
-req = nvme_allocate_request_contig(payload,
-sizeof(struct spdk_nvme_ctrlr_data),
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_data),
+cb_fn, cb_arg, false);
if (req == NULL) {
return -ENOMEM;
}
@@ -110,9 +109,8 @@ nvme_ctrlr_cmd_identify_namespace(struct spdk_nvme_ctrlr *ctrlr, uint16_t nsid,
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
-req = nvme_allocate_request_contig(payload,
-sizeof(struct spdk_nvme_ns_data),
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ns_data),
+cb_fn, cb_arg, false);
if (req == NULL) {
return -ENOMEM;
}
@@ -234,8 +232,8 @@ nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
int rc;
pthread_mutex_lock(&ctrlr->ctrlr_lock);
-req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_list),
+cb_fn, cb_arg, true);
if (req == NULL) {
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
@@ -261,8 +259,8 @@ nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
int rc;
pthread_mutex_lock(&ctrlr->ctrlr_lock);
-req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ctrlr_list),
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ctrlr_list),
+cb_fn, cb_arg, true);
if (req == NULL) {
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
@@ -288,8 +286,8 @@ nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data
int rc;
pthread_mutex_lock(&ctrlr->ctrlr_lock);
-req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_ns_data),
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_ns_data),
+cb_fn, cb_arg, true);
if (req == NULL) {
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
@@ -445,7 +443,7 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
int rc;
pthread_mutex_lock(&ctrlr->ctrlr_lock);
-req = nvme_allocate_request_contig(payload, payload_size, cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, payload_size, cb_fn, cb_arg, false);
if (req == NULL) {
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;
@@ -519,8 +517,7 @@ nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
int rc;
pthread_mutex_lock(&ctrlr->ctrlr_lock);
-req = nvme_allocate_request_contig(payload, size,
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, size, cb_fn, cb_arg, true);
if (req == NULL) {
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;


@@ -230,6 +230,14 @@ struct nvme_request {
* status once all child requests are completed.
*/
struct spdk_nvme_cpl parent_status;
/**
* The user_cb_fn and user_cb_arg fields are used for holding the original
* callback data when using nvme_allocate_request_user_copy.
*/
spdk_nvme_cmd_cb user_cb_fn;
void *user_cb_arg;
void *user_buffer;
};

struct nvme_completion_poll_status {
@@ -574,6 +582,8 @@ struct nvme_request *nvme_allocate_request(const struct nvme_payload *payload,
struct nvme_request *nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg);
struct nvme_request *nvme_allocate_request_contig(void *buffer, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
struct nvme_request *nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
void nvme_free_request(struct nvme_request *req);
void nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child);
bool nvme_intel_has_quirk(struct pci_id *id, uint64_t quirk);


@@ -455,9 +455,8 @@ spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
-req = nvme_allocate_request_contig(payload,
-sizeof(struct spdk_nvme_reservation_register_data),
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_reservation_register_data),
+cb_fn, cb_arg, true);
if (req == NULL) {
return -ENOMEM;
}
@@ -488,8 +487,8 @@ spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
-req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
-cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
+cb_arg, true);
if (req == NULL) {
return -ENOMEM;
}
@@ -520,9 +519,8 @@ spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
-req = nvme_allocate_request_contig(payload,
-sizeof(struct spdk_nvme_reservation_acquire_data),
-cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, sizeof(struct spdk_nvme_reservation_acquire_data),
+cb_fn, cb_arg, true);
if (req == NULL) {
return -ENOMEM;
}
@@ -555,7 +553,7 @@ spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
return -EINVAL;
num_dwords = len / 4;
-req = nvme_allocate_request_contig(payload, len, cb_fn, cb_arg);
+req = nvme_allocate_request_user_copy(payload, len, cb_fn, cb_arg, false);
if (req == NULL) {
return -ENOMEM;
}


@@ -277,6 +277,14 @@ nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}

struct nvme_request *
nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, bool host_to_controller)
{
/* For the unit test, we don't actually need to copy the buffer */
return nvme_allocate_request_contig(buffer, payload_size, cb_fn, cb_arg);
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{


@@ -607,6 +607,7 @@ test_nvme_ns_cmd_reservation_register(void)
CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
nvme_free(g_request->payload.u.contig);
nvme_free_request(g_request);
+free(payload);
}
@@ -642,6 +643,7 @@ test_nvme_ns_cmd_reservation_release(void)
CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
nvme_free(g_request->payload.u.contig);
nvme_free_request(g_request);
+free(payload);
}
@@ -677,6 +679,7 @@ test_nvme_ns_cmd_reservation_acquire(void)
CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
nvme_free(g_request->payload.u.contig);
nvme_free_request(g_request);
+free(payload);
}
@@ -704,6 +707,7 @@ test_nvme_ns_cmd_reservation_report(void)
CU_ASSERT(g_request->cmd.cdw10 == (0x1000 / 4));
nvme_free(g_request->payload.u.contig);
nvme_free_request(g_request);
+free(payload);
}