nvme: return number of completions processed
nvme_ctrlr_process_io_completions() and nvme_ctrlr_process_admin_completions() now return the number of completions processed. This also adds the possibility of returning an error from the process_*_completions functions (currently unused, but this at least gets the API ready in case error conditions are added later).

Change-Id: I1b32ee4f2f3c1c474d646fa2d6b8b7bbb769785f
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
parent
e73007a63a
commit
2ced60e9bf
@ -160,10 +160,12 @@ int nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
|
||||
* \param max_completions Limit the number of completions to be processed in one call, or 0
|
||||
* for unlimited.
|
||||
*
|
||||
* \return Number of completions processed (may be 0) or negative on error.
|
||||
*
|
||||
* This function is thread safe and can be called at any point after nvme_attach().
|
||||
*
|
||||
*/
|
||||
void nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions);
|
||||
int32_t nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions);
|
||||
|
||||
/**
|
||||
* \brief Send the given admin command to the NVMe controller.
|
||||
@ -195,9 +197,11 @@ int nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
|
||||
* at the time of this function call. It does not wait for outstanding commands to
|
||||
* finish.
|
||||
*
|
||||
* \return Number of completions processed (may be 0) or negative on error.
|
||||
*
|
||||
* This function is thread safe and can be called at any point after nvme_attach().
|
||||
*/
|
||||
void nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr);
|
||||
int32_t nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr);
|
||||
|
||||
|
||||
/** \brief Opaque handle to a namespace. Obtained by calling nvme_ctrlr_get_ns(). */
|
||||
|
@ -732,19 +732,23 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
|
||||
nvme_qpair_submit_request(qpair, req);
|
||||
}
|
||||
|
||||
void
|
||||
int32_t
|
||||
nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions)
|
||||
{
|
||||
nvme_assert(nvme_thread_ioq_index >= 0, ("no ioq_index assigned for thread\n"));
|
||||
nvme_qpair_process_completions(&ctrlr->ioq[nvme_thread_ioq_index], max_completions);
|
||||
return nvme_qpair_process_completions(&ctrlr->ioq[nvme_thread_ioq_index], max_completions);
|
||||
}
|
||||
|
||||
void
|
||||
int32_t
|
||||
nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr)
|
||||
{
|
||||
int32_t num_completions;
|
||||
|
||||
nvme_mutex_lock(&ctrlr->ctrlr_lock);
|
||||
nvme_qpair_process_completions(&ctrlr->adminq, 0);
|
||||
num_completions = nvme_qpair_process_completions(&ctrlr->adminq, 0);
|
||||
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
|
||||
|
||||
return num_completions;
|
||||
}
|
||||
|
||||
const struct nvme_controller_data *
|
||||
|
@ -402,7 +402,7 @@ int nvme_qpair_construct(struct nvme_qpair *qpair, uint16_t id,
|
||||
void nvme_qpair_destroy(struct nvme_qpair *qpair);
|
||||
void nvme_qpair_enable(struct nvme_qpair *qpair);
|
||||
void nvme_qpair_disable(struct nvme_qpair *qpair);
|
||||
void nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions);
|
||||
int32_t nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions);
|
||||
void nvme_qpair_submit_request(struct nvme_qpair *qpair,
|
||||
struct nvme_request *req);
|
||||
void nvme_qpair_reset(struct nvme_qpair *qpair);
|
||||
|
@ -461,11 +461,12 @@ nvme_qpair_check_enabled(struct nvme_qpair *qpair)
|
||||
*
|
||||
* \sa nvme_cb_fn_t
|
||||
*/
|
||||
void
|
||||
int32_t
|
||||
nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions)
|
||||
{
|
||||
struct nvme_tracker *tr;
|
||||
struct nvme_completion *cpl;
|
||||
uint32_t num_completions = 0;
|
||||
|
||||
if (!nvme_qpair_check_enabled(qpair)) {
|
||||
/*
|
||||
@ -474,7 +475,16 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completion
|
||||
* associated with this interrupt will get retried when the
|
||||
* reset is complete.
|
||||
*/
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (max_completions == 0) {
|
||||
/*
|
||||
* max_completions == 0 means unlimited; set it to the max uint32_t value
|
||||
* to avoid a special case in the loop. The maximum possible queue size is
|
||||
* only 64K, so num_completions will never reach this value.
|
||||
*/
|
||||
max_completions = UINT32_MAX;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
@ -501,10 +511,12 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completion
|
||||
|
||||
spdk_mmio_write_4(qpair->cq_hdbl, qpair->cq_head);
|
||||
|
||||
if (max_completions > 0 && --max_completions == 0) {
|
||||
if (++num_completions == max_completions) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return num_completions;
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -62,9 +62,10 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
|
||||
CU_ASSERT(req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST);
|
||||
}
|
||||
|
||||
void
|
||||
int32_t
|
||||
nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
|
Loading…
x
Reference in New Issue
Block a user