nvme: move nvme_request_add_child to nvme_ns_cmd.c

nvme_ns_cmd.c is the only place nvme_request_add_child() is used, so move
it there and make it static so the compiler can inline it.

Change-Id: If4a7e17fde0b0272e1d4432c1dcedbec27c25371
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Daniel Verkamp <daniel.verkamp@intel.com>
Date:   2015-09-28 10:53:57 -07:00
Commit: d2e10e88ec (parent: 21b37d4ee6)
3 changed files with 44 additions and 45 deletions


@@ -138,6 +138,8 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
          * TAILQ_ENTRY. children, and following members, are
          * only used as part of I/O splitting so we avoid
          * memsetting them until it is actually needed.
+         * They will be initialized in nvme_request_add_child()
+         * if the request is split.
          */
         memset(req, 0, offsetof(struct nvme_request, children));
         req->cb_fn = cb_fn;
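
The memset above relies on a struct-layout trick: offsetof() gives the byte offset of the first "cold" member (children), so only the members that precede it are zeroed and the splitting-only members on the following cacheline stay untouched. A minimal standalone sketch of the same pattern, using a hypothetical struct that is not part of the SPDK sources:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct hypothetical_req {
        /* hot members, zeroed on every allocation */
        void     *cb_arg;
        uint16_t  qid;
        /* cold members; left uninitialized until the request is split */
        uint32_t  num_children;
        char      scratch[64];
};

static void
init_hot_members(struct hypothetical_req *req)
{
        /* zero everything up to, but not including, num_children */
        memset(req, 0, offsetof(struct hypothetical_req, num_children));
}
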
@@ -157,48 +159,6 @@ nvme_allocate_request(void *payload, uint32_t payload_size,
         return req;
 }
 
-void
-nvme_cb_complete_child(void *child_arg, const struct nvme_completion *cpl)
-{
-        struct nvme_request *child = child_arg;
-        struct nvme_request *parent = child->parent;
-
-        parent->num_children--;
-        TAILQ_REMOVE(&parent->children, child, child_tailq);
-
-        if (nvme_completion_is_error(cpl)) {
-                memcpy(&parent->parent_status, cpl, sizeof(*cpl));
-        }
-
-        if (parent->num_children == 0) {
-                if (parent->cb_fn) {
-                        parent->cb_fn(parent->cb_arg, &parent->parent_status);
-                }
-                nvme_free_request(parent);
-        }
-}
-
-void
-nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
-{
-        if (parent->num_children == 0) {
-                /*
-                 * Defer initialization of the children TAILQ since it falls
-                 * on a separate cacheline. This ensures we do not touch this
-                 * cacheline except on request splitting cases, which are
-                 * relatively rare.
-                 */
-                TAILQ_INIT(&parent->children);
-                memset(&parent->parent_status, 0, sizeof(struct nvme_completion));
-        }
-
-        parent->num_children++;
-        TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
-        child->parent = parent;
-        child->cb_fn = nvme_cb_complete_child;
-        child->cb_arg = child;
-}
-
 static int
 nvme_allocate_ioq_index(void)
 {
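
Both removed functions are built on the BSD sys/queue.h tail-queue macros: the parent holds the list head, each child embeds a TAILQ_ENTRY link (child_tailq), and insertion and removal are O(1). A small self-contained illustration of that API, independent of the nvme_request types:

#include <stdio.h>
#include <sys/queue.h>

struct item {
        int value;
        TAILQ_ENTRY(item) link;            /* embedded linkage, like child_tailq */
};

TAILQ_HEAD(item_list, item);               /* list head type, like parent->children */

int main(void)
{
        struct item_list head;
        struct item a = { .value = 1 }, b = { .value = 2 };
        struct item *it;

        TAILQ_INIT(&head);                 /* cf. the deferred TAILQ_INIT(&parent->children) */
        TAILQ_INSERT_TAIL(&head, &a, link);
        TAILQ_INSERT_TAIL(&head, &b, link);

        TAILQ_REMOVE(&head, &a, link);     /* cf. removing a completed child */

        TAILQ_FOREACH(it, &head, link)
                printf("%d\n", it->value); /* prints: 2 */
        return 0;
}
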


@@ -433,7 +433,4 @@ struct nvme_request *
 nvme_allocate_request(void *payload, uint32_t payload_size,
                       nvme_cb_fn_t cb_fn, void *cb_arg);
-void nvme_cb_complete_child(void *child, const struct nvme_completion *cpl);
-void nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child);
-
 
 #endif /* __NVME_INTERNAL_H__ */


@@ -43,6 +43,48 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
                 uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
                 uint32_t opc);
 
+static void
+nvme_cb_complete_child(void *child_arg, const struct nvme_completion *cpl)
+{
+        struct nvme_request *child = child_arg;
+        struct nvme_request *parent = child->parent;
+
+        parent->num_children--;
+        TAILQ_REMOVE(&parent->children, child, child_tailq);
+
+        if (nvme_completion_is_error(cpl)) {
+                memcpy(&parent->parent_status, cpl, sizeof(*cpl));
+        }
+
+        if (parent->num_children == 0) {
+                if (parent->cb_fn) {
+                        parent->cb_fn(parent->cb_arg, &parent->parent_status);
+                }
+                nvme_free_request(parent);
+        }
+}
+
+static void
+nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
+{
+        if (parent->num_children == 0) {
+                /*
+                 * Defer initialization of the children TAILQ since it falls
+                 * on a separate cacheline. This ensures we do not touch this
+                 * cacheline except on request splitting cases, which are
+                 * relatively rare.
+                 */
+                TAILQ_INIT(&parent->children);
+                memset(&parent->parent_status, 0, sizeof(struct nvme_completion));
+        }
+
+        parent->num_children++;
+        TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
+        child->parent = parent;
+        child->cb_fn = nvme_cb_complete_child;
+        child->cb_arg = child;
+}
+
 static struct nvme_request *
 _nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
                            uint64_t lba, uint32_t lba_count,
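
For context on how the relocated helpers are used: _nvme_ns_cmd_split_request (its signature is truncated above) issues a large I/O as several child requests and ties each one to a parent via nvme_request_add_child(), so the caller's callback fires exactly once, when the last child completes. The sketch below is a simplified, hypothetical version of that flow, not the actual SPDK implementation; it assumes the declarations from nvme_internal.h above, and split_into_children, max_lba_per_child, and the hard-coded 512-byte sector size are illustrative only:

static struct nvme_request *
split_into_children(void *payload, uint64_t lba, uint32_t lba_count,
                    nvme_cb_fn_t cb_fn, void *cb_arg,
                    uint32_t max_lba_per_child)
{
        struct nvme_request *parent, *child;
        uint32_t sector_size = 512;     /* assumed block size */
        uint32_t remaining = lba_count;

        /* the parent carries the caller's callback but no payload of its own */
        parent = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
        if (parent == NULL)
                return NULL;

        while (remaining > 0) {
                uint32_t chunk = remaining < max_lba_per_child ?
                                 remaining : max_lba_per_child;

                /*
                 * Each child covers one slice of the payload.  Its callback
                 * argument does not matter here: nvme_request_add_child()
                 * points it at nvme_cb_complete_child().
                 */
                child = nvme_allocate_request(payload, chunk * sector_size,
                                              NULL, NULL);
                if (child == NULL)
                        return NULL;    /* real code would unwind the children */

                nvme_request_add_child(parent, child);
                /* ... build and submit the NVMe command for (lba, chunk) ... */

                lba += chunk;
                payload = (char *)payload + chunk * sector_size;
                remaining -= chunk;
        }

        return parent;
}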