nvmf: Pass length instead of num_buffers to spdk_nvmf_request_get_buffers()

Subsequent patches will unify getting buffers, filling iovecs, and filling
WRs into a single API. As a preparation, pass the request length to
spdk_nvmf_request_get_buffers() and let it compute the required number of
buffers internally, rather than having each transport compute num_buffers
itself.
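
For illustration, the caller-side effect of this change is roughly the
following (a minimal sketch distilled from the fc/rdma/tcp call sites in the
diff below; req, group, transport, and length stand in for the
transport-specific variables):

	/* Before: each transport computed the buffer count itself. */
	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);
	if (spdk_nvmf_request_get_buffers(req, group, transport, num_buffers)) {
		return -ENOMEM;
	}

	/* After: the caller passes the I/O length and
	 * spdk_nvmf_request_get_buffers() derives the buffer count internally
	 * from transport->opts.io_unit_size.
	 */
	if (spdk_nvmf_request_get_buffers(req, group, transport, length)) {
		return -ENOMEM;
	}

Keeping the SPDK_CEIL_DIV() calculation in one place means the transports no
longer need to know about io_unit_size when requesting buffers.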

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I077c4ea8957dcb3c7e4f4181f18b04b343e9927d
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468953
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Shuhei Matsumoto, 2019-09-24 09:53:14 +09:00, committed by Jim Harris
parent aa6964e585
commit 7c7a0c0a68
7 changed files with 12 additions and 19 deletions


@@ -1298,14 +1298,11 @@ static int
 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
 {
 	uint32_t length = fc_req->req.length;
-	uint32_t num_buffers;
 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
 	struct spdk_nvmf_transport *transport = group->transport;
-	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);
-	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, num_buffers)) {
+	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
 		return -ENOMEM;
 	}


@@ -390,7 +390,7 @@ void spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
 int spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 				  struct spdk_nvmf_transport_poll_group *group,
 				  struct spdk_nvmf_transport *transport,
-				  uint32_t num_buffers);
+				  uint32_t length);
 bool spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx);


@@ -1674,16 +1674,13 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 	struct spdk_nvmf_rdma_poll_group *rgroup;
 	struct spdk_nvmf_request *req = &rdma_req->req;
 	struct ibv_send_wr *wr = &rdma_req->data.wr;
-	uint32_t num_buffers;
 	int rc = 0;
 	rqpair = SPDK_CONTAINEROF(req->qpair, struct spdk_nvmf_rdma_qpair, qpair);
 	rgroup = rqpair->poller->group;
-	num_buffers = SPDK_CEIL_DIV(length, rtransport->transport.opts.io_unit_size);
 	if (spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
-					  num_buffers)) {
+					  length)) {
 		return -ENOMEM;
 	}
@@ -1730,7 +1727,6 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 	struct spdk_nvmf_request *req = &rdma_req->req;
 	struct spdk_nvme_sgl_descriptor *inline_segment, *desc;
 	uint32_t num_sgl_descriptors;
-	uint32_t num_buffers = 0;
 	uint32_t i;
 	int rc;
@@ -1763,9 +1759,8 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
 		goto err_exit;
 	}
-	num_buffers = SPDK_CEIL_DIV(desc->keyed.length, rtransport->transport.opts.io_unit_size);
 	rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
-					   num_buffers);
+					   desc->keyed.length);
 	if (rc != 0) {
 		goto err_exit;
 	}


@@ -2198,16 +2198,13 @@ static int
 spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 			    struct spdk_nvmf_tcp_req *tcp_req, uint32_t length)
 {
-	uint32_t num_buffers;
 	struct spdk_nvmf_tcp_qpair *tqpair;
 	struct spdk_nvmf_transport_poll_group *group;
 	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
 	group = &tqpair->group->group;
-	num_buffers = SPDK_CEIL_DIV(length, ttransport->transport.opts.io_unit_size);
-	if (spdk_nvmf_request_get_buffers(&tcp_req->req, group, &ttransport->transport, num_buffers)) {
+	if (spdk_nvmf_request_get_buffers(&tcp_req->req, group, &ttransport->transport, length)) {
 		tcp_req->req.iovcnt = 0;
 		return -ENOMEM;
 	}


@@ -393,13 +393,15 @@ int
 spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 			      struct spdk_nvmf_transport_poll_group *group,
 			      struct spdk_nvmf_transport *transport,
-			      uint32_t num_buffers)
+			      uint32_t length)
 {
+	uint32_t num_buffers;
 	uint32_t i = 0;
 	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
 	 * Fail it.
 	 */
+	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);
 	if (num_buffers + req->num_buffers > NVMF_REQ_MAX_BUFFERS) {
 		return -EINVAL;
 	}


@@ -107,13 +107,15 @@ int
 spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 			      struct spdk_nvmf_transport_poll_group *group,
 			      struct spdk_nvmf_transport *transport,
-			      uint32_t num_buffers)
+			      uint32_t length)
 {
+	uint32_t num_buffers;
 	uint32_t i = 0;
 	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
 	 * Fail it.
 	 */
+	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);
 	if (num_buffers + req->num_buffers > NVMF_REQ_MAX_BUFFERS) {
 		return -EINVAL;
 	}


@@ -170,7 +170,7 @@ DEFINE_STUB(spdk_nvmf_transport_req_complete,
 DEFINE_STUB(spdk_nvmf_request_get_buffers,
 	    int,
 	    (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
-	     struct spdk_nvmf_transport *transport, uint32_t num_buffers),
+	     struct spdk_nvmf_transport *transport, uint32_t length),
 	    0);
 DEFINE_STUB_V(spdk_nvmf_request_free_buffers,