rdma: Use optimal ceiling integer division

This form of ceiling division allows removing an extra conditional check.
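
Why the identity holds for unsigned integers (assuming y > 0 and
x + y - 1 does not overflow): write x = q*y + r with 0 <= r < y; then
(x + y - 1)/y is q when r == 0 and q + 1 when r > 0, i.e. exactly
ceil(x/y), so the separate remainder check is unnecessary.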

Change-Id: I8a2de792172ec9115563e7fb914745c476f16e8d
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Sasha Kotchubievsky <sashakot@mellanox.com>
Signed-off-by: Evgenii Kochetov <evgeniik@mellanox.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/462198
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Alexey Marchuk, 2019-07-15 12:52:00 +00:00; committed by Darek Stojaczyk
Parent: de1d0f8fe9
Commit: 0754417fa9
2 changed files with 7 additions and 10 deletions

include/spdk/util.h

@@ -1,8 +1,8 @@
 /*-
  * BSD LICENSE
  *
- * Copyright (c) Intel Corporation.
- * All rights reserved.
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -54,6 +54,9 @@ extern "C" {
 #define SPDK_SEC_TO_USEC 1000000ULL
 #define SPDK_SEC_TO_NSEC 1000000000ULL
 
+/* Ceiling division of unsigned integers */
+#define SPDK_CEIL_DIV(x,y) (((x)+(y)-1)/(y))
+
 static inline uint32_t
 spdk_u32log2(uint32_t x)
 {

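A minimal standalone sketch (not part of the diff; the values are made
up) showing how the new macro behaves, including the one caveat that
(x)+(y)-1 must not overflow the operand type:

    #include <stdio.h>

    #define SPDK_CEIL_DIV(x,y) (((x)+(y)-1)/(y))

    int
    main(void)
    {
            /* 10 bytes split into 4-byte units needs 3 units: (10+4-1)/4 == 3 */
            printf("%u\n", SPDK_CEIL_DIV(10u, 4u));
            /* An exact multiple needs no extra unit: (8+4-1)/4 == 2 */
            printf("%u\n", SPDK_CEIL_DIV(8u, 4u));
            /* Caveat: x close to UINT32_MAX would wrap around in (x)+(y)-1. */
            return 0;
    }
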
lib/nvmf/rdma.c

@@ -1574,10 +1574,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 	rgroup = rqpair->poller->group;
 	rdma_req->req.iovcnt = 0;
 
-	num_buffers = rdma_req->req.length / rtransport->transport.opts.io_unit_size;
-	if (rdma_req->req.length % rtransport->transport.opts.io_unit_size) {
-		num_buffers++;
-	}
+	num_buffers = SPDK_CEIL_DIV(rdma_req->req.length, rtransport->transport.opts.io_unit_size);
 
 	if (nvmf_rdma_request_get_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers)) {
 		return -ENOMEM;
@@ -1636,10 +1633,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transpor
 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
 	for (i = 0; i < num_sgl_descriptors; i++) {
-		num_buffers += desc->keyed.length / rtransport->transport.opts.io_unit_size;
-		if (desc->keyed.length % rtransport->transport.opts.io_unit_size) {
-			num_buffers++;
-		}
+		num_buffers += SPDK_CEIL_DIV(desc->keyed.length, rtransport->transport.opts.io_unit_size);
 		desc++;
 	}
 
 	/* If the number of buffers is too large, then we know the I/O is larger than allowed. Fail it. */
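
A quick worked example for the loop above (hypothetical sizes, not taken
from the commit): two SGL descriptors of 10 and 8 bytes with an
io_unit_size of 4 accumulate SPDK_CEIL_DIV(10, 4) + SPDK_CEIL_DIV(8, 4)
= 3 + 2 = 5 buffers, the same total the removed divide-plus-remainder
form produced.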