util: added spdk_divide_round_up()

Replaced the local divide_round_up() helpers in blobstore.c, lvol.c and
reduce.c with the new spdk_divide_round_up() from util.h.

Change-Id: I013383ac286ca52b5c15c7fab4fb40ad97b92656
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/437649
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Konrad Sztyber, 2018-12-18 10:46:35 +01:00 (committed by Jim Harris)
commit 62db4ac2cf, parent 8114cd367a
4 changed files with 23 additions and 31 deletions
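
For context, the new helper performs integer ceiling division ("how many divisor-sized units are needed to hold num"). A minimal usage sketch follows; the test program and the 1 MiB cluster size are illustrative and not part of the commit:

#include <assert.h>
#include <stdint.h>

#include "spdk/util.h"

int
main(void)
{
	const uint64_t cluster_sz = 1024 * 1024;	/* 1 MiB, illustrative value */

	/* An exact multiple needs no extra cluster. */
	assert(spdk_divide_round_up(10 * cluster_sz, cluster_sz) == 10);

	/* One extra byte rounds up to a whole additional cluster. */
	assert(spdk_divide_round_up(10 * cluster_sz + 1, cluster_sz) == 11);

	return 0;
}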

include/spdk/util.h

@@ -83,6 +83,12 @@ spdk_u32_is_pow2(uint32_t x)
 	return (x & (x - 1)) == 0;
 }
 
+static inline uint64_t
+spdk_divide_round_up(uint64_t num, uint64_t divisor)
+{
+	return (num + divisor - 1) / divisor;
+}
+
 #ifdef __cplusplus
 }
 #endif
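
Worked example of the expression (illustrative values, not from the diff): with num = 10 and divisor = 4, (10 + 4 - 1) / 4 = 13 / 4 = 3 under integer division, i.e. ceil(10 / 4); an exact multiple such as 12 / 4 still yields (12 + 3) / 4 = 3, with no over-rounding. As with the per-file copies it replaces, the helper assumes divisor is non-zero and that num + divisor - 1 does not wrap around uint64_t.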

lib/blob/blobstore.c

@@ -40,6 +40,7 @@
 #include "spdk/thread.h"
 #include "spdk/bit_array.h"
 #include "spdk/likely.h"
+#include "spdk/util.h"
 
 #include "spdk_internal/assert.h"
 #include "spdk_internal/log.h"
@@ -68,12 +69,6 @@ _spdk_blob_verify_md_op(struct spdk_blob *blob)
 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
 }
 
-static inline size_t
-divide_round_up(size_t num, size_t divisor)
-{
-	return (num + divisor - 1) / divisor;
-}
-
 static void
 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
 {
@@ -2978,7 +2973,7 @@ _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 		_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
 	} else {
 		/* Claim all of the clusters used by the metadata */
-		num_md_clusters = divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
+		num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
 		for (i = 0; i < num_md_clusters; i++) {
 			_spdk_bs_claim_cluster(ctx->bs, i);
 		}
@@ -3111,7 +3106,7 @@ _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 	}
 	ctx->bs->md_start = ctx->super->md_start;
 	ctx->bs->md_len = ctx->super->md_len;
-	ctx->bs->total_data_clusters = ctx->bs->total_clusters - divide_round_up(
+	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
 				       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
 	ctx->bs->super_blob = ctx->super->super_blob;
 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
@@ -3607,8 +3602,8 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
 	 * up to the nearest page, plus a header.
 	 */
 	ctx->super->used_page_mask_start = num_md_pages;
-	ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
-					 divide_round_up(bs->md_len, 8),
+	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
+					 spdk_divide_round_up(bs->md_len, 8),
 					 SPDK_BS_PAGE_SIZE);
 	num_md_pages += ctx->super->used_page_mask_len;
@@ -3616,8 +3611,8 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
 	 * up to the nearest page, plus a header.
 	 */
 	ctx->super->used_cluster_mask_start = num_md_pages;
-	ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
-					    divide_round_up(bs->total_clusters, 8),
+	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
+					    spdk_divide_round_up(bs->total_clusters, 8),
 					    SPDK_BS_PAGE_SIZE);
 	num_md_pages += ctx->super->used_cluster_mask_len;
@@ -3625,8 +3620,8 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
 	 * up to the nearest page, plus a header.
 	 */
 	ctx->super->used_blobid_mask_start = num_md_pages;
-	ctx->super->used_blobid_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
-					   divide_round_up(bs->md_len, 8),
+	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
+					   spdk_divide_round_up(bs->md_len, 8),
 					   SPDK_BS_PAGE_SIZE);
 	num_md_pages += ctx->super->used_blobid_mask_len;
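
To make the nested rounding in these mask-length calculations concrete (assuming the 4 KiB blobstore page size for SPDK_BS_PAGE_SIZE; the metadata size is made up): with bs->md_len = 1024 metadata pages, the bitmask needs spdk_divide_round_up(1024, 8) = 128 bytes, and adding the spdk_bs_md_mask header then rounding the sum up to a 4096-byte page gives spdk_divide_round_up(128 + sizeof(struct spdk_bs_md_mask), 4096) = 1, so the mask occupies a single metadata page.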
@@ -3641,7 +3636,7 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
 
 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
 
-	num_md_clusters = divide_round_up(num_md_pages, bs->pages_per_cluster);
+	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
 	if (num_md_clusters > bs->total_clusters) {
 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, "
 			    "please decrease number of pages reserved for metadata "

lib/lvol/lvol.c

@@ -48,12 +48,6 @@ SPDK_LOG_REGISTER_COMPONENT("lvol", SPDK_LOG_LVOL)
 static TAILQ_HEAD(, spdk_lvol_store) g_lvol_stores = TAILQ_HEAD_INITIALIZER(g_lvol_stores);
 static pthread_mutex_t g_lvol_stores_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-static inline uint64_t
-divide_round_up(uint64_t num, uint64_t divisor)
-{
-	return (num + divisor - 1) / divisor;
-}
-
 static int
 _spdk_add_lvs_to_list(struct spdk_lvol_store *lvs)
 {
@@ -1075,7 +1069,7 @@ spdk_lvol_create(struct spdk_lvol_store *lvs, const char *name, uint64_t sz,
 		return -ENOMEM;
 	}
 	lvol->lvol_store = lvs;
-	num_clusters = divide_round_up(sz, spdk_bs_get_cluster_size(bs));
+	num_clusters = spdk_divide_round_up(sz, spdk_bs_get_cluster_size(bs));
 	lvol->thin_provision = thin_provision;
 	snprintf(lvol->name, sizeof(lvol->name), "%s", name);
 	TAILQ_INSERT_TAIL(&lvol->lvol_store->pending_lvols, lvol, link);
@@ -1256,7 +1250,7 @@ spdk_lvol_resize(struct spdk_lvol *lvol, uint64_t sz,
 	struct spdk_blob *blob = lvol->blob;
 	struct spdk_lvol_store *lvs = lvol->lvol_store;
 	struct spdk_lvol_req *req;
-	uint64_t new_clusters = divide_round_up(sz, spdk_bs_get_cluster_size(lvs->blobstore));
+	uint64_t new_clusters = spdk_divide_round_up(sz, spdk_bs_get_cluster_size(lvs->blobstore));
 
 	req = calloc(1, sizeof(*req));
 	if (!req) {
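
As a numeric illustration of these lvol calls (assuming the common 4 MiB lvol store cluster size; the requested size is made up): creating or resizing with sz = 10 MiB gives spdk_divide_round_up(10 * 1024 * 1024, 4 * 1024 * 1024) = 3 clusters, so the 10 MiB request is backed by 12 MiB worth of clusters.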

lib/reduce/reduce.c

@@ -37,6 +37,7 @@
 #include "spdk/env.h"
 #include "spdk/string.h"
 #include "spdk/bit_array.h"
+#include "spdk/util.h"
 
 #include "spdk_internal/log.h"
 #include "libpmem.h"
@@ -138,12 +139,6 @@ _reduce_persist(struct spdk_reduce_vol *vol, const void *addr, size_t len)
 	}
 }
 
-static inline uint64_t
-divide_round_up(uint64_t num, uint64_t divisor)
-{
-	return (num + divisor - 1) / divisor;
-}
-
 static uint64_t
 _get_pm_logical_map_size(uint64_t vol_size, uint64_t chunk_size)
 {
@@ -153,7 +148,8 @@ _get_pm_logical_map_size(uint64_t vol_size, uint64_t chunk_size)
 	logical_map_size = chunks_in_logical_map * sizeof(uint64_t);
 
 	/* Round up to next cacheline. */
-	return divide_round_up(logical_map_size, REDUCE_PM_SIZE_ALIGNMENT) * REDUCE_PM_SIZE_ALIGNMENT;
+	return spdk_divide_round_up(logical_map_size, REDUCE_PM_SIZE_ALIGNMENT) *
+	       REDUCE_PM_SIZE_ALIGNMENT;
 }
 
 static uint64_t
@@ -176,7 +172,8 @@ _get_pm_total_chunks_size(uint64_t vol_size, uint64_t chunk_size, uint64_t backi
 	io_units_per_chunk = chunk_size / backing_io_unit_size;
 	total_chunks_size = num_chunks * io_units_per_chunk * sizeof(uint64_t);
 
-	return divide_round_up(total_chunks_size, REDUCE_PM_SIZE_ALIGNMENT) * REDUCE_PM_SIZE_ALIGNMENT;
+	return spdk_divide_round_up(total_chunks_size, REDUCE_PM_SIZE_ALIGNMENT) *
+	       REDUCE_PM_SIZE_ALIGNMENT;
 }
 
 static uint64_t *
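
A quick check of the cacheline rounding in these reduce.c helpers, assuming REDUCE_PM_SIZE_ALIGNMENT is the 64-byte cacheline size and using made-up sizes: a logical map of 1000 chunks occupies 1000 * sizeof(uint64_t) = 8000 bytes, and spdk_divide_round_up(8000, 64) * 64 = 125 * 64 = 8000, so it is already aligned; an 8004-byte map instead rounds up to spdk_divide_round_up(8004, 64) * 64 = 126 * 64 = 8064 bytes of persistent memory.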