numam-dpdk/lib/librte_mempool/rte_mempool_ops_default.c
Andrew Rybchenko 91ad034919 mempool: fold memory size calculation helper
rte_mempool_calc_mem_size_helper() was introduced to avoid
code duplication and was used in both the deprecated
rte_mempool_mem_size() and rte_mempool_op_calc_mem_size_default().
Now that the former has been removed, it is better to fold the helper
into the latter to make it more readable.

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-08-05 23:41:57 +02:00

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2016 Intel Corporation.
* Copyright(c) 2016 6WIND S.A.
* Copyright(c) 2018 Solarflare Communications Inc.
*/

#include <rte_mempool.h>
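
/*
 * Default op to calculate the memory size required to store obj_num
 * objects. Each object occupies header + element + trailer bytes; when
 * pg_shift is non-zero, memory is sized page by page under the
 * assumption that no object spans a page boundary. The minimum
 * contiguous chunk size and the required address alignment are
 * returned through min_chunk_size and align.
 *
 * Illustrative numbers only: with pg_shift = 12 (4 KiB pages) and a
 * 1 KiB total element size, 4 objects fit per page, so 1000 objects
 * need 250 pages, i.e. 250 * 4096 = 1024000 bytes.
 */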
ssize_t
rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
                                     uint32_t obj_num, uint32_t pg_shift,
                                     size_t *min_chunk_size, size_t *align)
{
        size_t total_elt_sz;
        size_t obj_per_page, pg_num, pg_sz;
        size_t mem_size;
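
        /*
         * Size of one object, including the mempool header and trailer.
         * The branches below handle zero-sized objects, sizing without a
         * page constraint (pg_shift == 0), and page-by-page sizing.
         */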
        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

        if (total_elt_sz == 0) {
                mem_size = 0;
        } else if (pg_shift == 0) {
                mem_size = total_elt_sz * obj_num;
        } else {
                pg_sz = (size_t)1 << pg_shift;
                obj_per_page = pg_sz / total_elt_sz;
                if (obj_per_page == 0) {
                        /*
                         * Note that if object size is bigger than page size,
                         * then it is assumed that pages are grouped in subsets
                         * of physically contiguous pages big enough to store
                         * at least one object.
                         */
                        mem_size =
                                RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
                } else {
                        pg_num = (obj_num + obj_per_page - 1) / obj_per_page;
                        mem_size = pg_num << pg_shift;
                }
        }

        *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);

        *align = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, (size_t)1 << pg_shift);

        return mem_size;
}
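
/*
 * Default op to populate a mempool from a single virtually contiguous
 * memory area: objects are laid out one after another, obj_cb() is
 * invoked for each of them (RTE_BAD_IOVA is propagated when the area
 * has no usable IOVA), and every object is enqueued into the pool.
 * Returns the number of objects actually added.
 */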
int
rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
                void *vaddr, rte_iova_t iova, size_t len,
                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        size_t total_elt_sz;
        size_t off;
        unsigned int i;
        void *obj;

        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
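
        /*
         * Place objects back to back while they still fit in len and
         * max_objs has not been reached; obj points at the element
         * itself, just past the per-object header.
         */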
        for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
                off += mp->header_size;
                obj = (char *)vaddr + off;
                obj_cb(mp, obj_cb_arg, obj,
                       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
                rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
                off += mp->elt_size + mp->trailer_size;
        }

        return i;
}