mempool: add op to populate objects using provided memory

The callback allows customizing how objects are stored in the
memory chunk. A default implementation of the callback, which simply
places objects one by one, is provided.

Suggested-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Author: Andrew Rybchenko
Date: 2018-04-16 14:24:34 +01:00
Committed by: Thomas Monjalon
Commit: e1174f2d53 (parent 0a48646893)
7 changed files with 149 additions and 14 deletions
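A minimal sketch (not from this patch) of what the new op enables, assuming a hypothetical driver that wants every object to start on a 128-byte boundary: it shifts the chunk start and then falls back to the default one-by-one placement that this patch also adds. The names MY_OBJ_ALIGN and my_ops_populate are made up for the example; the op type, the default implementation and RTE_BAD_IOVA are the ones introduced or used in the hunks below.

#include <rte_mempool.h>

#define MY_OBJ_ALIGN 128 /* hypothetical placement constraint of the driver */

/* Hypothetical populate op: skip to the next MY_OBJ_ALIGN boundary,
 * then let the default op slice and enqueue the objects one by one.
 */
static int
my_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
                void *vaddr, rte_iova_t iova, size_t len,
                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        char *va = vaddr;
        size_t off = RTE_PTR_ALIGN_CEIL(va, MY_OBJ_ALIGN) - va;

        if (off > len)
                return 0; /* no room left for a single object */

        return rte_mempool_op_populate_default(mp, max_objs, va + off,
                        (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
                        len - off, obj_cb, obj_cb_arg);
}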


@@ -60,7 +60,7 @@ Deprecation Notices
 - removal of ``get_capabilities`` mempool ops and related flags.
 - substitute ``register_memory_area`` with ``populate`` ops.
-- addition of new ops to customize objects population and allocate contiguous
+- addition of new op to allocate contiguous
   block of objects if underlying driver supports it.
 * mbuf: The opaque ``mbuf->hash.sched`` field will be updated to support generic


@@ -230,6 +230,8 @@ ABI Changes
   A new callback ``calc_mem_size`` has been added to ``rte_mempool_ops``
   to allow to customize required memory size calculation.
+  A new callback ``populate`` has been added to ``rte_mempool_ops``
+  to allow to customize objects population.
 * **Additional fields in rte_eth_dev_info.**


@@ -122,7 +122,8 @@ get_min_page_size(void)
 static void
-mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova)
+mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
+                 void *obj, rte_iova_t iova)
 {
         struct rte_mempool_objhdr *hdr;
         struct rte_mempool_objtlr *tlr __rte_unused;
@@ -139,9 +140,6 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova)
         tlr = __mempool_get_trailer(obj);
         tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
 #endif
-
-        /* enqueue in ring */
-        rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
 }
 
 /* call obj_cb() for each mempool element */
@@ -420,17 +418,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
         else
                 off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
 
-        while (off + total_elt_sz <= len && mp->populated_size < mp->size) {
-                off += mp->header_size;
-                if (iova == RTE_BAD_IOVA)
-                        mempool_add_elem(mp, (char *)vaddr + off,
-                                RTE_BAD_IOVA);
-                else
-                        mempool_add_elem(mp, (char *)vaddr + off, iova + off);
-                off += mp->elt_size + mp->trailer_size;
-                i++;
-        }
+        if (off > len) {
+                ret = -EINVAL;
+                goto fail;
+        }
+
+        i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
+                (char *)vaddr + off,
+                (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
+                len - off, mempool_add_elem, NULL);
 
         /* not enough room to store one object */
         if (i == 0) {
                 ret = -EINVAL;
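Note the shift in responsibilities here: mempool_add_elem() no longer enqueues the object itself; it is now handed to the populate op as the per-object callback, and enqueueing populated objects into the pool is left to the op (the driver's own callback or the default implementation further down).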


@@ -456,6 +456,63 @@ ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
                 uint32_t obj_num, uint32_t pg_shift,
                 size_t *min_chunk_size, size_t *align);
 
+/**
+ * Function to be called for each populated object.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] opaque
+ *   An opaque pointer passed to iterator.
+ * @param[in] vaddr
+ *   Object virtual address.
+ * @param[in] iova
+ *   Input/output virtual address of the object or RTE_BAD_IOVA.
+ */
+typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
+                void *opaque, void *vaddr, rte_iova_t iova);
+
+/**
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * Populated objects should be enqueued to the pool, e.g. using
+ * rte_mempool_ops_enqueue_bulk().
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] max_objs
+ *   Maximum number of objects to be populated.
+ * @param[in] vaddr
+ *   The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address
+ * @param[in] len
+ *   The length of memory in bytes.
+ * @param[in] obj_cb
+ *   Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ *   An opaque pointer passed to the callback function.
+ * @return
+ *   The number of objects added on success.
+ *   On error, no objects are populated and a negative errno is returned.
+ */
+typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
+                unsigned int max_objs,
+                void *vaddr, rte_iova_t iova, size_t len,
+                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Default way to populate memory pool object using provided memory
+ * chunk: just slice objects one by one.
+ */
+int rte_mempool_op_populate_default(struct rte_mempool *mp,
+                unsigned int max_objs,
+                void *vaddr, rte_iova_t iova, size_t len,
+                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
 /** Structure defining mempool operations structure */
 struct rte_mempool_ops {
         char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -477,6 +534,11 @@ struct rte_mempool_ops {
          * store specified number of objects.
          */
         rte_mempool_calc_mem_size_t calc_mem_size;
+        /**
+         * Optional callback to populate mempool objects using
+         * provided memory chunk.
+         */
+        rte_mempool_populate_t populate;
 } __rte_cache_aligned;
 
 #define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -648,6 +710,34 @@ ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
                 uint32_t obj_num, uint32_t pg_shift,
                 size_t *min_chunk_size, size_t *align);
 
+/**
+ * @internal wrapper for mempool_ops populate callback.
+ *
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * @param[in] mp
+ *   A pointer to the mempool structure.
+ * @param[in] max_objs
+ *   Maximum number of objects to be populated.
+ * @param[in] vaddr
+ *   The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ *   The IO address
+ * @param[in] len
+ *   The length of memory in bytes.
+ * @param[in] obj_cb
+ *   Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ *   An opaque pointer passed to the callback function.
+ * @return
+ *   The number of objects added on success.
+ *   On error, no objects are populated and a negative errno is returned.
+ */
+int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+                void *vaddr, rte_iova_t iova, size_t len,
+                rte_mempool_populate_obj_cb_t *obj_cb,
+                void *obj_cb_arg);
+
 /**
  * @internal wrapper for mempool_ops free callback.
  *
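As a hedged illustration of the contract documented above (not part of this patch): a hypothetical driver callback that lays objects out like the default implementation but leaves a made-up MY_GUARD_SIZE pad of unused space after each one. It performs the two duties the comments spell out: invoking obj_cb for every object and enqueueing the object into the pool. The names my_guarded_populate, MY_GUARD_SIZE, my_ops and "my_driver" are invented for the sketch.

#include <rte_mempool.h>

#define MY_GUARD_SIZE 64 /* hypothetical per-object pad required by a driver */

/* Hypothetical populate callback: same slicing as the default,
 * plus MY_GUARD_SIZE of unused space after every object.
 */
static int
my_guarded_populate(struct rte_mempool *mp, unsigned int max_objs,
                    void *vaddr, rte_iova_t iova, size_t len,
                    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
        size_t total_elt_sz;
        size_t off;
        unsigned int i;
        void *obj;

        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

        for (off = 0, i = 0;
             off + total_elt_sz + MY_GUARD_SIZE <= len && i < max_objs;
             i++) {
                off += mp->header_size;
                obj = (char *)vaddr + off;
                obj_cb(mp, obj_cb_arg, obj,
                       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
                rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
                off += mp->elt_size + mp->trailer_size + MY_GUARD_SIZE;
        }

        return i;
}

/* A driver would expose this through the new rte_mempool_ops field,
 * e.g. (mandatory alloc/free/enqueue/dequeue/get_count handlers omitted):
 *
 *      static const struct rte_mempool_ops my_ops = {
 *              .name = "my_driver",
 *              .populate = my_guarded_populate,
 *      };
 *      MEMPOOL_REGISTER_OPS(my_ops);
 */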


@@ -60,6 +60,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
         ops->get_capabilities = h->get_capabilities;
         ops->register_memory_area = h->register_memory_area;
         ops->calc_mem_size = h->calc_mem_size;
+        ops->populate = h->populate;
 
         rte_spinlock_unlock(&rte_mempool_ops_table.sl);
@@ -141,6 +142,26 @@ rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
         return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
 }
 
+/* wrapper to populate memory pool objects using provided memory chunk */
+int
+rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+                         void *vaddr, rte_iova_t iova, size_t len,
+                         rte_mempool_populate_obj_cb_t *obj_cb,
+                         void *obj_cb_arg)
+{
+        struct rte_mempool_ops *ops;
+
+        ops = rte_mempool_get_ops(mp->ops_index);
+
+        if (ops->populate == NULL)
+                return rte_mempool_op_populate_default(mp, max_objs, vaddr,
+                                                       iova, len, obj_cb,
+                                                       obj_cb_arg);
+
+        return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
+                             obj_cb_arg);
+}
+
 /* sets mempool ops previously registered by rte_mempool_register_ops. */
 int
 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
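A hedged usage sketch from the application side (not part of this patch): after rte_mempool_create_empty(), the driver is chosen with rte_mempool_set_ops_byname(), and rte_mempool_populate_default() eventually reaches the wrapper above, which dispatches to that driver's populate callback or, when the driver leaves it NULL (as the built-in "ring_mp_mc" driver does), to rte_mempool_op_populate_default(). The pool name, object count and sizes below are arbitrary, and EAL is assumed to be initialized.

#include <rte_mempool.h>

static struct rte_mempool *
create_example_pool(void)
{
        struct rte_mempool *mp;

        /* 4096 objects of 2048 bytes each; assumes rte_eal_init() has run. */
        mp = rte_mempool_create_empty("example_pool", 4096, 2048,
                                      256, 0, SOCKET_ID_ANY, 0);
        if (mp == NULL)
                return NULL;

        /* "ring_mp_mc" provides no populate callback, so the default
         * object layout is used when the pool memory is populated.
         */
        if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
            rte_mempool_populate_default(mp) < 0) {
                rte_mempool_free(mp);
                return NULL;
        }

        return mp;
}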


@@ -36,3 +36,27 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
 
         return mem_size;
 }
+
+int
+rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
+                void *vaddr, rte_iova_t iova, size_t len,
+                rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+        size_t total_elt_sz;
+        size_t off;
+        unsigned int i;
+        void *obj;
+
+        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+        for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
+                off += mp->header_size;
+                obj = (char *)vaddr + off;
+                obj_cb(mp, obj_cb_arg, obj,
+                       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
+                rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
+                off += mp->elt_size + mp->trailer_size;
+        }
+
+        return i;
+}
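For a concrete feel for the default layout (numbers chosen purely for illustration): with header_size = 64, elt_size = 2048 and trailer_size = 0, total_elt_sz is 2112 bytes, so a 2 MiB chunk (2097152 bytes) yields floor(2097152 / 2112) = 992 objects, each obj pointer sitting header_size bytes past the start of its slot. The loop is essentially the one removed from rte_mempool_populate_iova() above, with the enqueue that used to live in mempool_add_elem() now issued here, right after the callback.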


@@ -56,5 +56,6 @@ DPDK_18.05 {
         global:
 
         rte_mempool_op_calc_mem_size_default;
+        rte_mempool_op_populate_default;
 
 } DPDK_17.11;