05912855bc
The callback is no longer required, since there is a new callback to populate objects using a provided memory area, which supplies the same information.

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
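To make the migration concrete, here is a rough sketch of the driver side; the name my_populate and its pass-through body are illustrative, not part of this commit. A driver that previously implemented the removed memory-area callback now receives the same information (virtual address, IOVA and length of the chunk) through its populate callback, and may delegate the actual object placement to the default helper:

static int
my_populate(struct rte_mempool *mp, unsigned int max_objs,
	    void *vaddr, rte_iova_t iova, size_t len,
	    rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	/* driver-specific bookkeeping for the new memory area goes here,
	 * e.g. recording the chunk for hardware programming */
	return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova,
					       len, obj_cb, obj_cb_arg);
}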
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */

#include <stdio.h>
#include <string.h>

#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_dev.h>

/* indirect jump table to support external memory pools. */
struct rte_mempool_ops_table rte_mempool_ops_table = {
	.sl = RTE_SPINLOCK_INITIALIZER,
	.num_ops = 0
};
/* add a new ops struct in rte_mempool_ops_table, return its index. */
int
rte_mempool_register_ops(const struct rte_mempool_ops *h)
{
	struct rte_mempool_ops *ops;
	int16_t ops_index;

	rte_spinlock_lock(&rte_mempool_ops_table.sl);

	if (rte_mempool_ops_table.num_ops >=
			RTE_MEMPOOL_MAX_OPS_IDX) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(ERR, MEMPOOL,
			"Maximum number of mempool ops structs exceeded\n");
		return -ENOSPC;
	}

	if (h->alloc == NULL || h->enqueue == NULL ||
	    h->dequeue == NULL || h->get_count == NULL) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(ERR, MEMPOOL,
			"Missing callback while registering mempool ops\n");
		return -EINVAL;
	}

	if (strlen(h->name) >= sizeof(ops->name) - 1) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
				__func__, h->name);
		rte_errno = EEXIST;
		return -EEXIST;
	}

	ops_index = rte_mempool_ops_table.num_ops++;
	ops = &rte_mempool_ops_table.ops[ops_index];
	snprintf(ops->name, sizeof(ops->name), "%s", h->name);
	ops->alloc = h->alloc;
	ops->free = h->free;
	ops->enqueue = h->enqueue;
	ops->dequeue = h->dequeue;
	ops->get_count = h->get_count;
	ops->calc_mem_size = h->calc_mem_size;
	ops->populate = h->populate;

	rte_spinlock_unlock(&rte_mempool_ops_table.sl);

	return ops_index;
}
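/*
 * Usage sketch: a pool driver normally does not call
 * rte_mempool_register_ops() directly; it declares an ops table and lets
 * the MEMPOOL_REGISTER_OPS() constructor macro from rte_mempool.h register
 * it at load time. All "my_*" names below are hypothetical:
 *
 *	static const struct rte_mempool_ops my_pool_ops = {
 *		.name = "my_pool",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *	};
 *	MEMPOOL_REGISTER_OPS(my_pool_ops);
 *
 * calc_mem_size and populate may be left unset; the wrappers below then
 * fall back to the default implementations.
 */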

/* wrapper to allocate an external mempool's private (pool) data. */
int
rte_mempool_ops_alloc(struct rte_mempool *mp)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->alloc(mp);
}

/* wrapper to free an external mempool's private (pool) data. */
void
rte_mempool_ops_free(struct rte_mempool *mp)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	if (ops->free == NULL)
		return;
	ops->free(mp);
}

/* wrapper to get available objects in an external mempool. */
unsigned int
rte_mempool_ops_get_count(const struct rte_mempool *mp)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->get_count(mp);
}

/* wrapper to calculate the memory size required to store given number
 * of objects
 */
ssize_t
rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
				uint32_t obj_num, uint32_t pg_shift,
				size_t *min_chunk_size, size_t *align)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);

	if (ops->calc_mem_size == NULL)
		return rte_mempool_op_calc_mem_size_default(mp, obj_num,
				pg_shift, min_chunk_size, align);

	return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
}
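
/*
 * The wrapper above is consulted while a pool is being populated, before
 * any memory is allocated, so the driver can report how much memory it
 * needs for obj_num objects and express chunking constraints through the
 * min_chunk_size and align output parameters.
 */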

/* wrapper to populate memory pool objects using provided memory chunk */
int
rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
				void *vaddr, rte_iova_t iova, size_t len,
				rte_mempool_populate_obj_cb_t *obj_cb,
				void *obj_cb_arg)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);

	if (ops->populate == NULL)
		return rte_mempool_op_populate_default(mp, max_objs, vaddr,
						       iova, len, obj_cb,
						       obj_cb_arg);

	return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
			     obj_cb_arg);
}
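
/*
 * The wrapper above runs once for each memory chunk added to the pool;
 * the default implementation lays objects out contiguously within the
 * chunk and invokes obj_cb for each object it places.
 */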

/* sets mempool ops previously registered by rte_mempool_register_ops. */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
	void *pool_config)
{
	struct rte_mempool_ops *ops = NULL;
	unsigned i;

	/* too late, the mempool is already populated. */
	if (mp->flags & MEMPOOL_F_POOL_CREATED)
		return -EEXIST;

	for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
		if (!strcmp(name,
				rte_mempool_ops_table.ops[i].name)) {
			ops = &rte_mempool_ops_table.ops[i];
			break;
		}
	}

	if (ops == NULL)
		return -EINVAL;

	mp->ops_index = i;
	mp->pool_config = pool_config;
	return 0;
}
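
/*
 * Usage sketch (hypothetical pool name and sizes): ops must be selected
 * after the empty pool is created but before it is populated, since
 * population sets MEMPOOL_F_POOL_CREATED and makes the choice final.
 * "ring_mp_mc" is the multi-producer/multi-consumer ring-based ops:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("test_pool", 4096, 2048,
 *				      0, 0, SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create mempool\n");
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0)
 *		rte_panic("cannot set mempool ops\n");
 *	rte_mempool_populate_default(mp);
 */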