mempool: return 0 if area is too small on populate

Change rte_mempool_populate_iova() and rte_mempool_populate_virt() to
return 0 instead of -EINVAL when there is not enough room to store one
object, as it can be helpful for applications to distinguish this
specific case.

As this is an ABI change, use symbol versioning to preserve old
behavior for binary applications.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Lukasz Wojciechowski <l.wojciechow@partner.samsung.com>
This commit is contained in:
parent
105f3039c7
commit
c0280d5d8a
@@ -65,11 +65,6 @@ Deprecation Notices
   structure would be made internal (or removed if all dependencies are cleared)
   in future releases.

-* mempool: starting from v20.05, the API of rte_mempool_populate_iova()
-  and rte_mempool_populate_virt() will change to return 0 instead
-  of -EINVAL when there is not enough room to store one object. The ABI
-  will be preserved until 20.11.
-
 * ethdev: the legacy filter API, including
   ``rte_eth_dev_filter_supported()``, ``rte_eth_dev_filter_ctrl()`` as well
   as filter types MACVLAN, ETHERTYPE, FLEXIBLE, SYN, NTUPLE, TUNNEL, FDIR,
@@ -241,6 +241,10 @@ API Changes
    Also, make sure to start the actual text at the margin.
    =========================================================

+* mempool: The API of ``rte_mempool_populate_iova()`` and
+  ``rte_mempool_populate_virt()`` changed to return 0 instead of -EINVAL
+  when there is not enough room to store one object.
+
 ABI Changes
 -----------
@@ -1319,7 +1319,7 @@ ntb_mbuf_pool_create(uint16_t mbuf_seg_size, uint32_t nb_mbuf,
 			mz->len - ntb_info.ntb_hdr_size,
 			ntb_mempool_mz_free,
 			(void *)(uintptr_t)mz);
-	if (ret < 0) {
+	if (ret <= 0) {
 		rte_memzone_free(mz);
 		rte_mempool_free(mp);
 		return NULL;
@@ -9,6 +9,8 @@ foreach flag: extra_flags
 	endif
 endforeach

+use_function_versioning = true
+
 sources = files('rte_mempool.c', 'rte_mempool_ops.c',
 		'rte_mempool_ops_default.c', 'mempool_trace_points.c')
 headers = files('rte_mempool.h', 'rte_mempool_trace.h',
@@ -31,6 +31,7 @@
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
 #include <rte_tailq.h>
+#include <rte_function_versioning.h>

 #include "rte_mempool.h"
 #include "rte_mempool_trace.h"
@@ -303,12 +304,17 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
 	return 0;
 }

+__vsym int
+rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr,
+	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
 /* Add objects in the pool, using a physically contiguous memory
  * zone. Return the number of objects added, or a negative value
  * on error.
  */
-static int
-__rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+__vsym int
+rte_mempool_populate_iova_v21(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
@@ -359,6 +365,8 @@ __rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,

 	STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
 	mp->nb_mem_chunks++;
+
+	rte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque);
 	return i;

 fail:
@@ -366,21 +374,34 @@ __rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	return ret;
 }

-int
-rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+BIND_DEFAULT_SYMBOL(rte_mempool_populate_iova, _v21, 21);
+MAP_STATIC_SYMBOL(
+	int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+		rte_iova_t iova, size_t len,
+		rte_mempool_memchunk_free_cb_t *free_cb,
+		void *opaque),
+	rte_mempool_populate_iova_v21);
+
+__vsym int
+rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
+	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
+__vsym int
+rte_mempool_populate_iova_v20(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
 	int ret;

-	ret = __rte_mempool_populate_iova(mp, vaddr, iova, len, free_cb,
+	ret = rte_mempool_populate_iova_v21(mp, vaddr, iova, len, free_cb,
 						opaque);
 	if (ret == 0)
 		ret = -EINVAL;

-	rte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque);
 	return ret;
 }
+VERSION_SYMBOL(rte_mempool_populate_iova, _v20, 20.0);

 static rte_iova_t
 get_iova(void *addr)
@@ -395,11 +416,16 @@ get_iova(void *addr)
 	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
 }

+__vsym int
+rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
+	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
 /* Populate the mempool with a virtual area. Return the number of
  * objects added, or a negative value on error.
  */
-int
-rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+__vsym int
+rte_mempool_populate_virt_v21(struct rte_mempool *mp, char *addr,
 	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
 	void *opaque)
 {
@@ -432,7 +458,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 			break;
 		}

-		ret = __rte_mempool_populate_iova(mp, addr + off, iova,
+		ret = rte_mempool_populate_iova_v21(mp, addr + off, iova,
 			phys_len, free_cb, opaque);
 		if (ret == 0)
 			continue;
@@ -443,9 +469,6 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 		cnt += ret;
 	}

-	if (cnt == 0)
-		return -EINVAL;
-
 	rte_mempool_trace_populate_virt(mp, addr, len, pg_sz, free_cb, opaque);
 	return cnt;
@@ -453,6 +476,35 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	rte_mempool_free_memchunks(mp);
 	return ret;
 }
+BIND_DEFAULT_SYMBOL(rte_mempool_populate_virt, _v21, 21);
+MAP_STATIC_SYMBOL(
+	int rte_mempool_populate_virt(struct rte_mempool *mp,
+		char *addr, size_t len, size_t pg_sz,
+		rte_mempool_memchunk_free_cb_t *free_cb,
+		void *opaque),
+	rte_mempool_populate_virt_v21);
+
+__vsym int
+rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
+	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque);
+
+__vsym int
+rte_mempool_populate_virt_v20(struct rte_mempool *mp, char *addr,
+	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque)
+{
+	int ret;
+
+	ret = rte_mempool_populate_virt_v21(mp, addr, len, pg_sz,
+						free_cb, opaque);
+
+	if (ret == 0)
+		ret = -EINVAL;
+
+	return ret;
+}
+VERSION_SYMBOL(rte_mempool_populate_virt, _v20, 20.0);

 /* Get the minimal page size used in a mempool before populating it. */
 int
@@ -609,6 +661,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 				mz->len, pg_sz,
 				rte_mempool_memchunk_mz_free,
 				(void *)(uintptr_t)mz);
+		if (ret == 0) /* should not happen */
+			ret = -ENOBUFS;
 		if (ret < 0) {
 			rte_memzone_free(mz);
 			goto fail;
@@ -701,6 +755,8 @@ rte_mempool_populate_anon(struct rte_mempool *mp)

 	ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
 		rte_mempool_memchunk_anon_free, addr);
+	if (ret == 0) /* should not happen */
+		ret = -ENOBUFS;
 	if (ret < 0) {
 		rte_errno = -ret;
 		goto fail;
@@ -1112,9 +1112,12 @@ rte_mempool_free(struct rte_mempool *mp);
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool the following code is returned:
+ *     (0): not enough room in chunk for one object.
+ *     (-ENOSPC): mempool is already populated.
+ *     (-ENOMEM): allocation failure.
  */
 int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
@@ -1139,9 +1142,12 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  * @param opaque
  *   An opaque argument passed to free_cb.
  * @return
- *   The number of objects added on success.
+ *   The number of objects added on success (strictly positive).
  *   On error, the chunk is not added in the memory list of the
- *   mempool and a negative errno is returned.
+ *   mempool the following code is returned:
+ *     (0): not enough room in chunk for one object.
+ *     (-ENOSPC): mempool is already populated.
+ *     (-ENOMEM): allocation failure.
  */
 int
 rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
@@ -31,6 +31,13 @@ DPDK_20.0 {
 	local: *;
 };

+DPDK_21 {
+	global:
+
+	rte_mempool_populate_iova;
+	rte_mempool_populate_virt;
+} DPDK_20.0;
+
 EXPERIMENTAL {
 	global:
Loading…
Reference in New Issue
Block a user