net/dpaa2: use internal mempool for SG table

Create and use a driver-internal mempool for allocating the SG table
memory required for FD creation, instead of relying on the user's
mempool.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Authored by Gagandeep Singh on 2022-10-07 08:57:34 +05:30, committed by Ferruh Yigit
parent e7524271c3
commit 75e2a1d473
3 changed files with 34 additions and 7 deletions
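
For context, the patch boils down to a driver-private pktmbuf pool with a
create-on-first-probe / free-on-last-remove lifecycle. Below is a minimal,
self-contained sketch of that pattern using only public DPDK APIs; the names
(sg_pool_get(), sg_pool_put(), SG_POOL_*) are illustrative stand-ins rather
than the driver's own symbols, and the 2048-byte element size assumes a
16-byte struct qbman_sge.

#include <errno.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_lcore.h>

/* Illustrative stand-ins for the values the patch adds in the header. */
#define SG_POOL_SIZE        2048        /* number of S/G table buffers */
#define SG_POOL_CACHE_SIZE  256         /* per-lcore cache */
#define SG_TABLE_BYTES      (128 * 16)  /* 128 entries, assuming a 16-byte SGE */

static struct rte_mempool *sg_pool;     /* plays the role of dpaa2_tx_sg_pool */
static int sg_pool_devs;                /* plays the role of dpaa2_valid_dev */

/* Probe path: create the shared pool only for the first device. */
static int
sg_pool_get(void)
{
	if (sg_pool == NULL) {
		sg_pool = rte_pktmbuf_pool_create("sg_pool_example",
				SG_POOL_SIZE, SG_POOL_CACHE_SIZE,
				0 /* no private area */, SG_TABLE_BYTES,
				rte_socket_id());
		if (sg_pool == NULL)
			return -ENOMEM;
	}
	sg_pool_devs++;
	return 0;
}

/* Remove path: free the pool when the last device goes away. */
static void
sg_pool_put(void)
{
	if (--sg_pool_devs == 0) {
		rte_mempool_free(sg_pool);
		sg_pool = NULL;
	}
}

In the patch itself this refcount is dpaa2_valid_dev, bumped in
rte_dpaa2_probe() and dropped in rte_dpaa2_remove(), as the hunks below show.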


@@ -78,6 +78,9 @@ bool dpaa2_enable_err_queue;
#define MAX_NB_RX_DESC 11264
int total_nb_rx_desc;
int dpaa2_valid_dev;
struct rte_mempool *dpaa2_tx_sg_pool;
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -2907,7 +2910,20 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
/* Invoke PMD device initialization function */
diag = dpaa2_dev_init(eth_dev);
if (diag == 0) {
if (!dpaa2_tx_sg_pool) {
dpaa2_tx_sg_pool =
rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool",
DPAA2_POOL_SIZE,
DPAA2_POOL_CACHE_SIZE, 0,
DPAA2_MAX_SGS * sizeof(struct qbman_sge),
rte_socket_id());
if (dpaa2_tx_sg_pool == NULL) {
DPAA2_PMD_ERR("SG pool creation failed\n");
return -ENOMEM;
}
}
rte_eth_dev_probing_finish(eth_dev);
dpaa2_valid_dev++;
return 0;
}
@@ -2923,6 +2939,9 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
eth_dev = dpaa2_dev->eth_dev;
dpaa2_dev_close(eth_dev);
dpaa2_valid_dev--;
if (!dpaa2_valid_dev)
rte_mempool_free(dpaa2_tx_sg_pool);
ret = rte_eth_dev_release_port(eth_dev);
return ret;
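
One operational note on the teardown above: rte_mempool_free() does not wait
for outstanding buffers, so all S/G table mbufs handed to hardware must
already have been returned by TX completion before the last device is
removed. A purely illustrative, more defensive variant (not part of the
patch; it assumes the driver's existing DPAA2_PMD_WARN log macro):

	if (!dpaa2_valid_dev) {
		/* Diagnostic only: the pool should have no buffers in
		 * flight at this point. */
		if (rte_mempool_in_use_count(dpaa2_tx_sg_pool) != 0)
			DPAA2_PMD_WARN("SG pool freed with buffers still in use");
		rte_mempool_free(dpaa2_tx_sg_pool);
		dpaa2_tx_sg_pool = NULL;
	}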


@@ -121,6 +121,15 @@
#define DPAA2_PKT_TYPE_VLAN_1 0x0160
#define DPAA2_PKT_TYPE_VLAN_2 0x0260
/* Global pool used by driver for SG list TX */
extern struct rte_mempool *dpaa2_tx_sg_pool;
/* Maximum SG segments */
#define DPAA2_MAX_SGS 128
/* SG pool size */
#define DPAA2_POOL_SIZE 2048
/* SG pool cache size */
#define DPAA2_POOL_CACHE_SIZE 256
/* enable timestamp in mbuf*/
extern bool dpaa2_enable_ts[];
extern uint64_t dpaa2_timestamp_rx_dynflag;
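
Worked out, these constants mean one pool element holds a complete S/G
table: DPAA2_MAX_SGS = 128 entries times a 16-byte struct qbman_sge
(assuming the usual DPAA2 S/G entry format) gives 2048 bytes of data room
per buffer, and DPAA2_POOL_SIZE = 2048 such buffers are shared by all ports.
The same limit caps how many segments one chained mbuf can carry, since a
single table must describe the whole chain; a hedged, illustrative guard
(not part of the patch) could look like:

#include <rte_mbuf.h>

/* Illustrative only: a chain longer than DPAA2_MAX_SGS segments cannot be
 * described by one S/G table buffer and would have to be linearized
 * (e.g. with rte_pktmbuf_linearize()) or dropped first. */
static inline int
tx_chain_fits_sg_table(const struct rte_mbuf *m, uint16_t max_sgs)
{
	return m->nb_segs <= max_sgs;
}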


@@ -403,7 +403,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd,
struct rte_mempool *mp, uint16_t bpid)
uint16_t bpid)
{
struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
struct qbman_sge *sgt, *sge = NULL;
@@ -433,12 +433,12 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
}
DPAA2_SET_FD_OFFSET(fd, offset);
} else {
temp = rte_pktmbuf_alloc(mp);
temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
if (temp == NULL) {
DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
return -ENOMEM;
}
DPAA2_SET_ONLY_FD_BPID(fd, bpid);
DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
DPAA2_SET_FD_OFFSET(fd, temp->data_off);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
@@ -1321,9 +1321,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
if (unlikely((*bufs)->nb_segs > 1)) {
mp = (*bufs)->pool;
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
mp, 0))
mempool_to_bpid(mp)))
goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
@@ -1372,7 +1373,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
mp, bpid))
bpid))
goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
@@ -1646,7 +1647,6 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
mp,
bpid))
goto send_frames;
} else {
@@ -1810,7 +1810,6 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
mp,
bpid))
goto send_n_return;
} else {