net/dpaa: use internal mempool for SG table

Create and use a driver-internal mempool for allocating
the scatter-gather (SG) table memory required for frame
descriptor (FD) creation.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Author: Gagandeep Singh <g.singh@nxp.com>
Date: 2022-10-07 08:57:40 +05:30
Committed-by: Ferruh Yigit
Parent: b585ecb54a
Commit: 533c31cc83
3 changed files with 31 additions and 5 deletions
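For context, here is a minimal sketch of the pattern the patch below applies: create one TX-only mempool on first probe, sized so each buffer can hold a full SG table, allocate the table from that pool in the transmit path, and free the pool when the last device goes away. The constants mirror the patch; the pool name, helper names and SG_ENTRY_BYTES are illustrative assumptions (the driver uses sizeof(struct qm_sg_entry) from the DPAA bus headers), not the driver's own code.

/*
 * Sketch only, assuming DPDK's mbuf/mempool API; names and SG_ENTRY_BYTES
 * are placeholders, not taken from the driver.
 */
#include <errno.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define SG_POOL_SIZE       2048  /* mirrors DPAA_POOL_SIZE below */
#define SG_POOL_CACHE_SIZE 256   /* mirrors DPAA_POOL_CACHE_SIZE below */
#define SG_MAX_SEGS        128   /* mirrors DPAA_MAX_SGS below */
#define SG_ENTRY_BYTES     16    /* assumed size of one hardware SG entry */

static struct rte_mempool *sg_pool; /* created once, shared by all ports */

/* Probe path: create the pool only for the first device. */
static int
sg_pool_init(void)
{
	if (sg_pool != NULL)
		return 0;
	sg_pool = rte_pktmbuf_pool_create("tx_sg_pool", SG_POOL_SIZE,
					  SG_POOL_CACHE_SIZE, 0 /* priv size */,
					  SG_MAX_SEGS * SG_ENTRY_BYTES,
					  rte_socket_id());
	return sg_pool == NULL ? -ENOMEM : 0;
}

/* TX path: the SG table buffer now comes from sg_pool rather than from
 * the application's mbuf pool (bp_info->mp in the old code). */
static struct rte_mbuf *
sg_table_alloc(void)
{
	return rte_pktmbuf_alloc(sg_pool);
}

/* Remove path: free the pool once the last device is gone. */
static void
sg_pool_fini(void)
{
	rte_mempool_free(sg_pool);
	sg_pool = NULL;
}

In the patch itself this lifetime is tracked with a dpaa_valid_dev counter: the pool is created when the first port probes successfully and freed from rte_dpaa_remove() once the counter drops back to zero.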

drivers/net/dpaa/dpaa_ethdev.c

@@ -133,6 +133,8 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 };
 static struct rte_dpaa_driver rte_dpaa_pmd;
+int dpaa_valid_dev;
+struct rte_mempool *dpaa_tx_sg_pool;
 static int
 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
@@ -2223,7 +2225,20 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 	/* Invoke PMD device initialization function */
 	diag = dpaa_dev_init(eth_dev);
 	if (diag == 0) {
+		if (!dpaa_tx_sg_pool) {
+			dpaa_tx_sg_pool =
+				rte_pktmbuf_pool_create("dpaa_mbuf_tx_sg_pool",
+				DPAA_POOL_SIZE,
+				DPAA_POOL_CACHE_SIZE, 0,
+				DPAA_MAX_SGS * sizeof(struct qm_sg_entry),
+				rte_socket_id());
+			if (dpaa_tx_sg_pool == NULL) {
+				DPAA_PMD_ERR("SG pool creation failed\n");
+				return -ENOMEM;
+			}
+		}
 		rte_eth_dev_probing_finish(eth_dev);
+		dpaa_valid_dev++;
 		return 0;
 	}
@@ -2241,6 +2256,9 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
 	eth_dev = dpaa_dev->eth_dev;
 	dpaa_eth_dev_close(eth_dev);
+	dpaa_valid_dev--;
+	if (!dpaa_valid_dev)
+		rte_mempool_free(dpaa_tx_sg_pool);
 	ret = rte_eth_dev_release_port(eth_dev);
 	return ret;

drivers/net/dpaa/dpaa_ethdev.h

@@ -33,6 +33,13 @@
 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+/* Maximum SG segments supported on all cores*/
+#define DPAA_MAX_SGS 128
+/* SG pool size */
+#define DPAA_POOL_SIZE 2048
+/* SG pool cache size */
+#define DPAA_POOL_CACHE_SIZE 256
 /* RX queue tail drop threshold (CGR Based) in frame count */
 #define CGR_RX_PERFQ_THRESH 256
 #define CGR_TX_CGR_THRESH 512
@@ -103,6 +110,8 @@
 #define FMC_FILE "/tmp/fmc.bin"
+extern struct rte_mempool *dpaa_tx_sg_pool;
 /* Each network interface is represented by one of these */
 struct dpaa_if {
 	int valid;

drivers/net/dpaa/dpaa_rxtx.c

@@ -803,8 +803,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
 static int
 dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
-		struct qm_fd *fd,
-		struct dpaa_bp_info *bp_info)
+		struct qm_fd *fd)
 {
 	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
 	struct rte_mbuf *temp, *mi;
@@ -813,7 +812,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
-	temp = rte_pktmbuf_alloc(bp_info->mp);
+	temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool);
 	if (!temp) {
 		DPAA_PMD_ERR("Failure in allocation of mbuf");
 		return -1;
@@ -849,7 +848,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	fd->format = QM_FD_SG;
 	fd->addr = temp->buf_iova;
 	fd->offset = temp->data_off;
-	fd->bpid = bp_info ? bp_info->bpid : 0xff;
+	fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool);
 	fd->length20 = mbuf->pkt_len;
 	while (i < DPAA_SGT_MAX_ENTRIES) {
@@ -967,7 +966,7 @@ tx_on_dpaa_pool(struct rte_mbuf *mbuf,
 		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
 	} else if (mbuf->nb_segs > 1 &&
 		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
-		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info)) {
+		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr)) {
 			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
 			return 1;
 		}