drivers/dpaa: optimize thread local storage

Minimize the number of distinct per-thread (TLS) variables

Move all the thread-specific variables into the dpaa_portal
structure to optimize TLS usage.

Signed-off-by: Rohit Raj <rohit.raj@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
This commit is contained in:
Rohit Raj 2020-07-07 14:52:27 +05:30 committed by Ferruh Yigit
parent 6b10d1f7bd
commit e58722218a
9 changed files with 54 additions and 46 deletions

View File

@ -142,6 +142,12 @@ New Features
* Re-implemented get_fdir_info and get_fdir_stat in private API.
* **Updated NXP dpaa ethdev PMD.**
Updated the NXP dpaa ethdev with new features and improvements, including:
* Added support for using datapath APIs from non-EAL pthreads
* **Updated NXP dpaa2 ethdev PMD.**
Updated the NXP dpaa2 ethdev with new features and improvements, including:

View File

@ -52,8 +52,7 @@ unsigned int dpaa_svr_family;
#define FSL_DPAA_BUS_NAME dpaa_bus
RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
RTE_DEFINE_PER_LCORE(struct dpaa_portal *, dpaa_io);
struct fm_eth_port_cfg *
dpaa_get_eth_port_cfg(int dev_id)
@ -253,7 +252,6 @@ int rte_dpaa_portal_init(void *arg)
{
unsigned int cpu, lcore = rte_lcore_id();
int ret;
struct dpaa_portal *dpaa_io_portal;
BUS_INIT_FUNC_TRACE();
@ -288,20 +286,21 @@ int rte_dpaa_portal_init(void *arg)
DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
cpu, lcore);
dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
DPAA_PER_LCORE_PORTAL = rte_malloc(NULL, sizeof(struct dpaa_portal),
RTE_CACHE_LINE_SIZE);
if (!dpaa_io_portal) {
if (!DPAA_PER_LCORE_PORTAL) {
DPAA_BUS_LOG(ERR, "Unable to allocate memory");
bman_thread_finish();
qman_thread_finish();
return -ENOMEM;
}
dpaa_io_portal->qman_idx = qman_get_portal_index();
dpaa_io_portal->bman_idx = bman_get_portal_index();
dpaa_io_portal->tid = syscall(SYS_gettid);
DPAA_PER_LCORE_PORTAL->qman_idx = qman_get_portal_index();
DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
DPAA_PER_LCORE_PORTAL->tid = syscall(SYS_gettid);
ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
ret = pthread_setspecific(dpaa_portal_key,
(void *)DPAA_PER_LCORE_PORTAL);
if (ret) {
DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
" (lcore=%u) with ret: %d", cpu, lcore, ret);
@ -310,8 +309,6 @@ int rte_dpaa_portal_init(void *arg)
return ret;
}
RTE_PER_LCORE(dpaa_io) = true;
DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
return 0;
@ -324,7 +321,7 @@ rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
u32 sdqcr;
int ret;
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init(arg);
if (ret < 0) {
DPAA_BUS_LOG(ERR, "portal initialization failure");
@ -367,8 +364,7 @@ dpaa_portal_finish(void *arg)
rte_free(dpaa_io_portal);
dpaa_io_portal = NULL;
RTE_PER_LCORE(dpaa_io) = false;
DPAA_PER_LCORE_PORTAL = NULL;
}
static int

View File

@ -44,7 +44,6 @@ INTERNAL {
netcfg_acquire;
netcfg_release;
per_lcore_dpaa_io;
per_lcore_held_bufs;
qman_alloc_cgrid_range;
qman_alloc_pool_range;
qman_clear_irq;

View File

@ -35,8 +35,6 @@
extern unsigned int dpaa_svr_family;
extern RTE_DEFINE_PER_LCORE(bool, dpaa_io);
struct rte_dpaa_device;
struct rte_dpaa_driver;
@ -90,12 +88,38 @@ struct rte_dpaa_driver {
rte_dpaa_remove_t remove;
};
/* Create storage for dqrr entries per lcore */
#define DPAA_PORTAL_DEQUEUE_DEPTH 16
struct dpaa_portal_dqrr {
void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
uint64_t dqrr_held;
uint8_t dqrr_size;
};
struct dpaa_portal {
uint32_t bman_idx; /**< BMAN Portal ID*/
uint32_t qman_idx; /**< QMAN Portal ID*/
struct dpaa_portal_dqrr dpaa_held_bufs;
struct rte_crypto_op **dpaa_sec_ops;
int dpaa_sec_op_nb;
uint64_t tid;/**< Parent Thread id for this portal */
};
RTE_DECLARE_PER_LCORE(struct dpaa_portal *, dpaa_io);
#define DPAA_PER_LCORE_PORTAL \
RTE_PER_LCORE(dpaa_io)
#define DPAA_PER_LCORE_DQRR_SIZE \
RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_size
#define DPAA_PER_LCORE_DQRR_HELD \
RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_held
#define DPAA_PER_LCORE_DQRR_MBUF(i) \
RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.mbuf[i]
#define DPAA_PER_LCORE_RTE_CRYPTO_OP \
RTE_PER_LCORE(dpaa_io)->dpaa_sec_ops
#define DPAA_PER_LCORE_DPAA_SEC_OP_NB \
RTE_PER_LCORE(dpaa_io)->dpaa_sec_op_nb
/* Various structures representing contiguous memory maps */
struct dpaa_memseg {
TAILQ_ENTRY(dpaa_memseg) next;
@ -200,20 +224,6 @@ RTE_INIT(dpaainitfn_ ##nm) \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
/* Create storage for dqrr entries per lcore */
#define DPAA_PORTAL_DEQUEUE_DEPTH 16
struct dpaa_portal_dqrr {
void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
uint64_t dqrr_held;
uint8_t dqrr_size;
};
RTE_DECLARE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
#define DPAA_PER_LCORE_DQRR_SIZE RTE_PER_LCORE(held_bufs).dqrr_size
#define DPAA_PER_LCORE_DQRR_HELD RTE_PER_LCORE(held_bufs).dqrr_held
#define DPAA_PER_LCORE_DQRR_MBUF(i) RTE_PER_LCORE(held_bufs).mbuf[i]
__rte_internal
struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);

View File

@ -45,9 +45,6 @@
static uint8_t cryptodev_driver_id;
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
@ -143,7 +140,7 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
struct dpaa_sec_job *job;
struct dpaa_sec_op_ctx *ctx;
if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
return qman_cb_dqrr_defer;
if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
@ -174,7 +171,7 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
}
mbuf->data_len = len;
}
dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
dpaa_sec_op_ending(ctx);
return qman_cb_dqrr_consume;
@ -2280,7 +2277,7 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
DPAA_SEC_ERR("Unable to prepare sec cdb");
return ret;
}
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_SEC_ERR("Failure in affining portal");
@ -3442,7 +3439,7 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
}
}
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
retval = rte_dpaa_portal_init((void *)1);
if (retval) {
DPAA_SEC_ERR("Unable to initialize portal");

View File

@ -179,7 +179,7 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
struct dpaa_port *portal = (struct dpaa_port *)port;
struct rte_mbuf *mbuf;
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
/* Affine current thread context to a qman portal */
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
@ -251,7 +251,7 @@ dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
struct dpaa_port *portal = (struct dpaa_port *)port;
struct rte_mbuf *mbuf;
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
/* Affine current thread context to a qman portal */
ret = rte_dpaa_portal_init((void *)0);
if (ret) {

View File

@ -53,7 +53,7 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
MEMPOOL_INIT_FUNC_TRACE();
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_MEMPOOL_ERR(
@ -169,7 +169,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
n, bp_info->bpid);
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
@ -224,7 +224,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
return -1;
}
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",

View File

@ -1707,7 +1707,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
is_global_init = 1;
}
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)1);
if (ret) {
DPAA_PMD_ERR("Unable to initialize portal");

View File

@ -670,7 +670,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
if (likely(fq->is_static))
return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_PMD_ERR("Failure in affining portal");
@ -970,7 +970,7 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
int ret, realloc_mbuf = 0;
uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_PMD_ERR("Failure in affining portal");