bus/dpaa: add BMAN hardware interfaces

Signed-off-by: Geoff Thorpe <geoff.thorpe@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
parent 847ee3bd0d
commit f38f61e982
@@ -63,6 +63,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
        base/fman/of.c \
        base/fman/netcfg_layer.c \
        base/qbman/process.c \
        base/qbman/bman.c \
        base/qbman/bman_driver.c \
        base/qbman/qman.c \
        base/qbman/qman_driver.c \
394 drivers/bus/dpaa/base/qbman/bman.c Normal file
@@ -0,0 +1,394 @@
/*-
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * BSD LICENSE
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the above-listed copyright holders nor the
 *     names of any contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * GPL LICENSE SUMMARY
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman.h"
|
||||
#include <rte_branch_prediction.h>
|
||||
|
||||
/* Compilation constants */
|
||||
#define RCR_THRESH 2 /* reread h/w CI when running out of space */
|
||||
#define IRQNAME "BMan portal %d"
|
||||
#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
|
||||
|
||||
struct bman_portal {
|
||||
struct bm_portal p;
|
||||
/* 2-element array. pools[0] is mask, pools[1] is snapshot. */
|
||||
struct bman_depletion *pools;
|
||||
int thresh_set;
|
||||
unsigned long irq_sources;
|
||||
u32 slowpoll; /* only used when interrupts are off */
|
||||
/* When the cpu-affine portal is activated, this is non-NULL */
|
||||
const struct bm_portal_config *config;
|
||||
char irqname[MAX_IRQNAME];
|
||||
};
|
||||
|
||||
static cpumask_t affine_mask;
|
||||
static DEFINE_SPINLOCK(affine_mask_lock);
|
||||
static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);
|
||||
|
||||
static inline struct bman_portal *get_affine_portal(void)
|
||||
{
|
||||
return &RTE_PER_LCORE(bman_affine_portal);
|
||||
}
|
||||
|
||||
/*
|
||||
* This object type refers to a pool, it isn't *the* pool. There may be
|
||||
* more than one such object per BMan buffer pool, eg. if different users of
|
||||
* the pool are operating via different portals.
|
||||
*/
|
||||
struct bman_pool {
|
||||
struct bman_pool_params params;
|
||||
/* Used for hash-table admin when using depletion notifications. */
|
||||
struct bman_portal *portal;
|
||||
struct bman_pool *next;
|
||||
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
|
||||
atomic_t in_use;
|
||||
#endif
|
||||
};

static inline
struct bman_portal *bman_create_portal(struct bman_portal *portal,
                                       const struct bm_portal_config *c)
{
        struct bm_portal *p;
        const struct bman_depletion *pools = &c->mask;
        int ret;
        u8 bpid = 0;

        p = &portal->p;
        /*
         * prep the low-level portal struct with the mapped addresses from the
         * config, everything that follows depends on it and "config" is more
         * for (de)reference...
         */
        p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
        p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
        if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
                pr_err("Bman RCR initialisation failed\n");
                return NULL;
        }
        if (bm_mc_init(p)) {
                pr_err("Bman MC initialisation failed\n");
                goto fail_mc;
        }
        portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
        if (!portal->pools)
                goto fail_pools;
        portal->pools[0] = *pools;
        bman_depletion_init(portal->pools + 1);
        while (bpid < bman_pool_max) {
                /*
                 * Default to all BPIDs disabled, we enable as required at
                 * run-time.
                 */
                bm_isr_bscn_mask(p, bpid, 0);
                bpid++;
        }
        portal->slowpoll = 0;
        /* Write-to-clear any stale interrupt status bits */
        bm_isr_disable_write(p, 0xffffffff);
        portal->irq_sources = 0;
        bm_isr_enable_write(p, portal->irq_sources);
        bm_isr_status_clear(p, 0xffffffff);
        snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
        if (request_irq(c->irq, NULL, 0, portal->irqname, portal)) {
                pr_err("request_irq() failed\n");
                goto fail_irq;
        }

        /* Need RCR to be empty before continuing */
        ret = bm_rcr_get_fill(p);
        if (ret) {
                pr_err("Bman RCR unclean\n");
                goto fail_rcr_empty;
        }
        /* Success */
        portal->config = c;

        bm_isr_disable_write(p, 0);
        bm_isr_uninhibit(p);
        return portal;
fail_rcr_empty:
        free_irq(c->irq, portal);
fail_irq:
        kfree(portal->pools);
fail_pools:
        bm_mc_finish(p);
fail_mc:
        bm_rcr_finish(p);
        return NULL;
}
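
(The failure ladder at the end of bman_create_portal() unwinds strictly in
reverse order of construction: an unclean RCR releases the IRQ, a failed IRQ
request frees the pools array, and so on back through MC and RCR teardown, so
a caller only ever sees a fully initialised portal or NULL.)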

struct bman_portal *
bman_create_affine_portal(const struct bm_portal_config *c)
{
        struct bman_portal *portal = get_affine_portal();

        /* This function is called from a context that is already affine to a
         * CPU; in other words, it is non-migratable to other CPUs.
         */
        portal = bman_create_portal(portal, c);
        if (portal) {
                spin_lock(&affine_mask_lock);
                CPU_SET(c->cpu, &affine_mask);
                spin_unlock(&affine_mask_lock);
        }
        return portal;
}

static inline
void bman_destroy_portal(struct bman_portal *bm)
{
        const struct bm_portal_config *pcfg;

        pcfg = bm->config;
        bm_rcr_cce_update(&bm->p);
        bm_rcr_cce_update(&bm->p);

        free_irq(pcfg->irq, bm);

        kfree(bm->pools);
        bm_mc_finish(&bm->p);
        bm_rcr_finish(&bm->p);
        bm->config = NULL;
}

const struct
bm_portal_config *bman_destroy_affine_portal(void)
{
        struct bman_portal *bm = get_affine_portal();
        const struct bm_portal_config *pcfg;

        pcfg = bm->config;
        bman_destroy_portal(bm);
        spin_lock(&affine_mask_lock);
        CPU_CLR(pcfg->cpu, &affine_mask);
        spin_unlock(&affine_mask_lock);
        return pcfg;
}

int
bman_get_portal_index(void)
{
        struct bman_portal *p = get_affine_portal();

        return p->config->index;
}

static const u32 zero_thresholds[4] = {0, 0, 0, 0};

struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
{
        struct bman_pool *pool = NULL;
        u32 bpid;

        if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
                int ret = bman_alloc_bpid(&bpid);

                if (ret)
                        return NULL;
        } else {
                if (params->bpid >= bman_pool_max)
                        return NULL;
                bpid = params->bpid;
        }
        if (params->flags & BMAN_POOL_FLAG_THRESH) {
                int ret = bm_pool_set(bpid, params->thresholds);

                if (ret)
                        goto err;
        }

        pool = kmalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->params = *params;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        atomic_set(&pool->in_use, 1);
#endif
        if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
                pool->params.bpid = bpid;

        return pool;
err:
        if (params->flags & BMAN_POOL_FLAG_THRESH)
                bm_pool_set(bpid, zero_thresholds);

        if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
                bman_release_bpid(bpid);
        kfree(pool);

        return NULL;
}

void bman_free_pool(struct bman_pool *pool)
{
        if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
                bm_pool_set(pool->params.bpid, zero_thresholds);
        if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
                bman_release_bpid(pool->params.bpid);
        kfree(pool);
}

const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
{
        return &pool->params;
}

static void update_rcr_ci(struct bman_portal *p, int avail)
{
        if (avail)
                bm_rcr_cce_prefetch(&p->p);
        else
                bm_rcr_cce_update(&p->p);
}
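
(update_rcr_ci() is the slow path behind the RCR_THRESH heuristic noted at
the top of the file: bman_release() below rereads the hardware consumer index
only when fewer than two RCR entries appear free, prefetching the
cache-enabled CI register while one entry remains and forcing a full
bm_rcr_cce_update() once none do.)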

#define BMAN_BUF_MASK 0x0000fffffffffffful
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
                 u32 flags __maybe_unused)
{
        struct bman_portal *p;
        struct bm_rcr_entry *r;
        u32 i = num - 1;
        u8 avail;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        if (!num || (num > 8))
                return -EINVAL;
        if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
                return -EINVAL;
#endif

        p = get_affine_portal();
        avail = bm_rcr_get_avail(&p->p);
        if (avail < 2)
                update_rcr_ci(p, avail);
        r = bm_rcr_start(&p->p);
        if (unlikely(!r))
                return -EBUSY;

        /*
         * we can copy all but the first entry, as this can trigger badness
         * with the valid-bit
         */
        r->bufs[0].opaque =
                cpu_to_be64(((u64)pool->params.bpid << 48) |
                            (bufs[0].opaque & BMAN_BUF_MASK));
        if (i) {
                for (i = 1; i < num; i++)
                        r->bufs[i].opaque =
                                cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
        }

        bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
                          (num & BM_RCR_VERB_BUFCOUNT_MASK));

        return 0;
}
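
The first ring entry above carries both the pool ID and the buffer address in
one 64-bit word. A worked sketch of the packing, with hypothetical values and
shown before the cpu_to_be64() byte-swap:

        /* Hypothetical values: bpid 0x2a, 48-bit buffer addr 0x6b8f12340000 */
        u64 bpid  = 0x2a;
        u64 addr  = 0x6b8f12340000ull;
        u64 entry = (bpid << 48) | (addr & BMAN_BUF_MASK);
        /* entry == 0x002a6b8f12340000: BPID in bits 63:48, addr in 47:0 */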

int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
                 u32 flags __maybe_unused)
{
        struct bman_portal *p = get_affine_portal();
        struct bm_mc_command *mcc;
        struct bm_mc_result *mcr;
        int ret, i;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        if (!num || (num > 8))
                return -EINVAL;
        if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
                return -EINVAL;
#endif

        mcc = bm_mc_start(&p->p);
        mcc->acquire.bpid = pool->params.bpid;
        bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
                     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
        while (!(mcr = bm_mc_result(&p->p)))
                cpu_relax();
        ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
        if (bufs) {
                for (i = 0; i < num; i++)
                        bufs[i].opaque =
                                be64_to_cpu(mcr->acquire.bufs[i].opaque);
        }
        if (ret != num)
                ret = -ENOMEM;
        return ret;
}
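
Together with bman_new_pool() and bman_free_pool() above, release/acquire is
the entire buffer datapath. A minimal usage sketch, illustrative only and not
part of this commit; it assumes the calling thread has already set up its
affine portal via bman_create_affine_portal() and that bpid names a valid,
seeded pool:

        static void example_pool_cycle(u32 bpid)
        {
                struct bman_pool_params params = {
                        .bpid = bpid,
                        .flags = 0,
                };
                struct bm_buffer buf;
                struct bman_pool *pool = bman_new_pool(&params);

                if (!pool)
                        return;
                /* bman_acquire() returns the number of buffers obtained */
                if (bman_acquire(pool, &buf, 1, 0) == 1) {
                        /* ... use the 48-bit address carried in buf ... */
                        bman_release(pool, &buf, 1, 0);
                }
                bman_free_pool(pool);
        }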

int bman_query_pools(struct bm_pool_state *state)
{
        struct bman_portal *p = get_affine_portal();
        struct bm_mc_result *mcr;

        bm_mc_start(&p->p);
        bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
        while (!(mcr = bm_mc_result(&p->p)))
                cpu_relax();
        DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
                    BM_MCR_VERB_CMD_QUERY);
        *state = mcr->query;
        state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
        state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
        state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
        state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
        return 0;
}

u32 bman_query_free_buffers(struct bman_pool *pool)
{
        return bm_pool_free_buffers(pool->params.bpid);
}

int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
{
        u32 bpid;

        bpid = bman_get_params(pool)->bpid;

        return bm_pool_set(bpid, thresholds);
}

int bman_shutdown_pool(u32 bpid)
{
        struct bman_portal *p = get_affine_portal();

        return bm_shutdown_pool(&p->p, bpid);
}
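
A small sketch of consuming bman_query_pools() output, illustrative only: it
assumes the depletion-state words use the same MSB-first, two-word layout for
BPIDs 0..63 that the SCN_REG()/SCN_BIT() macros in bman.h below use:

        static int example_pool_depleted(u32 bpid)
        {
                struct bm_pool_state st;

                bman_query_pools(&st);
                return !!(st.ds.state.state[bpid / 32] &
                          (0x80000000 >> (bpid & 31)));
        }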

550 drivers/bus/dpaa/base/qbman/bman.h Normal file
@@ -0,0 +1,550 @@
/*-
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * BSD LICENSE
 *
 * Copyright 2010-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the above-listed copyright holders nor the
 *     names of any contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * GPL LICENSE SUMMARY
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __BMAN_H
#define __BMAN_H

#include "bman_priv.h"

/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH      0x3000
#define BM_REG_RCR_CI_CINH      0x3100
#define BM_REG_RCR_ITR          0x3200
#define BM_REG_CFG              0x3300
#define BM_REG_SCN(n)           (0x3400 + ((n) << 6))
#define BM_REG_ISR              0x3e00
#define BM_REG_IIR              0x3ec0

/* Cache-enabled register offsets */
#define BM_CL_CR                0x0000
#define BM_CL_RR0               0x0100
#define BM_CL_RR1               0x0140
#define BM_CL_RCR               0x1000
#define BM_CL_RCR_PI_CENA       0x3000
#define BM_CL_RCR_CI_CENA       0x3100

/* BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses via lwsync(), hwsync(), and
 * data-dependencies. Use of barrier()s or other order-preserving primitives
 * simply degrades performance. Hence the use of the __raw_*() interfaces,
 * which simply ensure that the compiler treats the portal registers as
 * volatile (i.e. non-coherent).
 */

/* Cache-inhibited register access. */
#define __bm_in(bm, o)          be32_to_cpu(__raw_readl((bm)->ci + (o)))
#define __bm_out(bm, o, val)    __raw_writel(cpu_to_be32(val), \
                                             (bm)->ci + (o))
#define bm_in(reg)              __bm_in(&portal->addr, BM_REG_##reg)
#define bm_out(reg, val)        __bm_out(&portal->addr, BM_REG_##reg, val)

/* Cache-enabled (index) register access */
#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->ce + (o))
#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->ce + (o))
#define __bm_cl_in(bm, o)       be32_to_cpu(__raw_readl((bm)->ce + (o)))
#define __bm_cl_out(bm, o, val) \
        do { \
                u32 *__tmpclout = (bm)->ce + (o); \
                __raw_writel(cpu_to_be32(val), __tmpclout); \
                dcbf(__tmpclout); \
        } while (0)
#define __bm_cl_invalidate(bm, o) dccivac((bm)->ce + (o))
#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_in(reg)       __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
#define bm_cl_invalidate(reg)\
        __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)

/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
 * analysis, look at using the "extra" bit in the ring index registers to avoid
 * cyclic issues.
 */
static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
        /* 'first' is included, 'last' is excluded */
        if (first <= last)
                return last - first;
        return ringsize + last - first;
}
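
(For example, with ringsize = 8, first = 6 and last = 2 the indices have
wrapped, so the helper returns a cyclic distance of 8 + 2 - 6 = 4.)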

/* Portal modes.
 * Enum types:
 *   pmode == production mode
 *   cmode == consumption mode
 * Enum values use 3-letter codes. The first letter matches the portal mode,
 * the remaining two letters indicate:
 *   ci == cache-inhibited portal register
 *   ce == cache-enabled portal register
 *   vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {             /* matches BCSP_CFG::RPM */
        bm_rcr_pci = 0,         /* PI index, cache-inhibited */
        bm_rcr_pce = 1,         /* PI index, cache-enabled */
        bm_rcr_pvb = 2          /* valid-bit */
};

enum bm_rcr_cmode {             /* s/w-only */
        bm_rcr_cci,             /* CI index, cache-inhibited */
        bm_rcr_cce              /* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define BM_RCR_SIZE 8

struct bm_rcr {
        struct bm_rcr_entry *ring, *cursor;
        u8 ci, available, ithresh, vbit;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        u32 busy;
        enum bm_rcr_pmode pmode;
        enum bm_rcr_cmode cmode;
#endif
};

struct bm_mc {
        struct bm_mc_command *cr;
        struct bm_mc_result *rr;
        u8 rridx, vbit;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        enum {
                /* Can only be _mc_start()ed */
                mc_idle,
                /* Can only be _mc_commit()ed or _mc_abort()ed */
                mc_user,
                /* Can only be _mc_retry()ed */
                mc_hw
        } state;
#endif
};

struct bm_addr {
        void __iomem *ce;       /* cache-enabled */
        void __iomem *ci;       /* cache-inhibited */
};

struct bm_portal {
        struct bm_addr addr;
        struct bm_rcr rcr;
        struct bm_mc mc;
        struct bm_portal_config config;
} ____cacheline_aligned;

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
#define RCR_CARRYCLEAR(p) \
        (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))

/* Bit-wise logic to convert a ring pointer to a ring index */
static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
{
        return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void RCR_INC(struct bm_rcr *rcr)
{
        /* NB: this is odd-looking, but experiments show that it generates
         * fast code with essentially no branching overheads. We increment to
         * the next RCR pointer and handle overflow and 'vbit'.
         */
        struct bm_rcr_entry *partial = rcr->cursor + 1;

        rcr->cursor = RCR_CARRYCLEAR(partial);
        if (partial != rcr->cursor)
                rcr->vbit ^= BM_RCR_VERB_VBIT;
}
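
(Concretely: RCR entries are 64 bytes and BM_RCR_SIZE is 8, so the ring spans
512 bytes and (BM_RCR_SIZE << 6) == 0x200 is the first address bit past it.
Stepping off the end of a ring based at a 512-byte-aligned address such as
0x1000 yields partial == 0x1200; RCR_CARRYCLEAR() clears that carry bit, the
cursor wraps back to 0x1000, and the mismatch between partial and cursor
toggles the valid-bit.)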

static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
                              __maybe_unused enum bm_rcr_cmode cmode)
{
        /* This use of 'register', as well as all other occurrences, is because
         * it has been observed to generate much faster code with gcc than is
         * otherwise the case.
         */
        register struct bm_rcr *rcr = &portal->rcr;
        u32 cfg;
        u8 pi;

        rcr->ring = portal->addr.ce + BM_CL_RCR;
        rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);

        pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
        rcr->cursor = rcr->ring + pi;
        rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
        rcr->available = BM_RCR_SIZE - 1
                         - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
        rcr->ithresh = bm_in(RCR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        rcr->busy = 0;
        rcr->pmode = pmode;
        rcr->cmode = cmode;
#endif
        cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
        bm_out(CFG, cfg);
        return 0;
}

static inline void bm_rcr_finish(struct bm_portal *portal)
{
        register struct bm_rcr *rcr = &portal->rcr;
        u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
        u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);

        DPAA_ASSERT(!rcr->busy);
        if (pi != RCR_PTR2IDX(rcr->cursor))
                pr_crit("losing uncommitted RCR entries\n");
        if (ci != rcr->ci)
                pr_crit("missing existing RCR completions\n");
        if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
                pr_crit("RCR destroyed unquiesced\n");
}

static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
        register struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(!rcr->busy);
        if (!rcr->available)
                return NULL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        rcr->busy = 1;
#endif
        dcbz_64(rcr->cursor);
        return rcr->cursor;
}

static inline void bm_rcr_abort(struct bm_portal *portal)
{
        __maybe_unused register struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(rcr->busy);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        rcr->busy = 0;
#endif
}

static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
                                        struct bm_portal *portal, u8 myverb)
{
        register struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(rcr->busy);
        DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
        if (rcr->available == 1)
                return NULL;
        rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
        dcbf_64(rcr->cursor);
        RCR_INC(rcr);
        rcr->available--;
        dcbz_64(rcr->cursor);
        return rcr->cursor;
}

static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
{
        register struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(rcr->busy);
        DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
        rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
        RCR_INC(rcr);
        rcr->available--;
        hwsync();
        bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        rcr->busy = 0;
#endif
}

static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
{
        __maybe_unused register struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
        bm_cl_invalidate(RCR_PI);
        bm_cl_touch_rw(RCR_PI);
}

static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
{
        register struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(rcr->busy);
        DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
        rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
        RCR_INC(rcr);
        rcr->available--;
        lwsync();
        bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        rcr->busy = 0;
#endif
}

static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
        register struct bm_rcr *rcr = &portal->rcr;
        struct bm_rcr_entry *rcursor;

        DPAA_ASSERT(rcr->busy);
        DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
        lwsync();
        rcursor = rcr->cursor;
        rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
        dcbf_64(rcursor);
        RCR_INC(rcr);
        rcr->available--;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        rcr->busy = 0;
#endif
}

static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
{
        register struct bm_rcr *rcr = &portal->rcr;
        u8 diff, old_ci = rcr->ci;

        DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
        rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
        diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
        rcr->available += diff;
        return diff;
}

static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
        __maybe_unused register struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
        bm_cl_touch_ro(RCR_CI);
}

static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
{
        register struct bm_rcr *rcr = &portal->rcr;
        u8 diff, old_ci = rcr->ci;

        DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
        rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
        bm_cl_invalidate(RCR_CI);
        diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
        rcr->available += diff;
        return diff;
}

static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
{
        register struct bm_rcr *rcr = &portal->rcr;

        return rcr->ithresh;
}

static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
        register struct bm_rcr *rcr = &portal->rcr;

        rcr->ithresh = ithresh;
        bm_out(RCR_ITR, ithresh);
}

static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
{
        register struct bm_rcr *rcr = &portal->rcr;

        return rcr->available;
}

static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
{
        register struct bm_rcr *rcr = &portal->rcr;

        return BM_RCR_SIZE - 1 - rcr->available;
}
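
(Note that available and fill always sum to BM_RCR_SIZE - 1, not BM_RCR_SIZE:
the usual ring trick of sacrificing one slot so that equal producer and
consumer indices unambiguously mean "empty" rather than "maybe full".)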

/* --- Management command API --- */

static inline int bm_mc_init(struct bm_portal *portal)
{
        register struct bm_mc *mc = &portal->mc;

        mc->cr = portal->addr.ce + BM_CL_CR;
        mc->rr = portal->addr.ce + BM_CL_RR0;
        mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
                     BM_MCC_VERB_VBIT) ? 0 : 1;
        mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        mc->state = mc_idle;
#endif
        return 0;
}

static inline void bm_mc_finish(struct bm_portal *portal)
{
        __maybe_unused register struct bm_mc *mc = &portal->mc;

        DPAA_ASSERT(mc->state == mc_idle);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        if (mc->state != mc_idle)
                pr_crit("Losing incomplete MC command\n");
#endif
}

static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
        register struct bm_mc *mc = &portal->mc;

        DPAA_ASSERT(mc->state == mc_idle);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        mc->state = mc_user;
#endif
        dcbz_64(mc->cr);
        return mc->cr;
}

static inline void bm_mc_abort(struct bm_portal *portal)
{
        __maybe_unused register struct bm_mc *mc = &portal->mc;

        DPAA_ASSERT(mc->state == mc_user);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        mc->state = mc_idle;
#endif
}

static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
        register struct bm_mc *mc = &portal->mc;
        struct bm_mc_result *rr = mc->rr + mc->rridx;

        DPAA_ASSERT(mc->state == mc_user);
        lwsync();
        mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
        dcbf(mc->cr);
        dcbit_ro(rr);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        mc->state = mc_hw;
#endif
}

static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
        register struct bm_mc *mc = &portal->mc;
        struct bm_mc_result *rr = mc->rr + mc->rridx;

        DPAA_ASSERT(mc->state == mc_hw);
        /* The inactive response register's verb byte always returns zero until
         * its command is submitted and completed. This includes the valid-bit,
         * in case you were wondering.
         */
        if (!__raw_readb(&rr->verb)) {
                dcbit_ro(rr);
                return NULL;
        }
        mc->rridx ^= 1;
        mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
        mc->state = mc_idle;
#endif
        return rr;
}
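
The three calls above form a two-slot command/response protocol: start hands
out a zeroed command buffer, commit stamps the valid-bit and flushes it to
hardware, and result polls the response register. A minimal caller sketch,
illustrative only (the real in-tree users are bman_acquire() and
bman_query_pools() in bman.c):

        static inline void example_mc_query(struct bm_portal *p,
                                            struct bm_pool_state *st)
        {
                struct bm_mc_result *mcr;

                bm_mc_start(p);                 /* mc_idle -> mc_user */
                bm_mc_commit(p, BM_MCC_VERB_CMD_QUERY); /* -> mc_hw */
                while (!(mcr = bm_mc_result(p))) /* NULL until h/w replies */
                        cpu_relax();
                *st = mcr->query;       /* fields are still big-endian here */
        }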

#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
                                    int enable)
{
        u32 val;

        DPAA_ASSERT(bpid < bman_pool_max);
        /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
        val = __bm_in(&portal->addr, SCN_REG(bpid));
        if (enable)
                val |= SCN_BIT(bpid);
        else
                val &= ~SCN_BIT(bpid);
        __bm_out(&portal->addr, SCN_REG(bpid), val);
}

static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
{
#if defined(RTE_ARCH_ARM64)
        return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
#else
        return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
#endif
}

static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
                                  u32 val)
{
#if defined(RTE_ARCH_ARM64)
        __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
#else
        __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
#endif
}
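
(As a quick expansion check: for bpid 40, SCN_REG(40) is BM_REG_SCN(1) =
0x3400 + (1 << 6) = 0x3440, and SCN_BIT(40) is 0x80000000 >> 8 = 0x00800000,
i.e. the second SCN word with one MSB-first bit per pool.)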

/* Buffer Pool Cleanup */
static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
{
        struct bm_mc_command *bm_cmd;
        struct bm_mc_result *bm_res;

        int aq_count = 0;
        bool stop = false;

        while (!stop) {
                /* Acquire buffers until empty */
                bm_cmd = bm_mc_start(p);
                bm_cmd->acquire.bpid = bpid;
                bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
                while (!(bm_res = bm_mc_result(p)))
                        cpu_relax();
                if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
                        /* Pool is empty */
                        stop = true;
                } else {
                        ++aq_count;
                }
        }
        return 0;
}

#endif /* __BMAN_H */

@@ -65,6 +65,7 @@ static __thread struct dpaa_ioctl_portal_map map = {
static int fsl_bman_portal_init(uint32_t idx, int is_shared)
{
        cpu_set_t cpuset;
        struct bman_portal *portal;
        int loop, ret;
        struct dpaa_ioctl_irq_map irq_map;

@@ -111,6 +112,14 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
        /* Use the IRQ FD as a unique IRQ number */
        pcfg.irq = fd;

        portal = bman_create_affine_portal(&pcfg);
        if (!portal) {
                pr_err("Bman portal initialisation failed (%d)",
                       pcfg.cpu);
                process_portal_unmap(&map.addr);
                return -EBUSY;
        }

        /* Set the IRQ number */
        irq_map.type = dpaa_portal_bman;
        irq_map.portal_cinh = map.addr.cinh;

@@ -120,10 +129,13 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)

static int fsl_bman_portal_finish(void)
{
        __maybe_unused const struct bm_portal_config *cfg;
        int ret;

        process_portal_irq_unmap(fd);

        cfg = bman_destroy_affine_portal();
        DPAA_BUG_ON(cfg != &pcfg);
        ret = process_portal_unmap(&map.addr);
        if (ret)
                error(0, ret, "process_portal_unmap()");

@@ -41,6 +41,22 @@
#include "dpaa_sys.h"
#include <process.h>
#include <fsl_qman.h>
#include <fsl_bman.h>

int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
{
        return process_alloc(dpaa_id_bpid, result, count, align, partial);
}

void bman_release_bpid_range(u32 bpid, u32 count)
{
        process_release(dpaa_id_bpid, bpid, count);
}

int bman_reserve_bpid_range(u32 bpid, u32 count)
{
        return process_reserve(dpaa_id_bpid, bpid, count);
}

int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
{