bus/dpaa: add QMAN interface driver

The Queue Manager (QMan) is a hardware queue management block that
allows software and accelerators on the datapath to enqueue and dequeue
frames in order to communicate.

This is part of the QBMAN DPAA block.

Signed-off-by: Geoff Thorpe <geoff.thorpe@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
This commit is contained in:
Shreyansh Jain 2017-09-28 17:59:30 +05:30 committed by Ferruh Yigit
parent 65ebc1be3c
commit f6fadc3e63
5 changed files with 1842 additions and 0 deletions

View File

@ -63,6 +63,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/of.c \
base/fman/netcfg_layer.c \
base/qbman/process.c \
base/qbman/qman_driver.c \
base/qbman/dpaa_sys.c
# Link Pthread

View File

@ -0,0 +1,271 @@
/*-
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
* Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the above-listed copyright holders nor the
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* GPL LICENSE SUMMARY
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_usd.h>
#include <process.h>
#include "qman_priv.h"
#include <sys/ioctl.h>
#include <unistd.h>
#include <rte_branch_prediction.h>
/* Global variable containing revision id (even on non-control plane systems
 * where CCSR isn't available).
 */
u16 qman_ip_rev;
/* Channel ids for pool/CAAM/PME traffic; qman_global_init() rewrites these
 * to the _REV3 values when (qman_ip_rev & 0xFF00) >= QMAN_REV30.
 */
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
u16 qm_channel_pme = QMAN_CHANNEL_PME;
/* Ccsr map address to access ccsrbased register */
void *qman_ccsr_map;
/* The qman clock frequency */
u32 qman_clk;

/* Per-thread portal state: irq device fd, portal config, and the ioctl
 * mapping descriptor used by process_portal_map()/process_portal_unmap().
 */
static __thread int fd = -1;
static __thread struct qm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
	.type = dpaa_portal_qman
};
/*
 * Map a QMan portal for the calling thread and bind its interrupt.
 *
 * @index:     portal index to request (QBMAN_ANY_PORTAL_IDX for any free one)
 * @is_shared: non-zero if the portal demuxes processing for other CPUs
 *
 * The calling thread must be affine to exactly one cpu. Fills the
 * thread-local 'pcfg'/'map'/'fd' state. Returns 0 on success, negative
 * errno on failure (all partially-acquired resources are released).
 */
static int fsl_qman_portal_init(uint32_t index, int is_shared)
{
	cpu_set_t cpuset;
	int loop, ret;
	struct dpaa_ioctl_irq_map irq_map;

	/* Verify the thread's cpu-affinity */
	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
				     &cpuset);
	if (ret) {
		error(0, ret, "pthread_getaffinity_np()");
		return ret;
	}
	/* Require affinity to a single cpu; record it in pcfg.cpu */
	pcfg.cpu = -1;
	for (loop = 0; loop < CPU_SETSIZE; loop++)
		if (CPU_ISSET(loop, &cpuset)) {
			if (pcfg.cpu != -1) {
				pr_err("Thread is not affine to 1 cpu\n");
				return -EINVAL;
			}
			pcfg.cpu = loop;
		}
	if (pcfg.cpu == -1) {
		pr_err("Bug in getaffinity handling!\n");
		return -EINVAL;
	}
	/* Allocate and map a qman portal */
	map.index = index;
	ret = process_portal_map(&map);
	if (ret) {
		error(0, ret, "process_portal_map()");
		return ret;
	}
	pcfg.channel = map.channel;
	pcfg.pools = map.pools;
	pcfg.index = map.index;
	/* Make the portal's cache-[enabled|inhibited] regions */
	pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
	pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
	fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
	if (fd == -1) {
		pr_err("QMan irq init failed\n");
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}
	pcfg.is_shared = is_shared;
	pcfg.node = NULL;
	pcfg.irq = fd;
	irq_map.type = dpaa_portal_qman;
	irq_map.portal_cinh = map.addr.cinh;
	/* Fix: the irq-map result was previously ignored; on failure we
	 * would report success with an unbound irq fd. Unwind instead.
	 * NOTE(review): assumes process_portal_irq_map() returns an int
	 * error code — confirm against process.c.
	 */
	ret = process_portal_irq_map(fd, &irq_map);
	if (ret) {
		error(0, ret, "process_portal_irq_map()");
		close(fd);
		fd = -1;
		process_portal_unmap(&map.addr);
		return ret;
	}
	return 0;
}
/*
 * Tear down the calling thread's portal: unbind the interrupt and unmap
 * the portal regions. Returns the process_portal_unmap() status.
 */
static int fsl_qman_portal_finish(void)
{
	int ret;

	/* NOTE(review): presumably process_portal_irq_unmap() closes the irq
	 * fd opened in fsl_qman_portal_init() — confirm in process.c; if it
	 * does not, the fd leaks here.
	 */
	process_portal_irq_unmap(fd);
	/* Fix: clear the thread-local fd so stale state cannot be reused by
	 * later calls (e.g. qman_thread_irq() via pcfg.irq of a dead portal).
	 */
	fd = -1;
	ret = process_portal_unmap(&map.addr);
	if (ret)
		error(0, ret, "process_portal_unmap()");
	return ret;
}
/*
 * Thread-entry hook: map any free QMan portal (non-shared) for the
 * calling thread. Returns 0 on success, negative errno otherwise.
 */
int qman_thread_init(void)
{
	/* Convert from contiguous/virtual cpu numbering to real cpu when
	 * calling into the code that is dependent on the device naming.
	 */
	return fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
}
/*
 * Thread-exit hook: release the calling thread's portal mapping.
 * Returns the unmap status from fsl_qman_portal_finish().
 */
int qman_thread_finish(void)
{
	return fsl_qman_portal_finish();
}
/*
 * Post-process a portal interrupt: acknowledge it via the usdpaa irq fd,
 * then re-enable (uninhibit) the portal's interrupt line. Must be called
 * before the next blocking read/select/poll on the irq fd.
 */
void qman_thread_irq(void)
{
	qbman_invoke_irq(pcfg.irq);

	/* Now we need to uninhibit interrupts. This is the only code outside
	 * the regular portal driver that manipulates any portal register, so
	 * rather than breaking that encapsulation I am simply hard-coding the
	 * offset to the inhibit register here.
	 */
	out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
}
/*
 * One-time global QMan setup: determine the IP revision from the device
 * tree, select the revision-dependent channel ids, and map the CCSR
 * register region via /dev/mem.
 *
 * Returns 0 on success, -EBUSY if already initialised, or a negative
 * errno on device-tree / mapping failure.
 */
int qman_global_init(void)
{
	const struct device_node *dt_node;
	size_t lenp;
	const u32 *chanid;
	static int ccsr_map_fd;
	const uint32_t *qman_addr;
	uint64_t phys_addr;
	uint64_t regs_size;
	const u32 *clk;
	static int done;

	if (done)
		return -EBUSY;

	/* Use the device-tree to determine IP revision until something better
	 * is devised.
	 */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman-portal");
	if (!dt_node) {
		pr_err("No qman portals available for any CPU\n");
		return -ENODEV;
	}
	if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.0") ||
	    of_device_is_compatible(dt_node, "fsl,qman-portal-1.0.0"))
		pr_err("QMan rev1.0 on P4080 rev1 is not supported!\n");
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.1") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-1.1.0"))
		qman_ip_rev = QMAN_REV11;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.2") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-1.2.0"))
		qman_ip_rev = QMAN_REV12;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-2.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-2.0.0"))
		qman_ip_rev = QMAN_REV20;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.1"))
		qman_ip_rev = QMAN_REV30;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.1") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.2") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.3"))
		qman_ip_rev = QMAN_REV31;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.1"))
		qman_ip_rev = QMAN_REV32;
	else
		/* Fallback for unlisted compatibles. Note this makes the
		 * !qman_ip_rev check below reachable only via the
		 * unsupported rev1.0 branch above.
		 */
		qman_ip_rev = QMAN_REV11;
	if (!qman_ip_rev) {
		pr_err("Unknown qman portal version\n");
		return -ENODEV;
	}
	/* Rev 3.x moved the pool/CAAM/PME channel id ranges */
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
		qm_channel_pme = QMAN_CHANNEL_PME_REV3;
	}

	dt_node = of_find_compatible_node(NULL, NULL, "fsl,pool-channel-range");
	if (!dt_node) {
		pr_err("No qman pool channel range available\n");
		return -ENODEV;
	}
	chanid = of_get_property(dt_node, "fsl,pool-channel-range", &lenp);
	if (!chanid) {
		pr_err("Can not get pool-channel-range property\n");
		return -EINVAL;
	}

	/* get ccsr base */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman");
	if (!dt_node) {
		pr_err("No qman device node available\n");
		return -ENODEV;
	}
	qman_addr = of_get_address(dt_node, 0, &regs_size, NULL);
	if (!qman_addr) {
		pr_err("of_get_address cannot return qman address\n");
		return -EINVAL;
	}
	phys_addr = of_translate_address(dt_node, qman_addr);
	if (!phys_addr) {
		pr_err("of_translate_address failed\n");
		return -EINVAL;
	}
	ccsr_map_fd = open("/dev/mem", O_RDWR);
	if (unlikely(ccsr_map_fd < 0)) {
		pr_err("Can not open /dev/mem for qman ccsr map\n");
		return ccsr_map_fd;
	}
	qman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
			     MAP_SHARED, ccsr_map_fd, phys_addr);
	if (qman_ccsr_map == MAP_FAILED) {
		pr_err("Can not map qman ccsr base\n");
		/* Fix: don't leak the /dev/mem fd on mapping failure */
		close(ccsr_map_fd);
		return -EINVAL;
	}

	clk = of_get_property(dt_node, "clock-frequency", NULL);
	if (!clk)
		pr_warn("Can't find Qman clock frequency\n");
	else
		qman_clk = be32_to_cpu(*clk);

	/* Fix: latch the one-shot guard. Previously 'done' was never set,
	 * so the -EBUSY check above was dead and a second call would redo
	 * the whole init (re-opening /dev/mem and re-mapping CCSR).
	 */
	done = 1;
	return 0;
}

View File

@ -0,0 +1,303 @@
/*-
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
* Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the above-listed copyright holders nor the
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* GPL LICENSE SUMMARY
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QMAN_PRIV_H
#define __QMAN_PRIV_H
#include "dpaa_sys.h"
#include <fsl_qman.h>
/* Congestion Groups */
/*
 * This wrapper represents a bit-array for the state of the 256 QMan congestion
 * groups. Is also used as a *mask* for congestion groups, eg. so we ignore
 * those that don't concern us. We harness the structure and accessor details
 * already used in the management command to query congestion groups.
 */
struct qman_cgrs {
	/* 256 bits of state, stored as 8 x u32 words (see qman_cgrs_and) */
	struct __qm_mcr_querycongestion q;
};
/* Clear all 256 congestion-group bits. */
static inline void qman_cgrs_init(struct qman_cgrs *c)
{
	memset(c, 0, sizeof(*c));
}
/* Set all 256 congestion-group bits. */
static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
	memset(c, 0xff, sizeof(*c));
}
/* Test the bit for congestion group 'num'; non-zero if set. */
static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
{
	return QM_MCR_QUERYCONGESTION(&c->q, num);
}
static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
{
c->q.state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
}
static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
{
c->q.state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
}
/*
 * Return the index of the first set congestion-group bit strictly after
 * 'num', or __CGR_NUM if none remain.
 */
static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
{
	for (num++; num < (int)__CGR_NUM; num++)
		if (qman_cgrs_get(c, num))
			break;
	return num;
}
/* Copy the whole congestion-group bit-array from 'src' into 'dest'. */
static inline void qman_cgrs_cp(struct qman_cgrs *dest,
				const struct qman_cgrs *src)
{
	memcpy(dest, src, sizeof(*dest));
}
/* dest = a AND b, word by word over the 8 u32 state words (256 bits). */
static inline void qman_cgrs_and(struct qman_cgrs *dest,
				 const struct qman_cgrs *a,
				 const struct qman_cgrs *b)
{
	int i;

	for (i = 0; i < 8; i++)
		dest->q.state[i] = a->q.state[i] & b->q.state[i];
}
/* dest = a XOR b, word by word over the 8 u32 state words (256 bits). */
static inline void qman_cgrs_xor(struct qman_cgrs *dest,
				 const struct qman_cgrs *a,
				 const struct qman_cgrs *b)
{
	int i;

	for (i = 0; i < 8; i++)
		dest->q.state[i] = a->q.state[i] ^ b->q.state[i];
}
/* used by CCSR and portal interrupt code */
/* Logical register indices consumed by the __qm_isr_read()/__qm_isr_write()
 * accessors (see the qm_isr_* macros below).
 */
enum qm_isr_reg {
	qm_isr_status = 0,
	qm_isr_enable = 1,
	qm_isr_disable = 2,
	qm_isr_inhibit = 3
};
/* Per-portal configuration, filled in by the usd driver at map time. */
struct qm_portal_config {
	/*
	 * Corenet portal addresses;
	 * [0]==cache-enabled, [1]==cache-inhibited.
	 */
	void __iomem *addr_virt[2];
	/* Backing device-tree node; the usd driver sets this to NULL */
	struct device_node *node;
	/* Allow these to be joined in lists */
	struct list_head list;
	/* User-visible portal configuration settings */
	/* If the caller enables DQRR stashing (and thus wishes to operate the
	 * portal from only one cpu), this is the logical CPU that the portal
	 * will stash to. Whether stashing is enabled or not, this setting is
	 * also used for any "core-affine" portals, ie. default portals
	 * associated to the corresponding cpu. -1 implies that there is no
	 * core affinity configured.
	 */
	int cpu;
	/* portal interrupt line (in user space, the fd of the usdpaa irq
	 * device opened in fsl_qman_portal_init())
	 */
	int irq;
	/* the unique index of this portal */
	u32 index;
	/* Is this portal shared? (If so, it has coarser locking and demuxes
	 * processing on behalf of other CPUs.).
	 */
	int is_shared;
	/* The portal's dedicated channel id, use this value for initialising
	 * frame queues to target this portal when scheduled.
	 */
	u16 channel;
	/* A mask of which pool channels this portal has dequeue access to
	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask).
	 */
	u32 pools;
};
/* Revision info (for errata and feature handling) */
#define QMAN_REV11 0x0101
#define QMAN_REV12 0x0102
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
#define QMAN_REV32 0x0302

extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
extern u32 qman_clk; /* from the "clock-frequency" DT property */

/* NOTE(review): 'wpm' semantics are not visible here — see the driver */
int qm_set_wpm(int wpm);
int qm_get_wpm(int *wpm);

/* Create/destroy a portal for the calling thread; presumably the
 * "affine" portal is bound to the calling cpu — confirm in qman.c.
 */
struct qman_portal *qman_create_affine_portal(
			const struct qm_portal_config *config,
			const struct qman_cgrs *cgrs);
const struct qm_portal_config *qman_destroy_affine_portal(void);

/* Acquire/release portal configs not currently in use */
struct qm_portal_config *qm_get_unused_portal(void);
struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
void qm_put_unused_portal(struct qm_portal_config *pcfg);
void qm_set_liodns(struct qm_portal_config *pcfg);

/* This CGR feature is supported by h/w and required by unit-tests and the
 * debugfs hooks, so is implemented in the driver. However it allows an explicit
 * corruption of h/w fields by s/w that are usually incorruptible (because the
 * counters are usually maintained entirely within h/w). As such, we declare
 * this API internally.
 */
int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
		       struct qm_mcr_cgrtestwrite *result);
/* QMan s/w corenet portal, low-level i/face */

/*
 * SDQCR (static dequeue command) field encodings.
 * Choose one SOURCE. Choose one COUNT. Choose one
 * dequeue TYPE. Choose TOKEN (8-bit).
 * If SOURCE == CHANNELS,
 * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
 * You can choose DEDICATED_PRECEDENCE if the portal channel should have
 * priority.
 * If SOURCE == SPECIFICWQ,
 * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
 * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
 * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
 * same value.
 */
#define QM_SDQCR_SOURCE_CHANNELS 0x0
#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
#define QM_SDQCR_COUNT_EXACT1 0x0
#define QM_SDQCR_COUNT_UPTO3 0x20000000
#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
#define QM_SDQCR_TYPE_MASK 0x03000000
#define QM_SDQCR_TYPE_NULL 0x0
#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
#define QM_SDQCR_TYPE_ACTIVE 0x03000000
#define QM_SDQCR_TOKEN_MASK 0x00ff0000
#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)

/* VDQCR (volatile dequeue command) FQID selection */
#define QM_VDQCR_FQID_MASK 0x00ffffff
#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)

/* EQCR (enqueue command ring) verb, DCA and seqnum field encodings */
#define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
#define QM_EQCR_VERB_COLOUR_GREEN 0x00
#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
#define QM_EQCR_VERB_COLOUR_RED 0x10
#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
#define QM_EQCR_DCA_ENABLE 0x80
#define QM_EQCR_DCA_PARK 0x40
#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
/* MCC (management command) verb encodings */
#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
#define QM_MCC_VERB_INITFQ_SCHED 0x41
#define QM_MCC_VERB_QUERYFQ 0x44
#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ 0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
#define QM_MCC_VERB_INITCGR 0x50
#define QM_MCC_VERB_MODIFYCGR 0x51
#define QM_MCC_VERB_CGRTESTWRITE 0x52
#define QM_MCC_VERB_QUERYCGR 0x58
#define QM_MCC_VERB_QUERYCONGESTION 0x59

/*
 * Used by all portal interrupt registers except 'inhibit'
 * Channels with frame availability
 */
#define QM_PIRQ_DQAVAIL 0x0000ffff

/* The DQAVAIL interrupt fields break down into these bits; */
#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
#define QM_DQAVAIL_MASK 0xffff

/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)

/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
 * the disable register" rather than "disable the ability to write".
 */
#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
/* TODO: unfortunate name-clash here, reword? */
#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)

/* Character device opened by fsl_qman_portal_init() to obtain the irq fd */
#define QMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"

#endif /* __QMAN_PRIV_H */

File diff suppressed because it is too large Load Diff

View File

@ -47,6 +47,10 @@
extern "C" {
#endif
/* Thread-entry/exit hooks: map/unmap a per-thread QMan portal */
int qman_thread_init(void);
int qman_thread_finish(void);

/* Wildcard portal index: request any free portal */
#define QBMAN_ANY_PORTAL_IDX 0xffffffff
/* Obtain and free raw (unitialized) portals */
@ -81,6 +85,15 @@ int qman_free_raw_portal(struct dpaa_raw_portal *portal);
int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
int bman_free_raw_portal(struct dpaa_raw_portal *portal);
/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
 * line before notifying us, and this post-processing re-enables it once
 * processing is complete. As such, it is essential to call this before going
 * into another blocking read/select/poll.
 */
void qman_thread_irq(void);

/* Global setup: one-time, device-tree driven QMan discovery and CCSR map */
int qman_global_init(void);
#ifdef __cplusplus
}
#endif