bus/dpaa: add BMAN driver core

The Buffer Manager (BMan) is a hardware buffer pool management block that
allows software and accelerators on the datapath to acquire and release
buffers in order to build frames.

This patch adds the core routines.

Signed-off-by: Geoff Thorpe <geoff.thorpe@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
This commit is contained in:
Shreyansh Jain 2017-09-28 17:59:32 +05:30 committed by Ferruh Yigit
parent c47ff048b9
commit f09ede6c8f
5 changed files with 817 additions and 0 deletions

View File

@ -63,6 +63,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
base/fman/of.c \
base/fman/netcfg_layer.c \
base/qbman/process.c \
base/qbman/bman_driver.c \
base/qbman/qman.c \
base/qbman/qman_driver.c \
base/qbman/dpaa_alloc.c \

View File

@ -0,0 +1,311 @@
/*-
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
* Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the above-listed copyright holders nor the
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* GPL LICENSE SUMMARY
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <rte_branch_prediction.h>
#include <fsl_usd.h>
#include <process.h>
#include "bman_priv.h"
#include <sys/ioctl.h>
/*
 * Global variables: the maximum portal index and buffer-pool count supported
 * by this BMan hardware revision.
 */
u16 bman_ip_rev;
u16 bman_pool_max;
void *bman_ccsr_map;
/*****************/
/* Portal driver */
/*****************/
static __thread int fd = -1;
static __thread struct bm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
.type = dpaa_portal_bman
};
/* Map a BMan software portal for the calling thread and record it in the
 * thread-local 'pcfg'/'map'/'fd' state.
 *
 * @idx: portal index to map (QBMAN_ANY_PORTAL_IDX lets the kernel choose).
 * @is_shared: non-zero if this portal will demux on behalf of other CPUs.
 *
 * Returns 0 on success, a negative errno-style value on failure. The calling
 * thread must be affine to exactly one CPU.
 */
static int fsl_bman_portal_init(uint32_t idx, int is_shared)
{
	cpu_set_t cpuset;
	int loop, ret;
	struct dpaa_ioctl_irq_map irq_map;

	/* Verify the thread's cpu-affinity */
	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
				     &cpuset);
	if (ret) {
		error(0, ret, "pthread_getaffinity_np()");
		return ret;
	}
	/* Scan the affinity mask; reject a second set bit so the portal is
	 * genuinely core-affine.
	 */
	pcfg.cpu = -1;
	for (loop = 0; loop < CPU_SETSIZE; loop++)
		if (CPU_ISSET(loop, &cpuset)) {
			if (pcfg.cpu != -1) {
				pr_err("Thread is not affine to 1 cpu");
				return -EINVAL;
			}
			pcfg.cpu = loop;
		}
	if (pcfg.cpu == -1) {
		pr_err("Bug in getaffinity handling!");
		return -EINVAL;
	}
	/* Allocate and map a bman portal */
	map.index = idx;
	ret = process_portal_map(&map);
	if (ret) {
		error(0, ret, "process_portal_map()");
		return ret;
	}
	/* Make the portal's cache-[enabled|inhibited] regions */
	pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
	pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
	pcfg.is_shared = is_shared;
	pcfg.index = map.index;
	/* Expose all 64 pool depletion bits through this portal. */
	bman_depletion_fill(&pcfg.mask);

	fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
	if (fd == -1) {
		pr_err("BMan irq init failed");
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}
	/* Use the IRQ FD as a unique IRQ number */
	pcfg.irq = fd;
	/* Set the IRQ number */
	irq_map.type = dpaa_portal_bman;
	irq_map.portal_cinh = map.addr.cinh;
	/* NOTE(review): return value ignored - confirm whether an irq-map
	 * failure should unwind the portal mapping above.
	 */
	process_portal_irq_map(fd, &irq_map);
	return 0;
}
/* Undo fsl_bman_portal_init() for the calling thread: unmap the portal IRQ,
 * unmap the portal itself and close the IRQ file descriptor.
 *
 * Returns the result of process_portal_unmap() (0 on success).
 */
static int fsl_bman_portal_finish(void)
{
	int ret;

	process_portal_irq_unmap(fd);
	ret = process_portal_unmap(&map.addr);
	if (ret)
		error(0, ret, "process_portal_unmap()");
	/* BUGFIX: the IRQ fd opened in fsl_bman_portal_init() was never
	 * closed, leaking one descriptor per init/finish cycle.
	 */
	if (fd != -1) {
		close(fd);
		fd = -1;
	}
	return ret;
}
/* Attach the calling thread to a BMan portal. */
int bman_thread_init(void)
{
	/* Convert from contiguous/virtual cpu numbering to real cpu when
	 * calling into the code that is dependent on the device naming.
	 * Any free portal is acceptable; the mapping is not shared.
	 */
	int err = fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);

	return err;
}
/* Detach the calling thread from its BMan portal. */
int bman_thread_finish(void)
{
	int err = fsl_bman_portal_finish();

	return err;
}
/* Service this thread's portal interrupt: acknowledge it via the kernel
 * driver, then re-enable (uninhibit) interrupts at the portal.
 */
void bman_thread_irq(void)
{
	qbman_invoke_irq(pcfg.irq);
	/* Now we need to uninhibit interrupts. This is the only code outside
	 * the regular portal driver that manipulates any portal register, so
	 * rather than breaking that encapsulation I am simply hard-coding the
	 * offset to the inhibit register here.
	 */
	out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
}
/* Map the BMan CCSR register space described by @node into bman_ccsr_map.
 *
 * @node: the "fsl,bman" device-tree node carrying the register address.
 *
 * Returns 0 on success, a negative value on failure. On success the mapping
 * is retained for the lifetime of the process.
 */
int bman_init_ccsr(const struct device_node *node)
{
	static int ccsr_map_fd;
	uint64_t phys_addr;
	const uint32_t *bman_addr;
	uint64_t regs_size;

	bman_addr = of_get_address(node, 0, &regs_size, NULL);
	if (!bman_addr) {
		pr_err("of_get_address cannot return BMan address");
		return -EINVAL;
	}
	phys_addr = of_translate_address(node, bman_addr);
	if (!phys_addr) {
		pr_err("of_translate_address failed");
		return -EINVAL;
	}
	ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
	if (unlikely(ccsr_map_fd < 0)) {
		pr_err("Can not open /dev/mem for BMan CCSR map");
		return ccsr_map_fd;
	}
	bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
		PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
	/* BUGFIX: the original leaked ccsr_map_fd on both paths. Closing the
	 * fd after mmap() is safe - POSIX keeps the mapping valid once
	 * established.
	 */
	close(ccsr_map_fd);
	ccsr_map_fd = -1;
	if (bman_ccsr_map == MAP_FAILED) {
		pr_err("Can not map BMan CCSR base Bman: "
		       "0x%x Phys: 0x%lx size 0x%lx",
		       *bman_addr, phys_addr, regs_size);
		return -EINVAL;
	}
	return 0;
}
/* One-time process-wide BMan setup: detect the IP revision from the device
 * tree, set bman_ip_rev/bman_pool_max, and map CCSR if a "fsl,bman" node is
 * present.
 *
 * Returns 0 on success, -EBUSY if already initialised, -ENODEV if no portal
 * node exists.
 */
int bman_global_init(void)
{
	const struct device_node *dt_node;
	static int done;

	if (done)
		return -EBUSY;
	/* Use the device-tree to determine IP revision until something better
	 * is devised.
	 */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
	if (!dt_node) {
		pr_err("No bman portals available for any CPU\n");
		return -ENODEV;
	}
	if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
	    of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
		bman_ip_rev = BMAN_REV10;
		bman_pool_max = 64;
	} else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
		   of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
		bman_ip_rev = BMAN_REV20;
		bman_pool_max = 8;
	} else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
		   of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
		   of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
		   of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
		bman_ip_rev = BMAN_REV21;
		bman_pool_max = 64;
	} else {
		pr_warn("unknown BMan version in portal node,default "
			"to rev1.0");
		bman_ip_rev = BMAN_REV10;
		bman_pool_max = 64;
	}
	/* Note: every branch above assigns a nonzero revision, so the
	 * original "if (!bman_ip_rev)" fallback was unreachable and has been
	 * removed.
	 */
	{
		const struct device_node *dn = of_find_compatible_node(NULL,
						NULL, "fsl,bman");
		/* BUGFIX: only attempt the CCSR map when the node exists;
		 * the original passed a NULL node to bman_init_ccsr().
		 */
		if (!dn)
			pr_err("No bman device node available");
		else if (bman_init_ccsr(dn))
			pr_err("BMan CCSR map failed.");
	}
	done = 1;
	return 0;
}
/* CCSR offset of the per-pool "content" (free buffer count) register. */
#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
/* Read the current free-buffer count of pool @bpid from CCSR.
 * NOTE(review): unlike bm_pool_set(), no bman_ccsr_map/bpid validation is
 * done here - callers must ensure CCSR is mapped and bpid < bman_pool_max.
 */
u32 bm_pool_free_buffers(u32 bpid)
{
	return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
}
/* Encode @val into the h/w threshold format: an 8-bit mantissa in bits 0-7
 * and a 4-bit exponent in bits 8-11 (value = mantissa << exponent). When
 * @roundup is set, halving rounds up so the encoded value never drops below
 * the request.
 */
static u32 __generate_thresh(u32 val, int roundup)
{
	u32 exp = 0;

	while (val > 0xff) {
		int carry = roundup && (val & 1);

		val = (val >> 1) + carry;
		exp++;
	}
	DPAA_ASSERT(exp < 0x10);
	return val | (exp << 8);
}
#define POOL_SWDET(n) (0x0000 + ((n) * 0x04))
#define POOL_HWDET(n) (0x0100 + ((n) * 0x04))
#define POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
#define POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
/* Program all four depletion thresholds of pool @bpid via CCSR.
 *
 * @thresholds: [0]=SW entry, [1]=SW exit, [2]=HW entry, [3]=HW exit.
 *
 * Returns 0 on success, -ENODEV if CCSR is not mapped, -EINVAL for a bad
 * bpid.
 */
int bm_pool_set(u32 bpid, const u32 *thresholds)
{
	u32 swdet, swdxt, hwdet, hwdxt;

	if (!bman_ccsr_map)
		return -ENODEV;
	if (bpid >= bman_pool_max)
		return -EINVAL;
	/* Exit ("XT") thresholds round up so they never encode below the
	 * requested value.
	 */
	swdet = __generate_thresh(thresholds[0], 0);
	swdxt = __generate_thresh(thresholds[1], 1);
	hwdet = __generate_thresh(thresholds[2], 0);
	hwdxt = __generate_thresh(thresholds[3], 1);
	out_be32(bman_ccsr_map + POOL_SWDET(bpid), swdet);
	out_be32(bman_ccsr_map + POOL_SWDXT(bpid), swdxt);
	out_be32(bman_ccsr_map + POOL_HWDET(bpid), hwdet);
	out_be32(bman_ccsr_map + POOL_HWDXT(bpid), hwdxt);
	return 0;
}
#define BMAN_LOW_DEFAULT_THRESH 0x40
#define BMAN_HIGH_DEFAULT_THRESH 0x80
/* Program only the hardware depletion entry/exit thresholds of pool @bpid.
 * If either threshold is zero, both fall back to the module defaults.
 *
 * Returns 0 on success, -ENODEV if CCSR is not mapped, -EINVAL for a bad
 * bpid.
 */
int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
			     const u32 high_thresh)
{
	u32 det, dxt;

	if (!bman_ccsr_map)
		return -ENODEV;
	if (bpid >= bman_pool_max)
		return -EINVAL;
	if (low_thresh && high_thresh) {
		det = __generate_thresh(low_thresh, 0);
		dxt = __generate_thresh(high_thresh, 1);
	} else {
		det = __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0);
		dxt = __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1);
	}
	out_be32(bman_ccsr_map + POOL_HWDET(bpid), det);
	out_be32(bman_ccsr_map + POOL_HWDXT(bpid), dxt);
	return 0;
}

View File

@ -0,0 +1,125 @@
/*-
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* BSD LICENSE
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
* Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the above-listed copyright holders nor the
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* GPL LICENSE SUMMARY
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BMAN_PRIV_H
#define __BMAN_PRIV_H
#include "dpaa_sys.h"
#include <fsl_bman.h>
/* Revision info (for errata and feature handling) */
#define BMAN_REV10 0x0100
#define BMAN_REV20 0x0200
#define BMAN_REV21 0x0201
#define BMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
#define BMAN_CCSR_MAP "/dev/mem"
/* This mask contains all the "irqsource" bits visible to API users */
#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
* the disable register" rather than "disable the ability to write".
*/
#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
/*
 * Global variables: the maximum buffer-pool count supported by this BMan
 * hardware revision.
 */
extern u16 bman_pool_max;
/* used by CCSR and portal interrupt code */
/* Register selectors passed to the __bm_isr_read()/__bm_isr_write()
 * accessors (see the bm_isr_* macros above).
 */
enum bm_isr_reg {
	bm_isr_status = 0,
	bm_isr_enable = 1,
	bm_isr_disable = 2,
	bm_isr_inhibit = 3
};
/* Per-portal configuration, filled in by the portal driver before the
 * portal is handed to bman_create_affine_portal().
 */
struct bm_portal_config {
	/*
	 * Corenet portal addresses;
	 * [0]==cache-enabled, [1]==cache-inhibited.
	 */
	void __iomem *addr_virt[2];
	/* Allow these to be joined in lists */
	struct list_head list;
	/* User-visible portal configuration settings */
	/* This is used for any "core-affine" portals, ie. default portals
	 * associated to the corresponding cpu. -1 implies that there is no
	 * core affinity configured.
	 */
	int cpu;
	/* portal interrupt line */
	int irq;
	/* the unique index of this portal */
	u32 index;
	/* Is this portal shared? (If so, it has coarser locking and demuxes
	 * processing on behalf of other CPUs.).
	 */
	int is_shared;
	/* These are the buffer pool IDs that may be used via this portal. */
	struct bman_depletion mask;
};
int bman_init_ccsr(const struct device_node *node);
struct bman_portal *bman_create_affine_portal(
const struct bm_portal_config *config);
const struct bm_portal_config *bman_destroy_affine_portal(void);
/* Set depletion thresholds associated with a buffer pool. Requires that the
* operating system have access to Bman CCSR (ie. compiled in support and
* run-time access courtesy of the device-tree).
*/
int bm_pool_set(u32 bpid, const u32 *thresholds);
/* Read the free buffer count for a given buffer */
u32 bm_pool_free_buffers(u32 bpid);
#endif /* __BMAN_PRIV_H */

View File

@ -0,0 +1,375 @@
/*-
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* BSD LICENSE
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the above-listed copyright holders nor the
* names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* GPL LICENSE SUMMARY
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_BMAN_H
#define __FSL_BMAN_H
#ifdef __cplusplus
extern "C" {
#endif
/* This wrapper represents a bit-array for the depletion state of the 64 Bman
* buffer pools.
*/
struct bman_depletion {
	/* 64 pools, one bit each, split across two 32-bit words. */
	u32 state[2];
};
/* Clear the bit-array: no pools selected. */
static inline void bman_depletion_init(struct bman_depletion *c)
{
	c->state[0] = 0;
	c->state[1] = 0;
}
/* Set every bit: all 64 pools selected. */
static inline void bman_depletion_fill(struct bman_depletion *c)
{
	c->state[0] = ~0;
	c->state[1] = ~0;
}
/* --- Bman data structures (and associated constants) --- */
/* Represents s/w corenet portal mapped data structures */
struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
struct bm_mc_command; /* MC (Management Command) command */
struct bm_mc_result; /* MC result */
/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
* pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
* BM_MCC_VERB_ACQUIRE), the 'bpid' field is used.
*/
struct bm_buffer {
	union {
		/* Field-wise view: pool id plus the 48-bit address split into
		 * 16-bit high and 32-bit low halves; member order flips with
		 * host endianness so the in-memory layout matches the h/w.
		 */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8 __reserved1;
			u8 bpid;
			u16 hi; /* High 16-bits of 48-bit address */
			u32 lo; /* Low 32-bits of 48-bit address */
#else
			u32 lo;
			u16 hi;
			u8 bpid;
			u8 __reserved;
#endif
		};
		/* Bit-field view of the same 48-bit address. */
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u64 __notaddress:16;
			u64 addr:48;
#else
			u64 addr:48;
			u64 __notaddress:16;
#endif
		};
		/* Raw 64-bit view, e.g. for copying whole entries. */
		u64 opaque;
	};
} __attribute__((aligned(8)));
/* Return the buffer's 48-bit address as a plain u64. */
static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
	return buf->addr;
}
/* Same address as bm_buffer_get64(), returned as dma_addr_t. */
static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
{
	return (dma_addr_t)buf->addr;
}
/* Store the 64-bit value @v into the hi/lo address fields of @buf.
 * BUGFIX: evaluate @v exactly once - the original expanded it into both
 * upper_32_bits() and lower_32_bits(), doubling any side effects in the
 * argument.
 */
#define bm_buffer_set64(buf, v) \
	do { \
		struct bm_buffer *__buf931 = (buf); \
		u64 __v931 = (v); \
		__buf931->hi = upper_32_bits(__v931); \
		__buf931->lo = lower_32_bits(__v931); \
	} while (0)
/* See 1.5.3.5.4: "Release Command" */
/* One 64-byte Release Command Ring entry: either the verb/bpid header view
 * or up to 8 buffers being released.
 */
struct bm_rcr_entry {
	union {
		struct {
			/* Set via the RCR accessors (see field name) - never
			 * written directly by users.
			 */
			u8 __dont_write_directly__verb;
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
} __packed;
#define BM_RCR_VERB_VBIT 0x80
#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
/* See 1.5.3.1: "Acquire Command" */
/* See 1.5.3.2: "Query Command" */
/* Payload of an "Acquire" management command: just the pool id. */
struct bm_mcc_acquire {
	u8 bpid;
	u8 __reserved1[62];
} __packed;
/* "Query" carries no parameters; the payload is all reserved. */
struct bm_mcc_query {
	u8 __reserved2[63];
} __packed;
/* A 64-byte management command: verb byte plus command-specific payload. */
struct bm_mc_command {
	/* Set via the MC accessors (see field name), not written directly. */
	u8 __dont_write_directly__verb;
	union {
		struct bm_mcc_acquire acquire;
		struct bm_mcc_query query;
	};
} __packed;
#define BM_MCC_VERB_VBIT 0x80
#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE 0x10
#define BM_MCC_VERB_CMD_QUERY 0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
/* See 1.5.3.3: "Acquire Response" */
/* See 1.5.3.4: "Query Response" */
/* Layout of a Query response: per-pool availability and depletion bit
 * arrays.
 */
struct bm_pool_state {
	u8 __reserved1[32];
	/* "availability state" and "depletion state" */
	struct {
		u8 __reserved1[8];
		/* Access using bman_depletion_***() */
		struct bman_depletion state;
	} as, ds;
};
/* A 64-byte management-command response: generic verb view, an Acquire
 * result (bpid or up to 8 buffers), or a Query result (pool state).
 */
struct bm_mc_result {
	union {
		struct {
			u8 verb;
			u8 __reserved1[63];
		};
		union {
			struct {
				u8 __reserved1;
				u8 bpid;
				u8 __reserved2[62];
			};
			struct bm_buffer bufs[8];
		} acquire;
		struct bm_pool_state query;
	};
} __packed;
#define BM_MCR_VERB_VBIT 0x80
#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
#define BM_MCR_VERB_CMD_ERR_ECC 0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
/* Portal and Buffer Pools */
/* Represents a managed portal */
struct bman_portal;
/* This object type represents Bman buffer pools. */
struct bman_pool;
/* This struct specifies parameters for a bman_pool object. */
/* Parameters accepted by bman_new_pool(). */
struct bman_pool_params {
	/* index of the buffer pool to encapsulate (0-63), ignored if
	 * BMAN_POOL_FLAG_DYNAMIC_BPID is set.
	 */
	u32 bpid;
	/* bit-mask of BMAN_POOL_FLAG_*** options */
	u32 flags;
	/* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
	 * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
	 * when run in the control plane (which controls Bman CCSR). This array
	 * matches the definition of bm_pool_set().
	 */
	u32 thresholds[4];
};
/* Flags to bman_new_pool() */
#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
/* Flags to bman_release() */
#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
/**
* bman_get_portal_index - get portal configuration index
*/
int bman_get_portal_index(void);
/**
* bman_rcr_is_empty - Determine if portal's RCR is empty
*
* For use in situations where a cpu-affine caller needs to determine when all
* releases for the local portal have been processed by Bman but can't use the
* BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
* The function forces tracking of RCR consumption (which normally doesn't
* happen until release processing needs to find space to put new release
* commands), and returns zero if the ring still has unprocessed entries,
* non-zero if it is empty.
*/
int bman_rcr_is_empty(void);
/**
* bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
* @result: is set by the API to the base BPID of the allocated range
* @count: the number of BPIDs required
* @align: required alignment of the allocated range
* @partial: non-zero if the API can return fewer than @count BPIDs
*
* Returns the number of buffer pools allocated, or a negative error code. If
* @partial is non zero, the allocation request may return a smaller range of
* BPs than requested (though alignment will be as requested). If @partial is
* zero, the return value will either be 'count' or negative.
*/
int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
/* Allocate a single BPID; 0 on success, negative error code otherwise. */
static inline int bman_alloc_bpid(u32 *result)
{
	int rc = bman_alloc_bpid_range(result, 1, 0, 0);

	/* A positive return is the count allocated (1); map it to success. */
	return rc > 0 ? 0 : rc;
}
/**
* bman_release_bpid_range - Release the specified range of buffer pool IDs
* @bpid: the base BPID of the range to deallocate
* @count: the number of BPIDs in the range
*
* This function can also be used to seed the allocator with ranges of BPIDs
* that it can subsequently allocate from.
*/
void bman_release_bpid_range(u32 bpid, unsigned int count);
/* Release a single BPID back to the allocator. */
static inline void bman_release_bpid(u32 bpid)
{
	bman_release_bpid_range(bpid, 1);
}
int bman_reserve_bpid_range(u32 bpid, unsigned int count);
/* Reserve a single BPID so the allocator will not hand it out. */
static inline int bman_reserve_bpid(u32 bpid)
{
	return bman_reserve_bpid_range(bpid, 1);
}
void bman_seed_bpid_range(u32 bpid, unsigned int count);
int bman_shutdown_pool(u32 bpid);
/**
* bman_new_pool - Allocates a Buffer Pool object
* @params: parameters specifying the buffer pool ID and behaviour
*
* Creates a pool object for the given @params. A portal and the depletion
* callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
* is set. NB, the fields from @params are copied into the new pool object, so
* the structure provided by the caller can be released or reused after the
* function returns.
*/
struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
/**
* bman_free_pool - Deallocates a Buffer Pool object
* @pool: the pool object to release
*/
void bman_free_pool(struct bman_pool *pool);
/**
* bman_get_params - Returns a pool object's parameters.
* @pool: the pool object
*
* The returned pointer refers to state within the pool object so must not be
* modified and can no longer be read once the pool object is destroyed.
*/
const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
/**
* bman_release - Release buffer(s) to the buffer pool
* @pool: the buffer pool object to release to
* @bufs: an array of buffers to release
* @num: the number of buffers in @bufs (1-8)
* @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
*
*/
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
u32 flags);
/**
* bman_acquire - Acquire buffer(s) from a buffer pool
* @pool: the buffer pool object to acquire from
* @bufs: array for storing the acquired buffers
* @num: the number of buffers desired (@bufs is at least this big)
*
* Issues an "Acquire" command via the portal's management command interface.
* The return value will be the number of buffers obtained from the pool, or a
* negative error code if a h/w error or pool starvation was encountered.
*/
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
u32 flags);
/**
* bman_query_pools - Query all buffer pool states
* @state: storage for the queried availability and depletion states
*/
int bman_query_pools(struct bm_pool_state *state);
/**
* bman_query_free_buffers - Query how many free buffers are in buffer pool
* @pool: the buffer pool object to query
*
* Return the number of the free buffers
*/
u32 bman_query_free_buffers(struct bman_pool *pool);
/**
* bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
* @pool: the buffer pool object to which the thresholds will be set
* @thresholds: the new thresholds
*/
int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
/**
* bm_pool_set_hw_threshold - Change the buffer pool's thresholds
* @pool: Pool id
* @low_thresh: low threshold
* @high_thresh: high threshold
*/
int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
const u32 high_thresh);
#ifdef __cplusplus
}
#endif
#endif /* __FSL_BMAN_H */

View File

@ -50,7 +50,9 @@ extern "C" {
/* Thread-entry/exit hooks; */
int qman_thread_init(void);
int bman_thread_init(void);
int qman_thread_finish(void);
int bman_thread_finish(void);
#define QBMAN_ANY_PORTAL_IDX 0xffffffff
@ -92,9 +94,12 @@ int bman_free_raw_portal(struct dpaa_raw_portal *portal);
* into another blocking read/select/poll.
*/
void qman_thread_irq(void);
void bman_thread_irq(void);
/* Global setup */
int qman_global_init(void);
int bman_global_init(void);
#ifdef __cplusplus
}
#endif