bus/fslmc: support memory backed portals with QBMAN 5.0

This new mode is available on the LX2160 platform. The code detects the
underlying QBMAN version at runtime and chooses the portal mode
accordingly.

Signed-off-by: Youri Querry <youri.querry_1@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Nipun Gupta, 2018-10-12 15:34:17 +05:30; committed by Thomas Monjalon
parent 93cc838b72
commit 293c0ca94c
8 changed files with 873 additions and 255 deletions
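The runtime choice reduces to a single revision compare, repeated at each site that must behave differently. A minimal sketch, assuming only the QMAN_REV_* constants that qbman_sys.h defines below (the helper name is illustrative, not part of the patch):

	#define QMAN_REV_MASK 0xffff0000
	#define QMAN_REV_5000 0x05000000

	/* illustrative: QBMan 5.0 and newer run the software portal
	 * in memory-backed mode */
	static inline int qbman_portal_is_mem_backed(uint32_t qman_version)
	{
		return (qman_version & QMAN_REV_MASK) >= QMAN_REV_5000;
	}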


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright 2016 NXP
* Copyright 2016-2018 NXP
*
*/
#include <unistd.h>
@ -177,68 +177,6 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
}
#endif
static int
configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
{
struct qbman_swp_desc p_des;
struct dpio_attr attr;
dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
if (!dpio_dev->dpio) {
DPAA2_BUS_ERR("Memory allocation failure");
return -1;
}
dpio_dev->dpio->regs = dpio_dev->mc_portal;
if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
&dpio_dev->token)) {
DPAA2_BUS_ERR("Failed to allocate IO space");
free(dpio_dev->dpio);
return -1;
}
if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
DPAA2_BUS_ERR("Failed to reset dpio");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
DPAA2_BUS_ERR("Failed to Enable dpio");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, &attr)) {
DPAA2_BUS_ERR("DPIO Get attribute failed");
dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
/* Configure & setup SW portal */
p_des.block = NULL;
p_des.idx = attr.qbman_portal_id;
p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
p_des.irq = -1;
p_des.qman_version = attr.qbman_version;
dpio_dev->sw_portal = qbman_swp_init(&p_des);
if (dpio_dev->sw_portal == NULL) {
DPAA2_BUS_ERR("QBMan SW Portal Init failed");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
return 0;
}
static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
{
@ -402,15 +340,17 @@ dpaa2_create_dpio_device(int vdev_fd,
struct vfio_device_info *obj_info,
int object_id)
{
struct dpaa2_dpio_dev *dpio_dev;
struct dpaa2_dpio_dev *dpio_dev = NULL;
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
struct qbman_swp_desc p_des;
struct dpio_attr attr;
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
return -1;
}
dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
dpio_dev = rte_zmalloc(NULL, sizeof(struct dpaa2_dpio_dev),
RTE_CACHE_LINE_SIZE);
if (!dpio_dev) {
DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
@ -423,45 +363,33 @@ dpaa2_create_dpio_device(int vdev_fd,
/* Using single portal for all devices */
dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
reg_info.index = 0;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
DPAA2_BUS_ERR("vfio: error getting region info");
rte_free(dpio_dev);
return -1;
dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
if (!dpio_dev->dpio) {
DPAA2_BUS_ERR("Memory allocation failure");
goto err;
}
dpio_dev->ce_size = reg_info.size;
dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
reg_info.index = 1;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
DPAA2_BUS_ERR("vfio: error getting region info");
rte_free(dpio_dev);
return -1;
dpio_dev->dpio->regs = dpio_dev->mc_portal;
if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
&dpio_dev->token)) {
DPAA2_BUS_ERR("Failed to allocate IO space");
goto err;
}
dpio_dev->ci_size = reg_info.size;
dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
if (configure_dpio_qbman_swp(dpio_dev)) {
DPAA2_BUS_ERR(
"Fail to configure the dpio qbman portal for %d",
dpio_dev->hw_id);
rte_free(dpio_dev);
return -1;
if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
DPAA2_BUS_ERR("Failed to reset dpio");
goto err;
}
io_space_count++;
dpio_dev->index = io_space_count;
if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
DPAA2_BUS_ERR("Failed to Enable dpio");
goto err;
}
if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
DPAA2_BUS_ERR("Fail to setup interrupt for %d",
dpio_dev->hw_id);
rte_free(dpio_dev);
if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, &attr)) {
DPAA2_BUS_ERR("DPIO Get attribute failed");
goto err;
}
/* find the SoC type for the first time */
@ -483,9 +411,67 @@ dpaa2_create_dpio_device(int vdev_fd,
dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
}
if (dpaa2_svr_family == SVR_LX2160A)
reg_info.index = DPAA2_SWP_CENA_MEM_REGION;
else
reg_info.index = DPAA2_SWP_CENA_REGION;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
DPAA2_BUS_ERR("vfio: error getting region info");
goto err;
}
dpio_dev->ce_size = reg_info.size;
dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
reg_info.index = DPAA2_SWP_CINH_REGION;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
DPAA2_BUS_ERR("vfio: error getting region info");
goto err;
}
dpio_dev->ci_size = reg_info.size;
dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
/* Configure & setup SW portal */
p_des.block = NULL;
p_des.idx = attr.qbman_portal_id;
p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
p_des.irq = -1;
p_des.qman_version = attr.qbman_version;
dpio_dev->sw_portal = qbman_swp_init(&p_des);
if (dpio_dev->sw_portal == NULL) {
DPAA2_BUS_ERR("QBMan SW Portal Init failed");
goto err;
}
io_space_count++;
dpio_dev->index = io_space_count;
if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
DPAA2_BUS_ERR("Fail to setup interrupt for %d",
dpio_dev->hw_id);
goto err;
}
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
return 0;
err:
if (dpio_dev->dpio) {
dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
}
rte_free(dpio_dev);
return -1;
}
void


@ -38,6 +38,10 @@
#define DPAA2_DQRR_RING_SIZE 16
/**< Maximum number of slots available in RX ring */
#define DPAA2_SWP_CENA_REGION 0
#define DPAA2_SWP_CINH_REGION 1
#define DPAA2_SWP_CENA_MEM_REGION 2
#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
#define NUM_DQS_PER_QUEUE 2


@ -78,13 +78,14 @@ do { \
#define lower_32_bits(x) ((uint32_t)(x))
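/* the double 16-bit shift keeps upper_32_bits() well defined even when
 * x is only 32 bits wide (a single >> 32 would be undefined) */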
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
#define __iomem
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
#define dma_wmb() rte_smp_mb()
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)


@ -42,6 +42,15 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
*/
void qbman_swp_finish(struct qbman_swp *p);
/**
* qbman_swp_invalidate() - Invalidate the cache-enabled area of the QBMan
* portal. This must be called when a portal is moved to another core,
* because the QBMan portal area is non-coherent.
* @p: the qbman_swp object to be invalidated
*
*/
void qbman_swp_invalidate(struct qbman_swp *p);
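/* Usage sketch (hedged): only qbman_swp_invalidate() is from this header;
 * the wrapper below is hypothetical and shows the intended call site when
 * a portal is rebound to another core.
 */
static inline void portal_rebind_cleanup(struct qbman_swp *p)
{
	qbman_swp_invalidate(p); /* drop stale non-coherent CENA lines */
}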
/**
* qbman_swp_get_desc() - Get the descriptor of the given portal object.
* @p: the given portal object.
@ -172,7 +181,7 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
/**
* struct qbman_result - structure for qbman dequeue response and/or
* notification.
* @donot_manipulate_directly: the 16 32bit data to represent the whole
* @dont_manipulate_directly: the 16 32bit data to represent the whole
* possible qbman dequeue result.
*/
struct qbman_result {
@ -262,7 +271,7 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
*/
struct qbman_pull_desc {
union {
uint32_t donot_manipulate_directly[16];
uint32_t dont_manipulate_directly[16];
struct pull {
uint8_t verb;
uint8_t numf;
@ -355,6 +364,14 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
enum qbman_pull_type_e dct);
/**
* qbman_pull_desc_set_rad() - Decide whether to reschedule the FQ after dequeue
*
* @rad: 1 = Reschedule the FQ after dequeue.
* 0 = Allow the FQ to remain active after dequeue.
*/
void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
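/* Usage sketch (hedged): qbman_pull_desc_clear(), _set_numframes() and
 * _set_fq() are assumed from the existing pull API; 'swp' and 'fqid' are
 * the caller's portal and frame queue.
 */
static inline int pull_one_and_reschedule(struct qbman_swp *swp, uint32_t fqid)
{
	struct qbman_pull_desc pd;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, 1);
	qbman_pull_desc_set_fq(&pd, fqid);
	qbman_pull_desc_set_rad(&pd, 1); /* put the FQ back on schedule */
	return qbman_swp_pull(swp, &pd); /* -EBUSY if a pull is in flight */
}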
/**
* qbman_swp_pull() - Issue the pull dequeue command
* @s: the software portal object.
@ -775,7 +792,7 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
union {
uint32_t donot_manipulate_directly[8];
uint32_t dont_manipulate_directly[8];
struct eq {
uint8_t verb;
uint8_t dca;
@ -796,11 +813,11 @@ struct qbman_eq_desc {
/**
* struct qbman_eq_response - structure of enqueue response
* @donot_manipulate_directly: the 16 32bit data to represent the whole
* @dont_manipulate_directly: the 16 32bit data to represent the whole
* enqueue response.
*/
struct qbman_eq_response {
uint32_t donot_manipulate_directly[16];
uint32_t dont_manipulate_directly[16];
};
/**
@ -958,6 +975,7 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
* @s: the software portal used for enqueue.
* @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
* @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
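/* Usage sketch (hedged): a retry loop over the burst API, assuming the
 * flags array may be NULL; 'eqd', 'fds' and 'n' belong to the caller.
 */
static inline int enqueue_burst(struct qbman_swp *s,
				const struct qbman_eq_desc *eqd,
				const struct qbman_fd *fds, int n)
{
	int ret;

	do {
		ret = qbman_swp_enqueue_multiple(s, eqd, fds, NULL, n);
	} while (ret == -EBUSY); /* EQCR not ready yet */
	return ret; /* number of frames accepted */
}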
@ -973,7 +991,6 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
* @s: the software portal used for enqueue.
* @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
* @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
@ -998,12 +1015,12 @@ int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
/*******************/
/**
* struct qbman_release_desc - The structure for buffer release descriptor
* @donot_manipulate_directly: the 32bit data to represent the whole
* @dont_manipulate_directly: the 32bit data to represent the whole
* possible settings of qbman release descriptor.
*/
struct qbman_release_desc {
union {
uint32_t donot_manipulate_directly[16];
uint32_t dont_manipulate_directly[16];
struct br {
uint8_t verb;
uint8_t reserved;

File diff suppressed because it is too large.


@ -1,12 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2018 NXP
*
*/
#ifndef _QBMAN_PORTAL_H_
#define _QBMAN_PORTAL_H_
#include "qbman_sys.h"
#include <fsl_qbman_portal.h>
uint32_t qman_version;
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
@ -14,13 +19,14 @@
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((uint32_t)0x80)
/* All QBMan commands use this "Read trigger bit" encoding */
#define QB_RT_BIT ((uint32_t)0x100)
/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0
/* QBMan DQRR size is set at runtime in qbman_portal.c */
#define QBMAN_EQCR_SIZE 8
static inline uint8_t qm_cyc_diff(uint8_t ringsize, uint8_t first,
uint8_t last)
{
@ -51,6 +57,10 @@ struct qbman_swp {
#endif
uint32_t valid_bit; /* 0x00 or 0x80 */
} mc;
/* Management response */
struct {
uint32_t valid_bit; /* 0x00 or 0x80 */
} mr;
/* Push dequeues */
uint32_t sdq;
/* Volatile dequeues */
@ -87,6 +97,8 @@ struct qbman_swp {
struct {
uint32_t pi;
uint32_t pi_vb;
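/* EQCR geometry is discovered at run time (QBMan 5.0 portals expose a
 * larger ring), so size and wrap mask are kept per portal instead of
 * the old QBMAN_EQCR_SIZE constant */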
uint32_t pi_ring_size;
uint32_t pi_mask;
uint32_t ci;
int available;
} eqcr;
@ -141,4 +153,16 @@ static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
* an inline) is necessary to work with different descriptor types and to work
* correctly with const and non-const inputs (and similarly-qualified outputs).
*/
#define qb_cl(d) (&(d)->donot_manipulate_directly[0])
#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
#ifdef RTE_ARCH_ARM64
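/* clean: write a dirty data-cache line back to memory (DC CVAC) */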
#define clean(p) \
{ asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
#define invalidate(p) \
{ asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
#else
#define clean(p)
#define invalidate(p)
#endif
#endif


@ -18,11 +18,51 @@
* *not* to provide linux compatibility.
*/
#ifndef _QBMAN_SYS_H_
#define _QBMAN_SYS_H_
#include "qbman_sys_decl.h"
#define CENA_WRITE_ENABLE 0
#define CINH_WRITE_ENABLE 1
/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
#define QBMAN_CINH_SWP_ITPR 0xf40
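/* the *_RT offsets back the read-trigger command mode (QB_RT_BIT in
 * qbman_portal.h) used by memory-backed portals for CR and VDQCR */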
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM 0x1600
#define QBMAN_CENA_SWP_RR_MEM 0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
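/* Selection sketch (hedged): how an accessor might choose between the
 * classic and memory-backed DQRR offsets; the helper name and version
 * parameter are illustrative, and QMAN_REV_* are defined later in this
 * file.
 */
static inline uint32_t qbman_dqrr_offset(uint32_t qman_version, unsigned int n)
{
	if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		return QBMAN_CENA_SWP_DQRR_MEM(n); /* memory-backed portal */
	return QBMAN_CENA_SWP_DQRR(n); /* MMIO CENA portal */
}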
/* Debugging assists */
static inline void __hexdump(unsigned long start, unsigned long end,
unsigned long p, size_t sz, const unsigned char *c)
@ -125,8 +165,8 @@ struct qbman_swp_sys {
* place-holder.
*/
uint8_t *cena;
uint8_t __iomem *addr_cena;
uint8_t __iomem *addr_cinh;
uint8_t *addr_cena;
uint8_t *addr_cinh;
uint32_t idx;
enum qbman_eqcr_mode eqcr_mode;
};
@ -292,13 +332,16 @@ static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
* qbman_portal.c. So use of it is declared locally here.
*/
#define QBMAN_CINH_SWP_CFG 0xd00
#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
#define SWP_CFG_VPM_SHIFT 7
#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
@ -329,11 +372,20 @@ static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
return reg;
}
#define QMAN_RT_MODE 0x00000100
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000
#define QMAN_REV_MASK 0xffff0000
static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
const struct qbman_swp_desc *d,
uint8_t dqrr_size)
{
uint32_t reg;
int i;
#ifdef RTE_ARCH_64
uint8_t wn = CENA_WRITE_ENABLE;
#else
@ -343,7 +395,7 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
s->addr_cena = d->cena_bar;
s->addr_cinh = d->cinh_bar;
s->idx = (uint32_t)d->idx;
s->cena = malloc(4096);
s->cena = malloc(64*1024);
if (!s->cena) {
pr_err("Could not allocate page for cena shadow\n");
return -1;
@ -358,12 +410,34 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
QBMAN_BUG_ON(reg);
#endif
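/* QBMan 5.0+ portals are memory backed, so plain zeroing is enough;
 * older MMIO-mapped portals need an explicit cache invalidate to drop
 * stale lines */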
if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
memset(s->addr_cena, 0, 64*1024);
else {
/* Invalidate the portal memory.
* This ensures no stale cache lines
*/
for (i = 0; i < 0x1000; i += 64)
dccivac(s->addr_cena + i);
}
if (s->eqcr_mode == qman_eqcr_vb_array)
reg = qbman_set_swp_cfg(dqrr_size, wn,
0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
else {
if ((d->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
reg = qbman_set_swp_cfg(dqrr_size, wn,
1, 3, 2, 2, 1, 1, 1, 1, 1, 1);
else
reg = qbman_set_swp_cfg(dqrr_size, wn,
1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
}
if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
}
qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
if (!reg) {
@ -371,6 +445,12 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
free(s->cena);
return -1;
}
if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
}
return 0;
}
@ -378,3 +458,5 @@ static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
{
free(s->cena);
}
#endif /* _QBMAN_SYS_H_ */


@ -3,6 +3,9 @@
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
*
*/
#ifndef _QBMAN_SYS_DECL_H_
#define _QBMAN_SYS_DECL_H_
#include <compat.h>
#include <fsl_qbman_base.h>
@ -51,3 +54,4 @@ static inline void prefetch_for_store(void *p)
RTE_SET_USED(p);
}
#endif
#endif /* _QBMAN_SYS_DECL_H_ */