Use proper integer-pointer type conversions.

As part of an effort to extend Book-E to the 64-bit world, make the necessary
changes to the DPAA/dTSEC driver set to be integer-pointer conversion clean.
This means no more casts to int; uintptr_t is used where needed.

Since the NCSW source is effectively obsolete, direct changes to the source tree
are safe.
Justin Hibbits 2016-10-18 00:55:15 +00:00
parent aec9c8d5a5
commit f77405e334
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=307542
15 changed files with 128 additions and 126 deletions
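The pattern throughout the diff is the same: a pointer that used to be squeezed through int (or uint32_t) now travels through uintptr_t, which is guaranteed wide enough to round-trip any object pointer. A minimal standalone sketch of the failure mode being fixed (hypothetical address, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* A pointer above 4 GiB, as seen on 64-bit Book-E. */
    	char *p = (char *)(uintptr_t)0x100080004000ULL;

    	/* Old style: forcing the pointer through int keeps only
    	 * the low 32 bits. */
    	int truncated = (int)(intptr_t)p;

    	/* New style: uintptr_t round-trips the full value. */
    	uintptr_t full = (uintptr_t)p;

    	printf("int: 0x%x  uintptr_t: 0x%jx\n", (unsigned)truncated,
    	    (uintmax_t)full);
    	return (0);
    }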

View File

@@ -51,45 +51,45 @@
/***************************/
/* Cache-inhibited register offsets */
#define REG_RCR_PI_CINH (void *)0x0000
#define REG_RCR_CI_CINH (void *)0x0004
#define REG_RCR_ITR (void *)0x0008
#define REG_CFG (void *)0x0100
#define REG_SCN(n) ((void *)(0x0200 + ((n) << 2)))
#define REG_ISR (void *)0x0e00
#define REG_IER (void *)0x0e04
#define REG_ISDR (void *)0x0e08
#define REG_IIR (void *)0x0e0c
#define REG_RCR_PI_CINH 0x0000
#define REG_RCR_CI_CINH 0x0004
#define REG_RCR_ITR 0x0008
#define REG_CFG 0x0100
#define REG_SCN(n) (0x0200 + ((n) << 2))
#define REG_ISR 0x0e00
#define REG_IER 0x0e04
#define REG_ISDR 0x0e08
#define REG_IIR 0x0e0c
/* Cache-enabled register offsets */
#define CL_CR (void *)0x0000
#define CL_RR0 (void *)0x0100
#define CL_RR1 (void *)0x0140
#define CL_RCR (void *)0x1000
#define CL_RCR_PI_CENA (void *)0x3000
#define CL_RCR_CI_CENA (void *)0x3100
#define CL_CR 0x0000
#define CL_RR0 0x0100
#define CL_RR1 0x0140
#define CL_RCR 0x1000
#define CL_RCR_PI_CENA 0x3000
#define CL_RCR_CI_CENA 0x3100
/* The h/w design requires mappings to be size-aligned so that "add"s can be
* reduced to "or"s. The primitives below do the same for s/w. */
static __inline__ void *ptr_ADD(void *a, void *b)
static __inline__ void *ptr_ADD(void *a, uintptr_t b)
{
return (void *)((uintptr_t)a + (uintptr_t)b);
return (void *)((uintptr_t)a + b);
}
/* Bitwise-OR two pointers */
static __inline__ void *ptr_OR(void *a, void *b)
static __inline__ void *ptr_OR(void *a, uintptr_t b)
{
return (void *)((uintptr_t)a | (uintptr_t)b);
return (void *)((uintptr_t)a | b);
}
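The alignment comment is what makes ptr_OR legitimate: when the mapped region's base address is aligned to at least the region size, the base contributes no bits in the offset range, so addition and bitwise OR produce the same address. A quick self-contained check under that assumption (hypothetical base address):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	/* Base aligned to 16 KiB: the low 14 bits are all zero. */
    	uintptr_t base = (uintptr_t)0xf0004000ULL;
    	uintptr_t off;

    	/* For any offset below the alignment, + and | agree. */
    	for (off = 0; off < 0x4000; off += 4)
    		assert((base + off) == (base | off));
    	return (0);
    }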
/* Cache-inhibited register access */
static __inline__ uint32_t __bm_in(struct bm_addr *bm, void *offset)
static __inline__ uint32_t __bm_in(struct bm_addr *bm, uintptr_t offset)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(bm->addr_ci, offset);
return GET_UINT32(*tmp);
}
static __inline__ void __bm_out(struct bm_addr *bm, void *offset, uint32_t val)
static __inline__ void __bm_out(struct bm_addr *bm, uintptr_t offset, uint32_t val)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(bm->addr_ci, offset);
WRITE_UINT32(*tmp, val);
@@ -101,26 +101,26 @@ static __inline__ void __bm_out(struct bm_addr *bm, void *offset, uint32_t val)
#define bm_cl(n) (void *)((n) << 6)
/* Cache-enabled (index) register access */
static __inline__ void __bm_cl_touch_ro(struct bm_addr *bm, void *offset)
static __inline__ void __bm_cl_touch_ro(struct bm_addr *bm, uintptr_t offset)
{
dcbt_ro(ptr_ADD(bm->addr_ce, offset));
}
static __inline__ void __bm_cl_touch_rw(struct bm_addr *bm, void *offset)
static __inline__ void __bm_cl_touch_rw(struct bm_addr *bm, uintptr_t offset)
{
dcbt_rw(ptr_ADD(bm->addr_ce, offset));
}
static __inline__ uint32_t __bm_cl_in(struct bm_addr *bm, void *offset)
static __inline__ uint32_t __bm_cl_in(struct bm_addr *bm, uintptr_t offset)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(bm->addr_ce, offset);
return GET_UINT32(*tmp);
}
static __inline__ void __bm_cl_out(struct bm_addr *bm, void *offset, uint32_t val)
static __inline__ void __bm_cl_out(struct bm_addr *bm, uintptr_t offset, uint32_t val)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(bm->addr_ce, offset);
WRITE_UINT32(*tmp, val);
dcbf(tmp);
}
static __inline__ void __bm_cl_invalidate(struct bm_addr *bm, void *offset)
static __inline__ void __bm_cl_invalidate(struct bm_addr *bm, uintptr_t offset)
{
dcbi(ptr_ADD(bm->addr_ce, offset));
}
@@ -156,7 +156,7 @@ static __inline__ uint8_t cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last
/* Bit-wise logic to convert a ring pointer to a ring index */
static __inline__ uint8_t RCR_PTR2IDX(struct bm_rcr_entry *e)
{
return (uint8_t)(((uint32_t)e >> 6) & (BM_RCR_SIZE - 1));
return (uint8_t)(((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1));
}
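RCR_PTR2IDX leans on the same alignment property: ring entries are one 64-byte cache line each and the ring is size-aligned, so bits 6 and up of an entry's address encode its position. Only low-order bits survive the mask, which is why the uint32_t cast happened to work on 32-bit targets and why uintptr_t is the type-correct spelling. A worked sketch with an assumed ring size of 8 and a hypothetical base address:

    #include <assert.h>
    #include <stdint.h>

    #define BM_RCR_SIZE 8	/* assumed power-of-two ring size */

    int main(void)
    {
    	/* Ring base aligned to 8 entries * 64 bytes = 512 bytes. */
    	uintptr_t ring = (uintptr_t)0xf0001000ULL;
    	/* Address of entry 5. */
    	uintptr_t e = ring + 5 * 64;

    	assert(((e >> 6) & (BM_RCR_SIZE - 1)) == 5);
    	return (0);
    }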
/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
@@ -483,12 +483,12 @@ void bm_isr_bscn_mask(struct bm_portal *portal, uint8_t bpid, int enable)
uint32_t __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
{
return __bm_in(&portal->addr, PTR_MOVE(REG_ISR, (n << 2)));
return __bm_in(&portal->addr, REG_ISR + (n << 2));
}
void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n, uint32_t val)
{
__bm_out(&portal->addr, PTR_MOVE(REG_ISR, (n << 2)), val);
__bm_out(&portal->addr, REG_ISR + (n << 2), val);
}
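PTR_MOVE is the NCSW helper for byte-granular pointer arithmetic; assuming the usual NCSW-style definition along the lines of (void*)((uint8_t*)(addr) + (offset)), the old and new expressions compute identical offsets, the new one just without fabricating a pointer out of 0x0e00 first:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed NCSW-style definition, for illustration only. */
    #define PTR_MOVE(_adr, _off) ((void *)((uint8_t *)(_adr) + (_off)))

    int main(void)
    {
    	int n = 3;
    	/* Old form: the register offset smuggled through a void *. */
    	void *oldway = PTR_MOVE((void *)0x0e00, n << 2);
    	/* New form: plain integer arithmetic on the offset. */
    	uintptr_t newway = 0x0e00 + (n << 2);

    	assert((uintptr_t)oldway == newway);
    	return (0);
    }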

View File

@@ -568,8 +568,8 @@ static uint32_t LoopMessageRing(t_QmPortal *p_QmPortal, uint32_t is)
qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
if (p_Msg) {
struct qman_fq *p_FqFqs = (void *)p_Msg->fq.contextB;
struct qman_fq *p_FqErn = (void *)p_Msg->ern.tag;
struct qman_fq *p_FqFqs = UINT_TO_PTR(p_Msg->fq.contextB);
struct qman_fq *p_FqErn = UINT_TO_PTR(p_Msg->ern.tag);
uint8_t verb =(uint8_t)(p_Msg->verb & QM_MR_VERB_TYPE_MASK);
t_QmRejectedFrameInfo rejectedFrameInfo;
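UINT_TO_PTR (paired with PTR_TO_UINT) is the NCSW conversion macro; assuming the usual NCSW definitions, it widens through uintptr_t so the compiler never sees a direct 32-bit-integer-to-pointer cast:

    #include <stdint.h>

    /* Assumed NCSW-style helpers, shown for illustration. */
    #define PTR_TO_UINT(_ptr) ((uintptr_t)(_ptr))
    #define UINT_TO_PTR(_val) ((void *)(uintptr_t)(_val))

    struct qman_fq;

    /* Usage mirroring the hunk above: the message ring hands back
     * a 32-bit tag that was seeded from a frame-queue pointer. */
    static struct qman_fq *
    tag_to_fq(uint32_t tag)
    {
    	return (UINT_TO_PTR(tag));
    }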
@@ -646,7 +646,7 @@ static void LoopDequeueRing(t_Handle h_QmPortal)
p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
if (!p_Dq)
break;
p_Fq = (void *)p_Dq->contextB;
p_Fq = UINT_TO_PTR(p_Dq->contextB);
if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/* We only set QMAN_FQ_STATE_NE when retiring, so we only need
* to check for clearing it when doing volatile dequeues. It's
@@ -728,7 +728,7 @@ static void LoopDequeueRingDcaOptimized(t_Handle h_QmPortal)
p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
if (!p_Dq)
break;
p_Fq = (void *)p_Dq->contextB;
p_Fq = UINT_TO_PTR(p_Dq->contextB);
if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/* We only set QMAN_FQ_STATE_NE when retiring, so we only need
* to check for clearing it when doing volatile dequeues. It's
@@ -802,7 +802,7 @@ static void LoopDequeueRingOptimized(t_Handle h_QmPortal)
p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
if (!p_Dq)
break;
p_Fq = (void *)p_Dq->contextB;
p_Fq = UINT_TO_PTR(p_Dq->contextB);
if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/* We only set QMAN_FQ_STATE_NE when retiring, so we only need
* to check for clearing it when doing volatile dequeues. It's
@@ -1483,7 +1483,7 @@ static t_Error QmPortalPullFrame(t_Handle h_QmPortal, uint32_t pdqcr, t_DpaaFD *
p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
if (!p_Dq)
continue;
p_Fq = (void *)p_Dq->contextB;
p_Fq = UINT_TO_PTR(p_Dq->contextB);
ASSERT_COND(p_Dq->fqid);
p_Dst = (uint32_t *)p_Frame;
p_Src = (uint32_t *)&p_Dq->fd;
@@ -1811,7 +1811,7 @@ t_Error QM_PORTAL_PollFrame(t_Handle h_QmPortal, t_QmPortalFrameInfo *p_frameInf
PUNLOCK(p_QmPortal);
return ERROR_CODE(E_EMPTY);
}
p_Fq = (void *)p_Dq->contextB;
p_Fq = UINT_TO_PTR(p_Dq->contextB);
ASSERT_COND(p_Dq->fqid);
if (p_Fq)
{
@@ -2141,7 +2141,7 @@ t_Error QM_FQR_Enqueue(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffse
}
p_Eq->fqid = p_Fq->fqid;
p_Eq->tag = (uint32_t)p_Fq;
p_Eq->tag = (uintptr_t)p_Fq;
/* gcc does a dreadful job of the following;
* eq->fd = *fd;
* It causes the entire function to save/restore a wider range of

View File

@@ -51,61 +51,61 @@
/***************************/
/* Cache-inhibited register offsets */
#define REG_EQCR_PI_CINH (void *)0x0000
#define REG_EQCR_CI_CINH (void *)0x0004
#define REG_EQCR_ITR (void *)0x0008
#define REG_DQRR_PI_CINH (void *)0x0040
#define REG_DQRR_CI_CINH (void *)0x0044
#define REG_DQRR_ITR (void *)0x0048
#define REG_DQRR_DCAP (void *)0x0050
#define REG_DQRR_SDQCR (void *)0x0054
#define REG_DQRR_VDQCR (void *)0x0058
#define REG_DQRR_PDQCR (void *)0x005c
#define REG_MR_PI_CINH (void *)0x0080
#define REG_MR_CI_CINH (void *)0x0084
#define REG_MR_ITR (void *)0x0088
#define REG_CFG (void *)0x0100
#define REG_ISR (void *)0x0e00
#define REG_IER (void *)0x0e04
#define REG_ISDR (void *)0x0e08
#define REG_IIR (void *)0x0e0c
#define REG_ITPR (void *)0x0e14
#define REG_EQCR_PI_CINH 0x0000
#define REG_EQCR_CI_CINH 0x0004
#define REG_EQCR_ITR 0x0008
#define REG_DQRR_PI_CINH 0x0040
#define REG_DQRR_CI_CINH 0x0044
#define REG_DQRR_ITR 0x0048
#define REG_DQRR_DCAP 0x0050
#define REG_DQRR_SDQCR 0x0054
#define REG_DQRR_VDQCR 0x0058
#define REG_DQRR_PDQCR 0x005c
#define REG_MR_PI_CINH 0x0080
#define REG_MR_CI_CINH 0x0084
#define REG_MR_ITR 0x0088
#define REG_CFG 0x0100
#define REG_ISR 0x0e00
#define REG_IER 0x0e04
#define REG_ISDR 0x0e08
#define REG_IIR 0x0e0c
#define REG_ITPR 0x0e14
/* Cache-enabled register offsets */
#define CL_EQCR (void *)0x0000
#define CL_DQRR (void *)0x1000
#define CL_MR (void *)0x2000
#define CL_EQCR_PI_CENA (void *)0x3000
#define CL_EQCR_CI_CENA (void *)0x3100
#define CL_DQRR_PI_CENA (void *)0x3200
#define CL_DQRR_CI_CENA (void *)0x3300
#define CL_MR_PI_CENA (void *)0x3400
#define CL_MR_CI_CENA (void *)0x3500
#define CL_RORI_CENA (void *)0x3600
#define CL_CR (void *)0x3800
#define CL_RR0 (void *)0x3900
#define CL_RR1 (void *)0x3940
#define CL_EQCR 0x0000
#define CL_DQRR 0x1000
#define CL_MR 0x2000
#define CL_EQCR_PI_CENA 0x3000
#define CL_EQCR_CI_CENA 0x3100
#define CL_DQRR_PI_CENA 0x3200
#define CL_DQRR_CI_CENA 0x3300
#define CL_MR_PI_CENA 0x3400
#define CL_MR_CI_CENA 0x3500
#define CL_RORI_CENA 0x3600
#define CL_CR 0x3800
#define CL_RR0 0x3900
#define CL_RR1 0x3940
static __inline__ void *ptr_ADD(void *a, void *b)
static __inline__ void *ptr_ADD(void *a, uintptr_t b)
{
return (void *)((uintptr_t)a + (uintptr_t)b);
return (void *)((uintptr_t)a + b);
}
/* The h/w design requires mappings to be size-aligned so that "add"s can be
* reduced to "or"s. The primitives below do the same for s/w. */
/* Bitwise-OR two pointers */
static __inline__ void *ptr_OR(void *a, void *b)
static __inline__ void *ptr_OR(void *a, uintptr_t b)
{
return (void *)((uintptr_t)a + (uintptr_t)b);
return (void *)((uintptr_t)a + b);
}
/* Cache-inhibited register access */
static __inline__ uint32_t __qm_in(struct qm_addr *qm, void *offset)
static __inline__ uint32_t __qm_in(struct qm_addr *qm, uintptr_t offset)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ci, offset);
return GET_UINT32(*tmp);
}
static __inline__ void __qm_out(struct qm_addr *qm, void *offset, uint32_t val)
static __inline__ void __qm_out(struct qm_addr *qm, uintptr_t offset, uint32_t val)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ci, offset);
WRITE_UINT32(*tmp, val);
@@ -114,29 +114,29 @@ static __inline__ void __qm_out(struct qm_addr *qm, void *offset, uint32_t val)
#define qm_out(reg, val) __qm_out(&portal->addr, REG_##reg, (uint32_t)val)
/* Convert 'n' cachelines to a pointer value for bitwise OR */
#define qm_cl(n) (void *)((n) << 6)
#define qm_cl(n) ((n) << 6)
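The shift by 6 is the 64-byte cache-line size on these parts, so qm_cl(n) is simply n * 64: qm_cl(0) is 0x00, qm_cl(1) is 0x40, qm_cl(2) is 0x80. Dropping the (void *) cast leaves the arithmetic to the uintptr_t-based ptr_ADD/ptr_OR callers.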
/* Cache-enabled (index) register access */
static __inline__ void __qm_cl_touch_ro(struct qm_addr *qm, void *offset)
static __inline__ void __qm_cl_touch_ro(struct qm_addr *qm, uintptr_t offset)
{
dcbt_ro(ptr_ADD(qm->addr_ce, offset));
}
static __inline__ void __qm_cl_touch_rw(struct qm_addr *qm, void *offset)
static __inline__ void __qm_cl_touch_rw(struct qm_addr *qm, uintptr_t offset)
{
dcbt_rw(ptr_ADD(qm->addr_ce, offset));
}
static __inline__ uint32_t __qm_cl_in(struct qm_addr *qm, void *offset)
static __inline__ uint32_t __qm_cl_in(struct qm_addr *qm, uintptr_t offset)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ce, offset);
return GET_UINT32(*tmp);
}
static __inline__ void __qm_cl_out(struct qm_addr *qm, void *offset, uint32_t val)
static __inline__ void __qm_cl_out(struct qm_addr *qm, uintptr_t offset, uint32_t val)
{
uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ce, offset);
WRITE_UINT32(*tmp, val);
dcbf(tmp);
}
static __inline__ void __qm_cl_invalidate(struct qm_addr *qm, void *offset)
static __inline__ void __qm_cl_invalidate(struct qm_addr *qm, uintptr_t offset)
{
dcbi(ptr_ADD(qm->addr_ce, offset));
}
@@ -190,7 +190,7 @@ static __inline__ void __qm_portal_unbind(struct qm_portal *portal, uint8_t ifac
/* Bit-wise logic to convert a ring pointer to a ring index */
static __inline__ uint8_t EQCR_PTR2IDX(struct qm_eqcr_entry *e)
{
return (uint8_t)(((uint32_t)e >> 6) & (QM_EQCR_SIZE - 1));
return (uint8_t)(((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1));
}
/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
@@ -459,7 +459,7 @@ static __inline__ uint8_t qm_eqcr_get_fill(struct qm_portal *portal)
static __inline__ uint8_t DQRR_PTR2IDX(struct qm_dqrr_entry *e)
{
return (uint8_t)(((uint32_t)e >> 6) & (QM_DQRR_SIZE - 1));
return (uint8_t)(((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1));
}
static __inline__ struct qm_dqrr_entry *DQRR_INC(struct qm_dqrr_entry *e)
@@ -829,7 +829,7 @@ static __inline__ uint8_t qm_dqrr_get_maxfill(struct qm_portal *portal)
static __inline__ uint8_t MR_PTR2IDX(struct qm_mr_entry *e)
{
return (uint8_t)(((uint32_t)e >> 6) & (QM_MR_SIZE - 1));
return (uint8_t)(((uintptr_t)e >> 6) & (QM_MR_SIZE - 1));
}
static __inline__ struct qm_mr_entry *MR_INC(struct qm_mr_entry *e)
@@ -1139,10 +1139,10 @@ static __inline__ void qm_isr_set_iperiod(struct qm_portal *portal, uint16_t ipe
static __inline__ uint32_t __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
{
return __qm_in(&portal->addr, PTR_MOVE(REG_ISR, (n << 2)));
return __qm_in(&portal->addr, REG_ISR + (n << 2));
}
static __inline__ void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, uint32_t val)
{
__qm_out(&portal->addr, PTR_MOVE(REG_ISR, (n << 2)), val);
__qm_out(&portal->addr, REG_ISR + (n << 2), val);
}

View File

@@ -124,7 +124,7 @@ typedef struct {
t_Handle h_App; /**< A handle to an application layer object; This handle will
be passed by the driver upon calling the above callbacks.
NOTE: this parameter relevant only for BM in master mode ('guestId'=NCSW_MASTER_ID). */
int errIrq; /**< BM error interrupt line; NO_IRQ if interrupts not used.
uintptr_t errIrq; /**< BM error interrupt line; NO_IRQ if interrupts not used.
NOTE: this parameter relevant only for BM in master mode ('guestId'=NCSW_MASTER_ID). */
uint8_t partBpidBase; /**< The first buffer-pool-id dedicated to this partition.

View File

@@ -322,9 +322,9 @@ typedef struct t_FmParams {
t_Handle h_App; /**< Relevant when guestId = NCSW_MASSTER_ID only.
A handle to an application layer object; This handle will
be passed by the driver upon calling the above callbacks */
int irq; /**< Relevant when guestId = NCSW_MASSTER_ID only.
uintptr_t irq; /**< Relevant when guestId = NCSW_MASSTER_ID only.
FM interrupt source for normal events */
int errIrq; /**< Relevant when guestId = NCSW_MASSTER_ID only.
uintptr_t errIrq; /**< Relevant when guestId = NCSW_MASSTER_ID only.
FM interrupt source for errors */
t_FmPcdFirmwareParams firmware; /**< Relevant when guestId = NCSW_MASSTER_ID only.
Ucode */

View File

@@ -249,7 +249,7 @@ typedef struct {
t_QmExceptionsCallback *f_Exception; /**< An application callback routine to handle exceptions.*/
t_Handle h_App; /**< A handle to an application layer object; This handle will
be passed by the driver upon calling the above callbacks */
int errIrq; /**< error interrupt line; NO_IRQ if interrupts not used */
uintptr_t errIrq; /**< error interrupt line; NO_IRQ if interrupts not used */
uint32_t partFqidBase; /**< The first frame-queue-id dedicated to this partition.
NOTE: this parameter relevant only when working with multiple partitions. */
uint32_t partNumOfFqids; /**< Number of frame-queue-ids dedicated to this partition.

View File

@@ -353,7 +353,8 @@ int ERROR_DYNAMIC_LEVEL = ERROR_GLOBAL_LEVEL;
#define PRINT_FORMAT "[CPU%02d, %s:%d %s]"
#define PRINT_FMT_PARAMS CORE_GetId(), __FILE__, __LINE__, __FUNCTION__
#define ERR_STRING(err) #err
#define _ERR_STRING(err) #err
#define ERR_STRING(err) _ERR_STRING(err)
#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0))
/* No debug/error/event messages at all */
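The two-level macro is the standard stringification idiom: # stringifies its argument exactly as written, so a one-level ERR_STRING would never expand a macro passed to it; the indirection through _ERR_STRING forces argument expansion first. A minimal illustration of the difference:

    #include <stdio.h>

    #define _STR(x)		#x
    #define STR_DIRECT(x)	#x	/* one level: no expansion */
    #define STR_EXPAND(x)	_STR(x)	/* two levels: expands first */

    #define ANSWER 42

    int main(void)
    {
    	printf("%s\n", STR_DIRECT(ANSWER));	/* prints "ANSWER" */
    	printf("%s\n", STR_EXPAND(ANSWER));	/* prints "42" */
    	return (0);
    }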

View File

@@ -197,7 +197,7 @@ char XX_GetChar(void);
@Return E_OK on success; error code otherwise..
*//***************************************************************************/
t_Error XX_PreallocAndBindIntr(int irq, unsigned int cpu);
t_Error XX_PreallocAndBindIntr(uintptr_t irq, unsigned int cpu);
/**************************************************************************//**
@Function XX_DeallocIntr
@@ -208,7 +208,7 @@ t_Error XX_PreallocAndBindIntr(int irq, unsigned int cpu);
@Return E_OK on success; error code otherwise..
*//***************************************************************************/
t_Error XX_DeallocIntr(int irq);
t_Error XX_DeallocIntr(uintptr_t irq);
/**************************************************************************//**
@Function XX_SetIntr
@@ -221,7 +221,7 @@ t_Error XX_DeallocIntr(int irq);
@Return E_OK on success; error code otherwise..
*//***************************************************************************/
t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle);
t_Error XX_SetIntr(uintptr_t irq, t_Isr *f_Isr, t_Handle handle);
/**************************************************************************//**
@Function XX_FreeIntr
@@ -232,7 +232,7 @@ t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle);
@Return E_OK on success; error code otherwise..
*//***************************************************************************/
t_Error XX_FreeIntr(int irq);
t_Error XX_FreeIntr(uintptr_t irq);
/**************************************************************************//**
@Function XX_EnableIntr
@@ -243,7 +243,7 @@ t_Error XX_FreeIntr(int irq);
@Return E_OK on success; error code otherwise..
*//***************************************************************************/
t_Error XX_EnableIntr(int irq);
t_Error XX_EnableIntr(uintptr_t irq);
/**************************************************************************//**
@Function XX_DisableIntr
@@ -254,7 +254,7 @@ t_Error XX_EnableIntr(int irq);
@Return E_OK on success; error code otherwise..
*//***************************************************************************/
t_Error XX_DisableIntr(int irq);
t_Error XX_DisableIntr(uintptr_t irq);
#if !(defined(__MWERKS__) && defined(OPTIMIZED_FOR_SPEED))
/**************************************************************************//**

View File

@@ -368,7 +368,7 @@ XX_Dispatch(void *arg)
}
t_Error
XX_PreallocAndBindIntr(int irq, unsigned int cpu)
XX_PreallocAndBindIntr(uintptr_t irq, unsigned int cpu)
{
struct resource *r;
unsigned int inum;
@@ -388,7 +388,7 @@ XX_PreallocAndBindIntr(int irq, unsigned int cpu)
}
t_Error
XX_DeallocIntr(int irq)
XX_DeallocIntr(uintptr_t irq)
{
struct resource *r;
unsigned int inum;
@@ -404,7 +404,7 @@ XX_DeallocIntr(int irq)
}
t_Error
XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle)
XX_SetIntr(uintptr_t irq, t_Isr *f_Isr, t_Handle handle)
{
device_t dev;
struct resource *r;
@@ -453,7 +453,7 @@ XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle)
}
t_Error
XX_FreeIntr(int irq)
XX_FreeIntr(uintptr_t irq)
{
device_t dev;
struct resource *r;
@@ -477,7 +477,7 @@ XX_FreeIntr(int irq)
}
t_Error
XX_EnableIntr(int irq)
XX_EnableIntr(uintptr_t irq)
{
struct resource *r;
@@ -490,7 +490,7 @@ XX_EnableIntr(int irq)
}
t_Error
XX_DisableIntr(int irq)
XX_DisableIntr(uintptr_t irq)
{
struct resource *r;
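Throughout the FreeBSD glue, the "irq" handed to these XX_* routines is not a small interrupt number at all but a struct resource pointer from the bus layer, cast to an integer; that is exactly why the parameter must be pointer-sized. A simplified stand-in (not the actual implementation) showing the round trip:

    #include <stdint.h>

    struct resource;		/* opaque FreeBSD bus resource */
    typedef uint32_t t_Error;	/* stand-in for the NCSW type */

    static t_Error
    sketch_dealloc_intr(uintptr_t irq)
    {
    	/* Recover the resource pointer; with an int parameter this
    	 * would have been truncated on a 64-bit kernel. */
    	struct resource *r = (struct resource *)irq;

    	/* ... look up the interrupt info for r and release it ... */
    	(void)r;
    	return (0);
    }

    /* Caller side, as in the portal detach paths below:
     *	sketch_dealloc_intr((uintptr_t)sc->sc_dp[i].dp_ires);
     */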

View File

@@ -114,7 +114,7 @@ bman_attach(device_t dev)
bp.totalNumOfBuffers = BMAN_MAX_BUFFERS;
bp.f_Exception = bman_exception;
bp.h_App = sc;
bp.errIrq = (int)sc->sc_ires;
bp.errIrq = (uintptr_t)sc->sc_ires;
bp.partBpidBase = 0;
bp.partNumOfPools = BM_MAX_NUM_OF_POOLS;
printf("base address: %llx\n", (uint64_t)bp.baseAddress);

View File

@@ -95,7 +95,7 @@ bman_portals_detach(device_t dev)
}
if (sc->sc_dp[i].dp_ires != NULL) {
XX_DeallocIntr((int)sc->sc_dp[i].dp_ires);
XX_DeallocIntr((uintptr_t)sc->sc_dp[i].dp_ires);
bus_release_resource(dev, SYS_RES_IRQ,
sc->sc_dp[i].dp_irid, sc->sc_dp[i].dp_ires);
}
@@ -116,7 +116,8 @@ bman_portal_setup(struct bman_softc *bsc)
struct dpaa_portals_softc *sc;
t_BmPortalParam bpp;
t_Handle portal;
unsigned int cpu, p;
unsigned int cpu;
uintptr_t p;
/* Return NULL if we're not ready or while detach */
if (bp_sc == NULL)
@@ -129,9 +130,9 @@ bman_portal_setup(struct bman_softc *bsc)
cpu = PCPU_GET(cpuid);
/* Check if portal is ready */
while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
while (atomic_cmpset_acq_ptr((uintptr_t *)&sc->sc_dp[cpu].dp_ph,
0, -1) == 0) {
p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);
p = atomic_load_acq_ptr((uintptr_t *)&sc->sc_dp[cpu].dp_ph);
/* Return if portal is already initialized */
if (p != 0 && p != -1) {
@@ -153,7 +154,7 @@ bman_portal_setup(struct bman_softc *bsc)
bpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
bpp.h_Bm = bsc->sc_bh;
bpp.swPortalId = cpu;
bpp.irq = (int)sc->sc_dp[cpu].dp_ires;
bpp.irq = (uintptr_t)sc->sc_dp[cpu].dp_ires;
portal = BM_PORTAL_Config(&bpp);
if (portal == NULL)
@@ -162,8 +163,7 @@ bman_portal_setup(struct bman_softc *bsc)
if (BM_PORTAL_Init(portal) != E_OK)
goto err;
atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
(uint32_t)portal);
atomic_store_rel_ptr((uintptr_t *)&sc->sc_dp[cpu].dp_ph, (uintptr_t)portal);
sched_unpin();
@@ -173,7 +173,7 @@ bman_portal_setup(struct bman_softc *bsc)
if (portal != NULL)
BM_PORTAL_Free(portal);
atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
atomic_store_rel_ptr((uintptr_t *)&sc->sc_dp[cpu].dp_ph, 0);
sched_unpin();
return (NULL);
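The dp_ph field does double duty as a lock word: 0 means uninitialized, -1 means some CPU is mid-setup, and any other value is the published portal handle. The switch from the _32 to the _ptr atomic(9) operations keeps that protocol intact now that the handle is pointer-sized. A simplified sketch of the pattern (error handling omitted):

    #include <sys/types.h>
    #include <machine/atomic.h>

    /* dp_ph: 0 = uninitialized, -1 = being set up, else = portal. */
    static void *
    portal_get_or_init(volatile uintptr_t *dp_ph, void *(*init)(void))
    {
    	uintptr_t p;

    	while (atomic_cmpset_acq_ptr(dp_ph, 0, (uintptr_t)-1) == 0) {
    		p = atomic_load_acq_ptr(dp_ph);
    		/* Another CPU finished setup: use its portal. */
    		if (p != 0 && p != (uintptr_t)-1)
    			return ((void *)p);
    		/* Otherwise setup is in flight; spin and retry. */
    	}
    	/* We won the claim: initialize and publish. */
    	p = (uintptr_t)init();
    	atomic_store_rel_ptr(dp_ph, p);
    	return ((void *)p);
    }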

View File

@@ -66,8 +66,8 @@ enum fman_mu_ram_map {
struct fman_config {
device_t fman_device;
uintptr_t mem_base_addr;
int irq_num;
int err_irq_num;
uintptr_t irq_num;
uintptr_t err_irq_num;
uint8_t fm_id;
t_FmExceptionsCallback *exception_callback;
t_FmBusErrorCallback *bus_error_callback;
@@ -282,8 +282,8 @@ fman_attach(device_t dev)
cfg.fman_device = dev;
cfg.fm_id = device_get_unit(dev);
cfg.mem_base_addr = rman_get_bushandle(sc->mem_res);
cfg.irq_num = (int)sc->irq_res;
cfg.err_irq_num = (int)sc->err_irq_res;
cfg.irq_num = (uintptr_t)sc->irq_res;
cfg.err_irq_num = (uintptr_t)sc->err_irq_res;
cfg.exception_callback = fman_exception_callback;
cfg.bus_error_callback = fman_error_callback;

View File

@@ -121,7 +121,7 @@ dpaa_portal_alloc_res(device_t dev, struct dpaa_portals_devinfo *di, int cpu)
return (ENXIO);
}
err = XX_PreallocAndBindIntr((int)sc->sc_dp[cpu].dp_ires, cpu);
err = XX_PreallocAndBindIntr((uintptr_t)sc->sc_dp[cpu].dp_ires, cpu);
if (err != E_OK) {
device_printf(dev, "Could not prealloc and bind interrupt\n");

View File

@@ -205,7 +205,7 @@ qman_attach(device_t dev)
qp.pfdrMemPartitionId = NCSW_MASTER_ID;
qp.f_Exception = qman_exception;
qp.h_App = sc;
qp.errIrq = (int)sc->sc_ires;
qp.errIrq = (uintptr_t)sc->sc_ires;
qp.partFqidBase = QMAN_FQID_BASE;
qp.partNumOfFqids = QMAN_MAX_FQIDS;
qp.partCgsBase = 0;
@@ -255,7 +255,7 @@ qman_detach(device_t dev)
QM_Free(sc->sc_qh);
if (sc->sc_ires != NULL)
XX_DeallocIntr((int)sc->sc_ires);
XX_DeallocIntr((uintptr_t)sc->sc_ires);
if (sc->sc_ires != NULL)
bus_release_resource(dev, SYS_RES_IRQ,

View File

@@ -100,7 +100,7 @@ qman_portals_detach(device_t dev)
}
if (sc->sc_dp[i].dp_ires != NULL) {
XX_DeallocIntr((int)sc->sc_dp[i].dp_ires);
XX_DeallocIntr((uintptr_t)sc->sc_dp[i].dp_ires);
bus_release_resource(dev, SYS_RES_IRQ,
sc->sc_dp[i].dp_irid, sc->sc_dp[i].dp_ires);
}
@@ -120,7 +120,8 @@ qman_portal_setup(struct qman_softc *qsc)
{
struct dpaa_portals_softc *sc;
t_QmPortalParam qpp;
unsigned int cpu, p;
unsigned int cpu;
uintptr_t p;
t_Handle portal;
/* Return NULL if we're not ready or while detach */
@@ -134,9 +135,9 @@ qman_portal_setup(struct qman_softc *qsc)
cpu = PCPU_GET(cpuid);
/* Check if portal is ready */
while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
while (atomic_cmpset_acq_ptr((uintptr_t *)&sc->sc_dp[cpu].dp_ph,
0, -1) == 0) {
p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);
p = atomic_load_acq_ptr((uintptr_t *)&sc->sc_dp[cpu].dp_ph);
/* Return if portal is already initialized */
if (p != 0 && p != -1) {
@@ -158,7 +159,7 @@ qman_portal_setup(struct qman_softc *qsc)
qpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
qpp.h_Qm = qsc->sc_qh;
qpp.swPortalId = cpu;
qpp.irq = (int)sc->sc_dp[cpu].dp_ires;
qpp.irq = (uintptr_t)sc->sc_dp[cpu].dp_ires;
qpp.fdLiodnOffset = 0;
qpp.f_DfltFrame = qman_received_frame_callback;
qpp.f_RejectedFrame = qman_rejected_frame_callback;
@@ -174,8 +175,8 @@ qman_portal_setup(struct qman_softc *qsc)
if (QM_PORTAL_AddPoolChannel(portal, QMAN_COMMON_POOL_CHANNEL) != E_OK)
goto err;
atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
(uint32_t)portal);
atomic_store_rel_ptr((uintptr_t *)&sc->sc_dp[cpu].dp_ph,
(uintptr_t)portal);
sched_unpin();
return (portal);