bus/dpaa: support interrupt portal based fd
This patch adds support in the bus driver for QBMAN to configure portal-based FDs, which can be used for interrupt-based processing.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
commit 8e253882cd
parent 079a67c251
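The changes below rename the thread-local BMan portal FD to bmfd and expose it via bman_thread_fd(), add qman_irqsource_add()/qman_irqsource_remove() and qman_clear_irq() to the QMan code, declare the new calls in the headers, and export the new symbols in a DPDK_18.11 section of the bus version map.

For orientation, here is a minimal sketch (not part of the patch) of how a per-lcore thread could consume the newly exported portal FD. The epoll wiring, the include paths and the choice of QM_PIRQ_DQRI are illustrative assumptions; the thread is assumed to have already set up its affine QMan portal.

#include <sys/epoll.h>

#include <fsl_qman.h>   /* qman_irqsource_add(), QM_PIRQ_DQRI (assumed include path) */
#include <process.h>    /* qman_thread_fd() */

/* Register the thread-local QMan portal FD with an existing epoll instance
 * and make dequeue-ring events interrupt driven instead of polled.
 */
static int dpaa_portal_intr_setup(int epoll_fd)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int qfd = qman_thread_fd();          /* UIO FD exported by this patch */

	if (qfd < 0)
		return -1;

	ev.data.fd = qfd;
	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, qfd, &ev) < 0)
		return -1;

	/* DQRI: raise the portal interrupt when frames land in the DQRR. */
	return qman_irqsource_add(QM_PIRQ_DQRI);
}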
@@ -23,7 +23,7 @@ static void *bman_ccsr_map;
 /* Portal driver */
 /*****************/
 
-static __thread int fd = -1;
+static __thread int bmfd = -1;
 static __thread struct bm_portal_config pcfg;
 static __thread struct dpaa_ioctl_portal_map map = {
 	.type = dpaa_portal_bman
@@ -70,14 +70,14 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
 	pcfg.index = map.index;
 	bman_depletion_fill(&pcfg.mask);
 
-	fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
-	if (fd == -1) {
+	bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+	if (bmfd == -1) {
 		pr_err("BMan irq init failed");
 		process_portal_unmap(&map.addr);
 		return -EBUSY;
 	}
 	/* Use the IRQ FD as a unique IRQ number */
-	pcfg.irq = fd;
+	pcfg.irq = bmfd;
 
 	portal = bman_create_affine_portal(&pcfg);
 	if (!portal) {
@@ -90,7 +90,7 @@ static int fsl_bman_portal_init(uint32_t idx, int is_shared)
 	/* Set the IRQ number */
 	irq_map.type = dpaa_portal_bman;
 	irq_map.portal_cinh = map.addr.cinh;
-	process_portal_irq_map(fd, &irq_map);
+	process_portal_irq_map(bmfd, &irq_map);
 	return 0;
 }
 
@@ -99,7 +99,7 @@ static int fsl_bman_portal_finish(void)
 	__maybe_unused const struct bm_portal_config *cfg;
 	int ret;
 
-	process_portal_irq_unmap(fd);
+	process_portal_irq_unmap(bmfd);
 
 	cfg = bman_destroy_affine_portal();
 	DPAA_BUG_ON(cfg != &pcfg);
@@ -109,6 +109,11 @@ static int fsl_bman_portal_finish(void)
 	return ret;
 }
 
+int bman_thread_fd(void)
+{
+	return bmfd;
+}
+
 int bman_thread_init(void)
 {
 	/* Convert from contiguous/virtual cpu numbering to real cpu when
@@ -1040,6 +1040,50 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 	return limit;
 }
 
+int qman_irqsource_add(u32 bits)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	bits = bits & QM_PIRQ_VISIBLE;
+
+	/* Clear any previously remaining interrupt conditions in
+	 * QCSP_ISR. This prevents raising a false interrupt when
+	 * interrupt conditions are enabled in QCSP_IER.
+	 */
+	qm_isr_status_clear(&p->p, bits);
+	dpaa_set_bits(bits, &p->irq_sources);
+	qm_isr_enable_write(&p->p, p->irq_sources);
+
+
+	return 0;
+}
+
+int qman_irqsource_remove(u32 bits)
+{
+	struct qman_portal *p = get_affine_portal();
+	u32 ier;
+
+	/* Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert.
+	 */
+
+	bits &= QM_PIRQ_VISIBLE;
+	dpaa_clear_bits(bits, &p->irq_sources);
+	qm_isr_enable_write(&p->p, p->irq_sources);
+	ier = qm_isr_enable_read(&p->p);
+	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering.
+	 */
+	qm_isr_status_clear(&p->p, ~ier);
+	return 0;
+}
+
 u16 qman_affine_channel(int cpu)
 {
 	if (cpu < 0) {
@@ -1114,6 +1158,14 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
 	return rx_number;
 }
 
+void qman_clear_irq(void)
+{
+	struct qman_portal *p = get_affine_portal();
+	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+	qm_isr_status_clear(&p->p, clear);
+}
+
 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
 			void **bufs)
 {
@@ -113,6 +113,11 @@ static int fsl_qman_portal_finish(void)
 	return ret;
 }
 
+int qman_thread_fd(void)
+{
+	return qmfd;
+}
+
 int qman_thread_init(void)
 {
 	/* Convert from contiguous/virtual cpu numbering to real cpu when
@@ -135,7 +140,7 @@ void qman_thread_irq(void)
 	 * rather than breaking that encapsulation I am simply hard-coding the
 	 * offset to the inhibit register here.
 	 */
-	out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+	out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
 }
 
 struct qman_portal *fsl_qman_portal_create(void)
@@ -1315,6 +1315,26 @@ int qman_get_portal_index(void);
 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
 			void **bufs);
 
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_remove(u32 bits);
+
 /**
  * qman_affine_channel - return the channel ID of an portal
  * @cpu: the cpu whose affine portal is the subject of the query
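The header comments above describe the intended NAPI-style usage: take a source out of interrupt mode while there is work, poll it dry, then hand it back to the interrupt. A hedged sketch of that pattern follows; app_drain_portal() is a hypothetical application hook, e.g. built on the qman_poll_***() functions the comment refers to.

/* Hypothetical application hook: dequeue up to 'budget' frames from the
 * affine portal and return how many were processed.
 */
extern unsigned int app_drain_portal(unsigned int budget);

/* Run the portal in poll mode while traffic is flowing, then re-arm the
 * dequeue-ring interrupt once it runs dry.
 */
static void dpaa_poll_then_rearm(void)
{
	unsigned int got;

	qman_irqsource_remove(QM_PIRQ_DQRI);   /* no DQRI interrupts while polling */

	do {
		got = app_drain_portal(16);
	} while (got > 0);

	qman_irqsource_add(QM_PIRQ_DQRI);      /* sleep on the portal FD again */
}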
@@ -55,6 +55,10 @@ int qman_free_raw_portal(struct dpaa_raw_portal *portal);
 int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
 int bman_free_raw_portal(struct dpaa_raw_portal *portal);
 
+/* Obtain thread-local UIO file-descriptors */
+int qman_thread_fd(void);
+int bman_thread_fd(void);
+
 /* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
  * line before notifying us, and this post-processing re-enables it once
  * processing is complete. As such, it is essential to call this before going
@@ -63,6 +67,8 @@ int bman_free_raw_portal(struct dpaa_raw_portal *portal);
 void qman_thread_irq(void);
 void bman_thread_irq(void);
 
+void qman_clear_irq(void);
+
 /* Global setup */
 int qman_global_init(void);
 int bman_global_init(void);
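Putting the declarations above together, here is a hedged sketch of the wakeup path the process.h comment describes: the kernel handler has already masked the interrupt line, so after servicing the portal the thread must clear stale status with qman_clear_irq() and re-enable the line with qman_thread_irq() before blocking again. The epoll plumbing and the 32-bit read that drains the UIO event count are assumptions, not part of this patch.

#include <stdint.h>
#include <unistd.h>
#include <sys/epoll.h>

#include <process.h>   /* qman_clear_irq(), qman_thread_irq() (assumed include path) */

/* One blocking wait on the portal FD, followed by the post-processing that
 * process.h requires before the thread may sleep again.
 */
static void dpaa_wait_and_service(int epoll_fd)
{
	struct epoll_event out;
	uint32_t cnt;

	if (epoll_wait(epoll_fd, &out, 1, -1) < 1)
		return;

	/* Assumed UIO-style FD: reading returns the interrupt count. */
	if (read(out.data.fd, &cnt, sizeof(cnt)) < 0)
		return;

	/* ... dequeue and process frames here ... */

	qman_clear_irq();    /* discard stale per-source status bits */
	qman_thread_irq();   /* re-enable the kernel IRQ line (see comment above) */
}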
@@ -95,10 +95,24 @@ DPDK_18.02 {
 
 DPDK_18.08 {
 	global:
 
 	fman_if_get_sg_enable;
 	fman_if_set_sg;
 	of_get_mac_address;
 
 	local: *;
 } DPDK_18.02;
+
+DPDK_18.11 {
+	global:
+	bman_thread_irq;
+	fman_if_get_sg_enable;
+	fman_if_set_sg;
+	qman_clear_irq;
+
+	qman_irqsource_add;
+	qman_irqsource_remove;
+	qman_thread_fd;
+	qman_thread_irq;
+
+	local: *;
+} DPDK_18.08;