raw/cnxk_bphy: support retrieving BPHY device memory

Allow users to retrieve baseband PHY memory resources.

Signed-off-by: Jakub Palider <jpalider@marvell.com>
Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
This commit is contained in:
parent
61278bc794
commit
e50cb22331
@@ -17,6 +17,7 @@ Features

The BPHY CGX/RPM implements following features in the rawdev API:

- Access to BPHY CGX/RPM via a set of predefined messages
- Access to BPHY memory

Device Setup
------------
@@ -115,6 +116,15 @@ The former will setup low level interrupt handling while the latter will tear ev
are also two convenience functions namely ``rte_pmd_bphy_intr_init()`` and
``rte_pmd_bphy_intr_fini()`` that take care of all details.

Get device memory
~~~~~~~~~~~~~~~~~

This message is used to read the device MMIO address.

The message must have its type set to ``CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET``. There is a
convenience function ``rte_pmd_bphy_intr_mem_get()`` available that takes care of
retrieving that address.

Self test
---------
@ -53,6 +53,9 @@ cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
|
||||
case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
|
||||
cnxk_bphy_intr_fini(dev->dev_id);
|
||||
break;
|
||||
case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
|
||||
bphy_dev->queues[queue].rsp = &bphy_dev->mem;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
@ -57,3 +57,11 @@ cnxk_bphy_intr_fini(uint16_t dev_id)
|
||||
roc_bphy_intr_fini(irq_chip);
|
||||
bphy_dev->irq_chip = NULL;
|
||||
}
|
||||
|
||||
struct bphy_mem *
|
||||
cnxk_bphy_mem_get(uint16_t dev_id)
|
||||
{
|
||||
struct bphy_device *bphy_dev = cnxk_bphy_get_bphy_dev_by_dev_id(dev_id);
|
||||
|
||||
return &bphy_dev->mem;
|
||||
}
|
||||
|
@ -31,6 +31,7 @@ struct bphy_device {
|
||||
|
||||
int cnxk_bphy_intr_init(uint16_t dev_id);
|
||||
void cnxk_bphy_intr_fini(uint16_t dev_id);
|
||||
struct bphy_mem *cnxk_bphy_mem_get(uint16_t dev_id);
|
||||
uint64_t cnxk_bphy_irq_max_get(uint16_t dev_id);
|
||||
|
||||
#endif /* _CNXK_BPHY_IRQ_ */
|
||||
|
@ -103,6 +103,7 @@ struct cnxk_bphy_cgx_msg {
|
||||
void *data;
|
||||
};
|
||||
|
||||
#define cnxk_bphy_mem bphy_mem
|
||||
#define CNXK_BPHY_DEF_QUEUE 0
|
||||
|
||||
enum cnxk_bphy_irq_msg_type {
|
||||
@ -115,6 +116,11 @@ enum cnxk_bphy_irq_msg_type {
|
||||
|
||||
struct cnxk_bphy_irq_msg {
|
||||
enum cnxk_bphy_irq_msg_type type;
|
||||
/*
|
||||
* The data field, depending on message type, may point to
|
||||
* - (deq) struct cnxk_bphy_mem for memory range request response
|
||||
* - (xxx) NULL
|
||||
*/
|
||||
void *data;
|
||||
};
|
||||
|
||||
@ -155,4 +161,28 @@ rte_pmd_bphy_intr_fini(uint16_t dev_id)
|
||||
rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
|
||||
}
|
||||
|
||||
static __rte_always_inline struct cnxk_bphy_mem *
|
||||
rte_pmd_bphy_intr_mem_get(uint16_t dev_id)
|
||||
{
|
||||
struct cnxk_bphy_irq_msg msg = {
|
||||
.type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
|
||||
};
|
||||
struct rte_rawdev_buf *bufs[1];
|
||||
struct rte_rawdev_buf buf;
|
||||
int ret;
|
||||
|
||||
buf.buf_addr = &msg;
|
||||
bufs[0] = &buf;
|
||||
|
||||
ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
|
||||
if (ret)
|
||||
return NULL;
|
||||
|
||||
ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
|
||||
if (ret)
|
||||
return NULL;
|
||||
|
||||
return buf.buf_addr;
|
||||
}
|
||||
|
||||
#endif /* _CNXK_BPHY_H_ */
|
||||
|
Loading…
Reference in New Issue
Block a user