common/cnxk: allocate LMT region in userspace

As per the new LMTST design, userspace shall allocate the LMT region,
set up the DMA translation and share the IOVA with the kernel via mbox.
The kernel converts this IOVA to a physical address and updates the
LMT table entry with it.
Shared mode (i.e. all PCI functions sharing the LMT region allocated
by the primary/base PCI function) remains intact under this new design.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
Author: Harman Kalra <hkalra@marvell.com>
Date: 2021-06-23 10:16:05 +05:30
Committed by: Jerin Jacob
Parent: 0c22452d47
Commit: 9854e5db5d
5 changed files with 64 additions and 53 deletions
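In short, the userspace side of this handshake reduces to the sequence below. This is a condensed orientation sketch of the dev_lmt_setup() changes in the diff that follows, reusing its names (mz, name, dev); it is not additional driver code:

        /* 1. Reserve an IOVA-contiguous memzone, aligned to the region size.
         * 2. Share its IOVA with the kernel via the LMTST table setup mbox msg.
         * 3. The kernel maps IOVA -> PA and fills the LMT table entry.
         */
        mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE, LMT_REGION_SIZE);
        rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
        if (!rc)
                dev->lmt_base = mz->iova; /* LMT lines are carved from this base */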

drivers/common/cnxk/roc_api.h

@@ -24,6 +24,8 @@
 /* Platform definition */
 #include "roc_platform.h"
 
+#define ROC_LMT_LINE_SZ             128
+#define ROC_NUM_LMT_LINES           2048
 #define ROC_LMT_LINES_PER_CORE_LOG2 5
 #define ROC_LMT_LINE_SIZE_LOG2      7
 #define ROC_LMT_BASE_PER_CORE_LOG2                                             \

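A quick sanity check on the new constants (illustrative arithmetic, not part of the patch; it assumes the truncated ROC_LMT_BASE_PER_CORE_LOG2 definition above combines the two log2 values, 5 + 7 = 12):

        /* Sizes implied by the constants above. */
        _Static_assert(2048 * 128 == 256 * 1024,
                       "2048 lines x 128 B = 256 KiB LMT region");
        _Static_assert((1 << 5) * (1 << 7) == 4 * 1024,
                       "32 lines x 128 B = 4 KiB slice per core");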
drivers/common/cnxk/roc_dev.c

@@ -915,43 +915,30 @@ dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
 	mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
 }
 
-static uint16_t
-dev_pf_total_vfs(struct plt_pci_device *pci_dev)
-{
-	uint16_t total_vfs = 0;
-	int sriov_pos, rc;
-
-	sriov_pos =
-		plt_pci_find_ext_capability(pci_dev, ROC_PCI_EXT_CAP_ID_SRIOV);
-	if (sriov_pos <= 0) {
-		plt_warn("Unable to find SRIOV cap, rc=%d", sriov_pos);
-		return 0;
-	}
-
-	rc = plt_pci_read_config(pci_dev, &total_vfs, 2,
-				 sriov_pos + ROC_PCI_SRIOV_TOTAL_VF);
-	if (rc < 0) {
-		plt_warn("Unable to read SRIOV cap, rc=%d", rc);
-		return 0;
-	}
-
-	return total_vfs;
-}
-
 static int
-dev_setup_shared_lmt_region(struct mbox *mbox)
+dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
 {
 	struct lmtst_tbl_setup_req *req;
 
 	req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
-	req->pcifunc = idev_lmt_pffunc_get();
+	/* This pcifunc is defined with primary pcifunc whose LMT address
+	 * will be shared. If call contains valid IOVA, following pcifunc
+	 * field is of no use.
+	 */
+	req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
+	req->use_local_lmt_region = valid_iova;
+	req->lmt_iova = iova;
 
 	return mbox_process(mbox);
 }
 
+/* Total no of lines * size of each lmtline */
+#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
+
 static int
-dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
+dev_lmt_setup(struct dev *dev)
 {
+	char name[PLT_MEMZONE_NAMESIZE];
+	const struct plt_memzone *mz;
 	struct idev_cfg *idev;
 	int rc;
@@ -965,8 +952,11 @@ dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
 	/* Set common lmt region from second pf_func onwards. */
 	if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
 	    dev->pf_func != idev_lmt_pffunc_get()) {
-		rc = dev_setup_shared_lmt_region(dev->mbox);
+		rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
 		if (!rc) {
+			/* On success, updating lmt base of secondary pf_funcs
+			 * with primary pf_func's lmt base.
+			 */
 			dev->lmt_base = roc_idev_lmt_base_addr_get();
 			return rc;
 		}
@@ -975,34 +965,30 @@ dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
 			dev->pf_func, rc);
 	}
 
-	if (dev_is_vf(dev)) {
-		/* VF BAR4 should always be sufficient enough to
-		 * hold LMT lines.
-		 */
-		if (pci_dev->mem_resource[4].len <
-		    (RVU_LMT_LINE_MAX * RVU_LMT_SZ)) {
-			plt_err("Not enough bar4 space for lmt lines");
-			return -EFAULT;
-		}
+	/* Allocating memory for LMT region */
+	sprintf(name, "LMT_MAP%x", dev->pf_func);
 
-		dev->lmt_base = dev->bar4;
-	} else {
-		uint64_t bar4_mbox_sz = MBOX_SIZE;
-
-		/* PF BAR4 should always be sufficient enough to
-		 * hold PF-AF MBOX + PF-VF MBOX + LMT lines.
-		 */
-		if (pci_dev->mem_resource[4].len <
-		    (bar4_mbox_sz + (RVU_LMT_LINE_MAX * RVU_LMT_SZ))) {
-			plt_err("Not enough bar4 space for lmt lines and mbox");
-			return -EFAULT;
-		}
-
-		/* LMT base is just after total VF MBOX area */
-		bar4_mbox_sz += (MBOX_SIZE * dev_pf_total_vfs(pci_dev));
-		dev->lmt_base = dev->bar4 + bar4_mbox_sz;
-	}
+	/* Setting alignment to ensure correct masking for resetting to lmt base
+	 * of a core after all lmt lines under that core are used.
+	 * Alignment value LMT_REGION_SIZE to handle the case where all lines
+	 * are used by 1 core.
+	 */
+	mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
+					    LMT_REGION_SIZE);
+	if (!mz) {
+		plt_err("Memory alloc failed: %s", strerror(errno));
+		goto fail;
+	}
+
+	/* Share the IOVA address with Kernel */
+	rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
+	if (rc) {
+		errno = rc;
+		goto free;
+	}
+
+	dev->lmt_base = mz->iova;
+	dev->lmt_mz = mz;
 
 	/* Base LMT address should be chosen from only those pci funcs which
 	 * participate in LMT shared mode.
 	 */
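A note on the alignment comment just above: because the memzone is aligned to LMT_REGION_SIZE, resetting to the region base is a pure mask even in the worst case the comment names (one core consuming every line). A standalone illustration, with a hypothetical base address:

        #include <stdint.h>
        #include <stdio.h>

        #define LMT_REGION_SIZE (2048 * 128) /* 256 KiB, as in the patch */

        int main(void)
        {
                uint64_t lmt_base = 0x40000; /* hypothetical IOVA, LMT_REGION_SIZE-aligned */
                /* Worst case: a single core has walked through all 2048 lines. */
                uint64_t last_line = lmt_base + LMT_REGION_SIZE - 128;

                /* The alignment makes the reset a mask, no per-core bookkeeping. */
                uint64_t reset = last_line & ~(uint64_t)(LMT_REGION_SIZE - 1);
                printf("reset == lmt_base? %s\n", reset == lmt_base ? "yes" : "no");
                return 0;
        }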
@@ -1016,6 +1002,10 @@ dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
 	}
 
 	return 0;
+free:
+	plt_memzone_free(mz);
+fail:
+	return -errno;
 }
 
 int
@@ -1130,7 +1120,7 @@ dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
 		goto iounmap;
 
 	/* Setup LMT line base */
-	rc = dev_lmt_setup(pci_dev, dev);
+	rc = dev_lmt_setup(dev);
 	if (rc)
 		goto iounmap;
@@ -1161,6 +1151,10 @@ dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
 	/* Clear references to this pci dev */
 	npa_lf_fini();
 
+	/* Releasing memory allocated for lmt region */
+	if (dev->lmt_mz)
+		plt_memzone_free(dev->lmt_mz);
+
 	mbox_unregister_irq(pci_dev, dev);
 
 	if (!dev_is_vf(dev))

drivers/common/cnxk/roc_dev_priv.h

@@ -84,6 +84,7 @@ struct dev {
 	struct dev_ops *ops;
 	void *roc_nix;
 	bool disable_shared_lmt; /* false(default): shared lmt mode enabled */
+	const struct plt_memzone *lmt_mz;
 } __plt_cache_aligned;
 
 struct npa {

drivers/common/cnxk/roc_mbox.h

@@ -403,6 +403,9 @@ struct lmtst_tbl_setup_req {
 	uint64_t __io dis_line_pref : 1;
 	uint64_t __io ssow_pf_func : 13;
 	uint16_t __io pcifunc;
+	uint8_t __io use_local_lmt_region;
+	uint64_t __io lmt_iova;
+	uint64_t __io rsvd[2]; /* Future use */
 };
 
 /* CGX mbox message formats */

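With these new fields, the same request serves both modes; the two invocations below are the ones dev_lmt_setup() makes in the roc_dev.c hunks above:

        /* Secondary pf_func, shared mode: IOVA invalid, kernel reuses the
         * LMT table entry of the primary pf_func named in req->pcifunc.
         */
        rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);

        /* Owning pf_func, local region: IOVA valid, req->pcifunc ignored. */
        rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);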
drivers/common/cnxk/roc_platform.h

@@ -194,4 +194,15 @@ int roc_plt_init(void);
 typedef int (*roc_plt_init_cb_t)(void);
 int __roc_api roc_plt_init_cb_register(roc_plt_init_cb_t cb);
 
+static inline const void *
+plt_lmt_region_reserve_aligned(const char *name, size_t len, uint32_t align)
+{
+	/* To ensure returned memory is physically contiguous, bounding
+	 * the start and end address in 2M range.
+	 */
+	return rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY,
+					   RTE_MEMZONE_IOVA_CONTIG,
+					   align, RTE_PGSIZE_2M);
+}
+
 #endif /* _ROC_PLATFORM_H_ */
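Usage mirrors dev_lmt_setup() above. A minimal function-body sketch under the patch's assumptions (pf_func and the enclosing function are hypothetical; LMT_REGION_SIZE as defined in roc_dev.c; plt_memzone maps to rte_memzone):

        #include <errno.h>
        #include <stdio.h>
        #include <rte_memzone.h>

        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        snprintf(name, sizeof(name), "LMT_MAP%x", pf_func); /* pf_func: hypothetical */
        mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE, LMT_REGION_SIZE);
        if (mz == NULL)
                return -ENOMEM; /* rte_errno carries the memzone failure cause */
        /* mz->iova is IOVA-contiguous; publish it to the kernel via mbox. */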