net/pfe: add MAC and host interface initialisation

HIF, or host interface, is responsible for transmitting
and receiving packets between the physical ethernet
interfaces and the logical interfaces defined by the HIF library.

This patch initialises the host interface and the MAC.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
This commit is contained in:
Gagandeep Singh 2019-10-10 12:02:26 +05:30 committed by Ferruh Yigit
parent 6dd520837f
commit 5253fe372e
11 changed files with 1440 additions and 2 deletions

View File

@ -10,13 +10,23 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_pfe.a
CFLAGS += -O3 $(WERROR_FLAGS)
CFLAGS += -Wno-pointer-arith
CFLAGS += -I$(RTE_SDK)/drivers/net/pfe/base/
CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
EXPORT_MAP := rte_pmd_pfe_version.map
LIBABIVER := 1
# Driver uses below experimental APIs
# rte_mem_iova2virt
# rte_mem_virt2memseg
CFLAGS += -DALLOW_EXPERIMENTAL_API
# Interfaces with DPDK
SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hal.c
SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hif_lib.c
SRCS-$(CONFIG_RTE_LIBRTE_PFE_PMD) += pfe_hif.c
LDLIBS += -lrte_bus_vdev
LDLIBS += -lrte_bus_dpaa

View File

@ -311,6 +311,70 @@ void bmu_set_config(void *base, struct BMU_CFG *cfg);
enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
#endif
void gemac_init(void *base, void *config);
void gemac_disable_rx_checksum_offload(void *base);
void gemac_enable_rx_checksum_offload(void *base);
void gemac_set_mdc_div(void *base, int mdc_div);
void gemac_set_speed(void *base, enum mac_speed gem_speed);
void gemac_set_duplex(void *base, int duplex);
void gemac_set_mode(void *base, int mode);
void gemac_enable(void *base);
void gemac_tx_disable(void *base);
void gemac_tx_enable(void *base);
void gemac_disable(void *base);
void gemac_reset(void *base);
void gemac_set_address(void *base, struct spec_addr *addr);
struct spec_addr gemac_get_address(void *base);
void gemac_set_loop(void *base, enum mac_loop gem_loop);
void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
unsigned int entry_index);
void gemac_clear_laddr1(void *base);
void gemac_clear_laddr2(void *base);
void gemac_clear_laddr3(void *base);
void gemac_clear_laddr4(void *base);
void gemac_clear_laddrN(void *base, unsigned int entry_index);
struct pfe_mac_addr gemac_get_hash(void *base);
void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
struct pfe_mac_addr gem_get_laddr1(void *base);
struct pfe_mac_addr gem_get_laddr2(void *base);
struct pfe_mac_addr gem_get_laddr3(void *base);
struct pfe_mac_addr gem_get_laddr4(void *base);
struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
void gemac_set_config(void *base, struct gemac_cfg *cfg);
void gemac_allow_broadcast(void *base);
void gemac_no_broadcast(void *base);
void gemac_enable_1536_rx(void *base);
void gemac_disable_1536_rx(void *base);
int gemac_set_rx(void *base, int mtu);
void gemac_enable_rx_jmb(void *base);
void gemac_disable_rx_jmb(void *base);
void gemac_enable_stacked_vlan(void *base);
void gemac_disable_stacked_vlan(void *base);
void gemac_enable_pause_rx(void *base);
void gemac_disable_pause_rx(void *base);
void gemac_enable_pause_tx(void *base);
void gemac_disable_pause_tx(void *base);
void gemac_enable_copy_all(void *base);
void gemac_disable_copy_all(void *base);
void gemac_set_bus_width(void *base, int width);
void gemac_set_wol(void *base, u32 wol_conf);
void gpi_init(void *base, struct gpi_cfg *cfg);
void gpi_reset(void *base);
void gpi_enable(void *base);
void gpi_disable(void *base);
void gpi_set_config(void *base, struct gpi_cfg *cfg);
void hif_init(void);
void hif_tx_enable(void);
void hif_tx_disable(void);
void hif_rx_enable(void);
void hif_rx_disable(void);
/* Get Chip Revision level
*
*/
@ -336,4 +400,23 @@ static inline void hif_tx_dma_start(void)
writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
}
static inline void *pfe_mem_ptov(phys_addr_t paddr)
{
return rte_mem_iova2virt(paddr);
}
static phys_addr_t pfe_mem_vtop(uint64_t vaddr) __attribute__((unused));
static inline phys_addr_t pfe_mem_vtop(uint64_t vaddr)
{
const struct rte_memseg *memseg;
memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
if (memseg)
return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
return (size_t)NULL;
}
#endif /* _PFE_H_ */

View File

@ -6,4 +6,18 @@ if host_machine.system() != 'linux'
endif
deps += ['bus_dpaa']
sources = files('pfe_ethdev.c')
sources = files('pfe_ethdev.c',
'pfe_hal.c',
'pfe_hif_lib.c',
'pfe_hif.c')
if cc.has_argument('-Wno-pointer-arith')
cflags += '-Wno-pointer-arith'
endif
# Driver uses below experimental APIs
# rte_mem_iova2virt
# rte_mem_virt2memseg
allow_experimental_apis = true
includes += include_directories('base')

View File

@ -17,6 +17,8 @@ extern unsigned int pfe_svr;
#define SVR_LS1012A_REV2 0x87040020
#define SVR_LS1012A_REV1 0x87040010
#define PFE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
#define MAX_MTU_ON_REV1 1878
struct ls1012a_eth_platform_data {
/* device specific information */
u32 device_flags;

View File

@ -23,9 +23,32 @@ static struct pfe *g_pfe;
* information from HW.
*/
unsigned int pfe_svr = SVR_LS1012A_REV1;
static void *cbus_emac_base[3];
static void *cbus_gpi_base[3];
int pfe_logtype_pmd;
/* pfe_gemac_init
*/
static int
pfe_gemac_init(struct pfe_eth_priv_s *priv)
{
struct gemac_cfg cfg;
cfg.speed = SPEED_1000M;
cfg.duplex = DUPLEX_FULL;
gemac_set_config(priv->EMAC_baseaddr, &cfg);
gemac_allow_broadcast(priv->EMAC_baseaddr);
gemac_enable_1536_rx(priv->EMAC_baseaddr);
gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
gemac_enable_pause_rx(priv->EMAC_baseaddr);
gemac_set_bus_width(priv->EMAC_baseaddr, 64);
gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
return 0;
}
static void
pfe_soc_version_get(void)
{
@ -100,18 +123,44 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
{
struct rte_eth_dev *eth_dev = NULL;
struct pfe_eth_priv_s *priv = NULL;
struct ls1012a_eth_platform_data *einfo;
struct ls1012a_pfe_platform_data *pfe_info;
int err;
eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
if (eth_dev == NULL)
return -ENOMEM;
/* Extract pltform data */
pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
if (!pfe_info) {
PFE_PMD_ERR("pfe missing additional platform data");
err = -ENODEV;
goto err0;
}
einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;
/* einfo never be NULL, but no harm in having this check */
if (!einfo) {
PFE_PMD_ERR("pfe missing additional gemacs platform data");
err = -ENODEV;
goto err0;
}
priv = eth_dev->data->dev_private;
priv->ndev = eth_dev;
priv->id = einfo[id].gem_id;
priv->pfe = pfe;
pfe->eth.eth_priv[id] = priv;
/* Set the info in the priv to the current info */
priv->einfo = &einfo[id];
priv->EMAC_baseaddr = cbus_emac_base[id];
priv->PHY_baseaddr = cbus_emac_base[id];
priv->GPI_baseaddr = cbus_gpi_base[id];
#define HIF_GEMAC_TMUQ_BASE 6
priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
priv->high_tmu_q = priv->low_tmu_q + 1;
@ -129,6 +178,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
}
eth_dev->data->mtu = 1500;
pfe_gemac_init(priv);
eth_dev->data->nb_rx_queues = 1;
eth_dev->data->nb_tx_queues = 1;
@ -146,6 +196,58 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
return err;
}
/* Locate the device-tree node for GEMAC @port under @parent and fill in
 * the matching entry of @pdata (gem id, MAC address, mdio mux value).
 *
 * @param[in]  pfe	PFE device context (mdio_muxval[] is updated)
 * @param[in]  parent	DT parent node containing the gemac children
 * @param[in]  port	GEMAC port number to look up (matched against "reg")
 * @param[in]  if_cnt	number of candidate child nodes to scan
 * @param[out] pdata	platform data to populate for this port
 * @return		0 on success, -1 if no child node matches @port
 *
 * NOTE(review): the function name carries a typo ("proprties") but is kept
 * as-is since the caller uses it.
 */
static int
pfe_get_gemac_if_proprties(struct pfe *pfe,
		__rte_unused const struct device_node *parent,
		unsigned int port, unsigned int if_cnt,
		struct ls1012a_pfe_platform_data *pdata)
{
	const struct device_node *gem = NULL;
	size_t size;
	unsigned int ii = 0, phy_id = 0;
	const u32 *addr;
	const void *mac_addr;

	/* Walk the children until one whose "reg" property equals @port */
	for (ii = 0; ii < if_cnt; ii++) {
		gem = of_get_next_child(parent, gem);
		if (!gem)
			goto err;
		addr = of_get_property(gem, "reg", &size);
		/* DT properties are big-endian on-disk */
		if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
			break;
	}

	if (ii >= if_cnt) {
		PFE_PMD_ERR("Failed to find interface = %d", if_cnt);
		goto err;
	}

	pdata->ls1012a_eth_pdata[port].gem_id = port;

	/* MAC address is optional in the DT; keep whatever was there if
	 * it is absent.
	 */
	mac_addr = of_get_mac_address(gem);

	if (mac_addr) {
		memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
		       ETH_ALEN);
	}

	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
	if (!addr) {
		PFE_PMD_ERR("Invalid mdio-mux-val....");
	} else {
		phy_id = rte_be_to_cpu_32((unsigned int)*addr);
		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
	}

	/* NOTE(review): phy_id in pdata is read here but never assigned in
	 * this function - presumably it relies on the platform data being
	 * zero-initialized by the caller; confirm.
	 */
	if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
		pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
			 pdata->ls1012a_eth_pdata[port].mdio_muxval;

	return 0;

err:
	return -1;
}
/* Parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
@ -204,7 +306,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev)
const uint32_t *addr;
uint64_t cbus_addr, ddr_size, cbus_size;
int rc = -1, fd = -1, gem_id;
unsigned int interface_count = 0;
unsigned int ii, interface_count = 0;
size_t size = 0;
struct pfe_vdev_init_params init_params = {
.gem_id = -1
@ -268,6 +370,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev)
goto err;
}
g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
g_pfe->ddr_size = ddr_size;
g_pfe->cbus_size = cbus_size;
@ -299,6 +402,42 @@ pmd_pfe_probe(struct rte_vdev_device *vdev)
PFE_PMD_INFO("num interfaces = %d ", interface_count);
g_pfe->max_intf = interface_count;
g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
for (ii = 0; ii < interface_count; ii++) {
pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count,
&g_pfe->platform_data);
}
pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);
PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));
PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));
PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));
PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION));
cbus_emac_base[0] = EMAC1_BASE_ADDR;
cbus_emac_base[1] = EMAC2_BASE_ADDR;
cbus_gpi_base[0] = EGPI1_BASE_ADDR;
cbus_gpi_base[1] = EGPI2_BASE_ADDR;
rc = pfe_hif_lib_init(g_pfe);
if (rc < 0)
goto err_hif_lib;
rc = pfe_hif_init(g_pfe);
if (rc < 0)
goto err_hif;
pfe_soc_version_get();
eth_init:
if (init_params.gem_id < 0)
@ -318,6 +457,12 @@ pmd_pfe_probe(struct rte_vdev_device *vdev)
return 0;
err_eth:
pfe_hif_exit(g_pfe);
err_hif:
pfe_hif_lib_exit(g_pfe);
err_hif_lib:
err_prop:
munmap(g_pfe->cbus_baseaddr, cbus_size);
err:
@ -347,6 +492,12 @@ pmd_pfe_remove(struct rte_vdev_device *vdev)
pfe_eth_exit(eth_dev, g_pfe);
munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
if (g_pfe->nb_devs == 0) {
pfe_hif_exit(g_pfe);
pfe_hif_lib_exit(g_pfe);
rte_free(g_pfe);
g_pfe = NULL;
}
return 0;
}

629
drivers/net/pfe/pfe_hal.c Normal file
View File

@ -0,0 +1,629 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2019 NXP
*/
#include "pfe_logs.h"
#include "pfe_mod.h"
#define PFE_MTU_RESET_MASK 0xC000FFFF
void *cbus_base_addr;
void *ddr_base_addr;
unsigned long ddr_phys_base_addr;
unsigned int ddr_size;
static struct pe_info pe[MAX_PE];
/* Initializes the PFE library.
* Must be called before using any of the library functions.
*
* @param[in] cbus_base CBUS virtual base address (as mapped in
* the host CPU address space)
* @param[in] ddr_base PFE DDR range virtual base address (as
* mapped in the host CPU address space)
* @param[in] ddr_phys_base PFE DDR range physical base address (as
* mapped in platform)
* @param[in] size PFE DDR range size (as defined by the host
* software)
*/
void
pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
unsigned int size)
{
cbus_base_addr = cbus_base;
ddr_base_addr = ddr_base;
ddr_phys_base_addr = ddr_phys_base;
ddr_size = size;
pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
#if !defined(CONFIG_FSL_PFE_UTIL_DISABLED)
pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
#endif
}
/**************************** MTIP GEMAC ***************************/
/* Enable Rx Checksum Engine. With this enabled, Frame with bad IP,
 * TCP or UDP checksums are discarded
 *
 * @param[in] base	GEMAC base address.
 *
 * Intentional no-op: no register programming was found for this on the
 * MTIP GEMAC; RX checksum validation is handled elsewhere in the pipe.
 */
void
gemac_enable_rx_checksum_offload(__rte_unused void *base)
{
	/*Do not find configuration to do this */
}
/* Disable Rx Checksum Engine.
 *
 * @param[in] base	GEMAC base address.
 *
 * Intentional no-op: see gemac_enable_rx_checksum_offload().
 */
void
gemac_disable_rx_checksum_offload(__rte_unused void *base)
{
	/*Do not find configuration to do this */
}
/* GEMAC set speed.
 * @param[in] base	GEMAC base address
 * @param[in] gem_speed	GEMAC speed (10, 100 or 1000 Mbps)
 */
void
gemac_set_speed(void *base, enum mac_speed gem_speed)
{
	u32 ecr, rcr;

	/* Start from the 100M encoding: SPEED and RMII_10T both clear */
	ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
	rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;

	if (gem_speed == SPEED_10M)
		rcr |= EMAC_RCNTRL_RMII_10T;
	else if (gem_speed == SPEED_1000M)
		ecr |= EMAC_ECNTRL_SPEED;

	writel(ecr, (base + EMAC_ECNTRL_REG));
	writel(rcr, (base + EMAC_RCNTRL_REG));
}
/* GEMAC set duplex.
* @param[in] base GEMAC base address
* @param[in] duplex GEMAC duplex mode (Full, Half)
*/
void
gemac_set_duplex(void *base, int duplex)
{
if (duplex == DUPLEX_HALF) {
writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
+ EMAC_TCNTRL_REG);
writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
+ EMAC_RCNTRL_REG));
} else {
writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
+ EMAC_TCNTRL_REG);
writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
+ EMAC_RCNTRL_REG));
}
}
/* GEMAC set mode.
 * @param[in] base	GEMAC base address
 * @param[in] mode	GEMAC operation mode (MII, RMII, RGMII, SGMII)
 */
void
gemac_set_mode(void *base, __rte_unused int mode)
{
	u32 rcr = readl(base + EMAC_RCNTRL_REG);

	/* loopback off; flow control, MII mode and CRC forwarding on */
	rcr &= ~EMAC_RCNTRL_LOOP;
	rcr |= EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD;

	writel(rcr, base + EMAC_RCNTRL_REG);
}
/* GEMAC enable function.
* @param[in] base GEMAC base address
*/
void
gemac_enable(void *base)
{
writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
EMAC_ECNTRL_REG);
}
/* GEMAC disable function.
* @param[in] base GEMAC base address
*/
void
gemac_disable(void *base)
{
writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
EMAC_ECNTRL_REG);
}
/* GEMAC TX disable function.
* @param[in] base GEMAC base address
*/
void
gemac_tx_disable(void *base)
{
writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
EMAC_TCNTRL_REG);
}
void
gemac_tx_enable(void *base)
{
writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
EMAC_TCNTRL_REG);
}
/* Sets the hash register of the MAC.
 * This register is used for matching unicast and multicast frames.
 *
 * @param[in] base	GEMAC base address.
 * @param[in] hash	64-bit hash to be configured.
 */
void
gemac_set_hash(void *base, struct pfe_mac_addr *hash)
{
	/* lower 32 bits in GALR, upper 32 bits in GAUR */
	writel(hash->bottom, base + EMAC_GALR);
	writel(hash->top, base + EMAC_GAUR);
}
/* Program one of the MAC's specific (exact-match) address entries.
 *
 * @param[in] base		GEMAC base address
 * @param[in] address		MAC address to program
 * @param[in] entry_index	1-based entry index (1..EMAC_SPEC_ADDR_MAX)
 *
 * Entry 1 goes to the EMAC_PHY_ADDR_LOW/HIGH pair; entries 2..N map to the
 * EMAC_SMAC_0_x register pairs, 8 bytes apart (entry 2 lands at offset 0).
 * NOTE(review): 0x8808 merged into the high word appears to be the MAC
 * control-frame type field expected by the IP - confirm against the
 * LS1012A reference manual.
 */
void
gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
		 unsigned int entry_index)
{
	if (entry_index < 1 || entry_index > EMAC_SPEC_ADDR_MAX)
		return;

	/* convert to 0-based; 0 selects the primary PHY_ADDR registers */
	entry_index = entry_index - 1;
	if (entry_index < 1) {
		writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
		writel((htonl(address->top) | 0x8808), base +
			EMAC_PHY_ADDR_HIGH);
	} else {
		writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
			+ EMAC_SMAC_0_0);
		writel((htonl(address->top) | 0x8808), base + ((entry_index -
			1) * 8) + EMAC_SMAC_0_1);
	}
}
/* Clear one of the MAC's specific (exact-match) address entries.
 *
 * @param[in] base		GEMAC base address
 * @param[in] entry_index	1-based entry index (1..EMAC_SPEC_ADDR_MAX)
 *
 * Register mapping mirrors gemac_set_laddrN(): entry 1 is the
 * EMAC_PHY_ADDR pair, entries 2..N are the EMAC_SMAC_0_x pairs.
 */
void
gemac_clear_laddrN(void *base, unsigned int entry_index)
{
	if (entry_index < 1 || entry_index > EMAC_SPEC_ADDR_MAX)
		return;

	/* convert to 0-based; 0 selects the primary PHY_ADDR registers */
	entry_index = entry_index - 1;
	if (entry_index < 1) {
		writel(0, base + EMAC_PHY_ADDR_LOW);
		writel(0, base + EMAC_PHY_ADDR_HIGH);
	} else {
		writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
		writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
	}
}
/* Set the loopback mode of the MAC. This can be either no loopback for
 * normal operation, local loopback through MAC internal loopback module or
 * PHY loopback for external loopback through a PHY. This asserts the
 * external loop pin.
 *
 * @param[in] base	GEMAC base address.
 * @param[in] gem_loop	Loopback mode to be enabled. LB_LOCAL - MAC
 *			Loopback, LB_EXT - PHY Loopback.
 */
void
gemac_set_loop(void *base, __rte_unused enum mac_loop gem_loop)
{
	u32 rcr;

	pr_info("%s()\n", __func__);

	rcr = readl(base + EMAC_RCNTRL_REG);
	writel(rcr | EMAC_RCNTRL_LOOP, base + EMAC_RCNTRL_REG);
}
/* GEMAC allow frames
* @param[in] base GEMAC base address
*/
void
gemac_enable_copy_all(void *base)
{
writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
EMAC_RCNTRL_REG));
}
/* GEMAC do not allow frames
* @param[in] base GEMAC base address
*/
void
gemac_disable_copy_all(void *base)
{
writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
EMAC_RCNTRL_REG));
}
/* GEMAC allow broadcast function.
* @param[in] base GEMAC base address
*/
void
gemac_allow_broadcast(void *base)
{
writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
EMAC_RCNTRL_REG);
}
/* GEMAC no broadcast function.
* @param[in] base GEMAC base address
*/
void
gemac_no_broadcast(void *base)
{
writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
EMAC_RCNTRL_REG);
}
/* GEMAC enable 1536 rx function.
* @param[in] base GEMAC base address
*/
void
gemac_enable_1536_rx(void *base)
{
/* Set 1536 as Maximum frame length */
writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK)
| (1536 << 16),
base + EMAC_RCNTRL_REG);
}
/* GEMAC set Max rx function.
* @param[in] base GEMAC base address
*/
int
gemac_set_rx(void *base, int mtu)
{
if (mtu < HIF_RX_PKT_MIN_SIZE || mtu > JUMBO_FRAME_SIZE) {
PFE_PMD_ERR("Invalid or not support MTU size");
return -1;
}
if (pfe_svr == SVR_LS1012A_REV1 &&
mtu > (MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD)) {
PFE_PMD_ERR("Max supported MTU on Rev1 is %d", MAX_MTU_ON_REV1);
return -1;
}
writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK)
| (mtu << 16),
base + EMAC_RCNTRL_REG);
return 0;
}
/* GEMAC enable jumbo function.
* @param[in] base GEMAC base address
*/
void
gemac_enable_rx_jmb(void *base)
{
if (pfe_svr == SVR_LS1012A_REV1) {
PFE_PMD_ERR("Jumbo not supported on Rev1");
return;
}
writel((readl(base + EMAC_RCNTRL_REG) & PFE_MTU_RESET_MASK) |
(JUMBO_FRAME_SIZE << 16), base + EMAC_RCNTRL_REG);
}
/* GEMAC enable stacked vlan function.
 * @param[in] base	GEMAC base address
 *
 * Intentional no-op: the MTIP GEMAC has no stacked-VLAN support, so there
 * is nothing to program.
 */
void
gemac_enable_stacked_vlan(__rte_unused void *base)
{
	/* MTIP doesn't support stacked vlan */
}
/* GEMAC enable pause rx function.
* @param[in] base GEMAC base address
*/
void
gemac_enable_pause_rx(void *base)
{
writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
base + EMAC_RCNTRL_REG);
}
/* GEMAC disable pause rx function.
* @param[in] base GEMAC base address
*/
void
gemac_disable_pause_rx(void *base)
{
writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
base + EMAC_RCNTRL_REG);
}
/* GEMAC enable pause tx function.
 * @param[in] base	GEMAC base address
 *
 * Programs the RX FIFO section-empty threshold; a non-zero value makes
 * the MAC emit pause frames when the FIFO drains below it.
 */
void
gemac_enable_pause_tx(void *base)
{
	writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
}
/* GEMAC disable pause tx function.
 * @param[in] base	GEMAC base address
 *
 * Zeroing the section-empty threshold stops pause-frame generation.
 */
void
gemac_disable_pause_tx(void *base)
{
	writel(0x0, base + EMAC_RX_SECTION_EMPTY);
}
/* GEMAC wol configuration
 * @param[in] base	GEMAC base address
 * @param[in] wol_conf	WoL register configuration (non-zero enables
 *			magic-packet wake and sleep mode)
 */
void
gemac_set_wol(void *base, u32 wol_conf)
{
	u32 ecr = readl(base + EMAC_ECNTRL_REG);

	if (wol_conf)
		ecr |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
	else
		ecr &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);

	writel(ecr, base + EMAC_ECNTRL_REG);
}
/* Sets Gemac bus width to 64bit
 * @param[in] base	GEMAC base address
 * @param[in] width	gemac bus width to be set possible values are 32/64/128
 *
 * Intentional no-op: the MTIP GEMAC bus width is fixed in hardware on
 * this SoC, so there is nothing to program.
 */
void
gemac_set_bus_width(__rte_unused void *base, __rte_unused int width)
{
}
/* Sets Gemac configuration.
* @param[in] base GEMAC base address
* @param[in] cfg GEMAC configuration
*/
void
gemac_set_config(void *base, struct gemac_cfg *cfg)
{
/*GEMAC config taken from VLSI */
writel(0x00000004, base + EMAC_TFWR_STR_FWD);
writel(0x00000005, base + EMAC_RX_SECTION_FULL);
if (pfe_svr == SVR_LS1012A_REV1)
writel(0x00000768, base + EMAC_TRUNC_FL);
else
writel(0x00003fff, base + EMAC_TRUNC_FL);
writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
gemac_set_mode(base, cfg->mode);
gemac_set_speed(base, cfg->speed);
gemac_set_duplex(base, cfg->duplex);
}
/**************************** GPI ***************************/
/* Initializes a GPI block.
 * @param[in] base	GPI base address
 * @param[in] cfg	GPI configuration
 *
 * Order matters: reset first, then make sure the block is disabled
 * before its configuration registers are programmed.
 */
void
gpi_init(void *base, struct gpi_cfg *cfg)
{
	gpi_reset(base);

	gpi_disable(base);

	gpi_set_config(base, cfg);
}
/* Resets a GPI block.
 * @param[in] base	GPI base address
 *
 * Self-clearing software reset via the GPI control register.
 */
void
gpi_reset(void *base)
{
	writel(CORE_SW_RESET, base + GPI_CTRL);
}
/* Enables a GPI block.
 * @param[in] base	GPI base address
 */
void
gpi_enable(void *base)
{
	writel(CORE_ENABLE, base + GPI_CTRL);
}
/* Disables a GPI block.
 * @param[in] base	GPI base address
 */
void
gpi_disable(void *base)
{
	writel(CORE_DISABLE, base + GPI_CTRL);
}
/* Sets the configuration of a GPI block.
 * @param[in] base	GPI base address
 * @param[in] cfg	GPI configuration
 *
 * Wires the GPI to the two BMU buffer pools (BMU1 = LMEM, BMU2 = DDR),
 * points it at the CLASS input queue, and programs header/buffer sizes
 * and the per-block thresholds from @cfg. Addresses handed to the
 * hardware must be in the PFE's CBUS view, hence CBUS_VIRT_TO_PFE().
 */
void
gpi_set_config(void *base, struct gpi_cfg *cfg)
{
	/* buffer alloc/free handshake registers of both BMUs */
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL),	base
		+ GPI_LMEM_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL),	base
		+ GPI_LMEM_FREE_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL),	base
		+ GPI_DDR_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),	base
		+ GPI_DDR_FREE_ADDR);
	/* received packets are handed to the CLASS input queue */
	writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR),	base + GPI_CLASS_ADDR);
	writel(DDR_HDR_SIZE,	base + GPI_DDR_DATA_OFFSET);
	writel(LMEM_HDR_SIZE,	base + GPI_LMEM_DATA_OFFSET);
	writel(0,	base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
	writel(0,	base + GPI_DDR_SEC_BUF_DATA_OFFSET);
	writel((DDR_HDR_SIZE << 16) |	LMEM_HDR_SIZE,	base + GPI_HDR_SIZE);
	writel((DDR_BUF_SIZE << 16) |	LMEM_BUF_SIZE,	base + GPI_BUF_SIZE);

	writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
		GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
	writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
	writel(cfg->aseq_len,	base + GPI_DTX_ASEQ);
	writel(1, base + GPI_TOE_CHKSUM_EN);

	if (cfg->mtip_pause_reg) {
		writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
		writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
	}
}
/**************************** HIF ***************************/
/* Initializes HIF copy block.
 *
 * Programs the RX/TX buffer-descriptor polling intervals; the HIF base
 * address is implicit in the HIF_POLL_CTRL register macro.
 */
void
hif_init(void)
{
	/*Initialize HIF registers*/
	writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
	       HIF_POLL_CTRL);
}
/* Enable hif tx DMA and interrupt
*
*/
void
hif_tx_enable(void)
{
writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
HIF_INT_ENABLE);
}
/* Disable hif tx DMA and interrupt
 */
void
hif_tx_disable(void)
{
	u32 hif_int;

	writel(0, HIF_TX_CTRL);

	hif_int = readl(HIF_INT_ENABLE);
	/* Clear only the TX packet interrupt enable. The previous code did
	 * 'hif_int &= HIF_TXPKT_INT_EN', which instead cleared every OTHER
	 * enable bit (including HIF_INT_EN and the RX enable) while leaving
	 * TX interrupts on.
	 */
	hif_int &= ~HIF_TXPKT_INT_EN;
	writel(hif_int, HIF_INT_ENABLE);
}
/* Enable hif rx DMA and interrupt
*
*/
void
hif_rx_enable(void)
{
hif_rx_dma_start();
writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
HIF_INT_ENABLE);
}
/* Disable hif rx DMA and interrupt
 */
void
hif_rx_disable(void)
{
	u32 hif_int;

	writel(0, HIF_RX_CTRL);

	hif_int = readl(HIF_INT_ENABLE);
	/* Clear only the RX packet interrupt enable. The previous code did
	 * 'hif_int &= HIF_RXPKT_INT_EN', which instead cleared every OTHER
	 * enable bit (including HIF_INT_EN and the TX enable) while leaving
	 * RX interrupts on.
	 */
	hif_int &= ~HIF_RXPKT_INT_EN;
	writel(hif_int, HIF_INT_ENABLE);
}

256
drivers/net/pfe/pfe_hif.c Normal file
View File

@ -0,0 +1,256 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2019 NXP
*/
#include "pfe_logs.h"
#include "pfe_mod.h"
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
/* Allocate one contiguous, cache-aligned region holding both the RX and
 * the TX buffer-descriptor rings and record its virtual and physical
 * base addresses in @hif.
 *
 * @param[in,out] hif	HIF context to populate
 * @return		0 on success, -ENOMEM on allocation failure
 */
static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
	void *addr;

	PMD_INIT_FUNC_TRACE();

	addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
		HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
	if (!addr) {
		PFE_PMD_ERR("Could not allocate buffer descriptors!");
		/* direct return; the old 'goto err0' label only returned */
		return -ENOMEM;
	}

	/* The hardware needs the physical address of the rings */
	hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
	hif->descr_baseaddr_v = addr;
	hif->rx_ring_size = HIF_RX_DESC_NT;
	hif->tx_ring_size = HIF_TX_DESC_NT;

	return 0;
}
/* Release the descriptor-ring memory allocated by pfe_hif_alloc_descr().
 * @param[in] hif	HIF context whose rings are freed
 */
static void
pfe_hif_free_descr(struct pfe_hif *hif)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(hif->descr_baseaddr_v);
}
#if defined(LS1012A_PFE_RESET_WA)
/* Mark every RX buffer descriptor as LAST_BD so the BDP stops fetching
 * further descriptors (part of the LS1012A reset workaround).
 *
 * @param[in] hif	HIF context whose RX ring is flagged
 */
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
{
	struct hif_desc *ring = hif->rx_base;
	u32 i;

	/*Mark all descriptors as LAST_BD */
	for (i = 0; i < hif->rx_ring_size; i++)
		ring[i].ctrl |= BD_CTRL_LAST_BD;
}
/* Header prepended (in LMEM) to a packet injected into the CLASS input
 * queue; layout must match what the CLASS firmware expects.
 */
struct class_rx_hdr_t {
	u32     next_ptr;       /* ptr to the start of the first DDR buffer */
	u16     length;         /* total packet length */
	u16     phyno;          /* input physical port number */
	u32     status;         /* gemac status bits */
	u32     status2;            /* reserved for software usage */
};
/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
* except overflow
*/
#define STATUS_BAD_FRAME_ERR BIT(16)
#define STATUS_LENGTH_ERR BIT(17)
#define STATUS_CRC_ERR BIT(18)
#define STATUS_TOO_SHORT_ERR BIT(19)
#define STATUS_TOO_LONG_ERR BIT(20)
#define STATUS_CODE_ERR BIT(21)
#define STATUS_MC_HASH_MATCH BIT(22)
#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
#define STATUS_UNICAST_HASH_MATCH BIT(24)
#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
#define MIN_PKT_SIZE 64
#define DUMMY_PKT_COUNT 128
/* Copy @len bytes from @src to LMEM at @dst as 32-bit big-endian words.
 * A trailing partial word is copied as a whole word (same as the
 * original word-stepped loop).
 */
static inline void
copy_to_lmem(u32 *dst, u32 *src, int len)
{
	int words = (len + (int)sizeof(u32) - 1) / (int)sizeof(u32);
	int w;

	for (w = 0; w < words; w++)
		dst[w] = htonl(src[w]);
}
#if defined(RTE_TOOLCHAIN_GCC)
__attribute__ ((optimize(1)))
#endif
/* Inject one hand-crafted dummy frame into the CLASS input queue so the
 * HIF RX BDP makes forward progress and can drain to idle (LS1012A reset
 * workaround). Allocates one buffer from each BMU pool, builds a
 * class_rx_hdr_t plus a canned ARP-like payload in LMEM, then kicks the
 * CLASS via CLASS_INQ_PKTPTR. Silently returns if either BMU pool is
 * exhausted.
 */
static void
send_dummy_pkt_to_hif(void)
{
	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
	u64 physaddr;
	struct class_rx_hdr_t local_hdr;
	/* pre-built frame bytes, stored as little-endian u32 words */
	static u32 dummy_pkt[] =  {
		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };

	/* reading BMU_ALLOC_CTRL pops one buffer from the pool (0 = empty) */
	ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!ddr_ptr)
		return;

	lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!lmem_ptr)
		return;

	PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
	physaddr = DDR_VIRT_TO_PFE(ddr_ptr);

	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);

	local_hdr.phyno = htons(0); /* RX_PHY_0 */
	local_hdr.length = htons(MIN_PKT_SIZE);

	local_hdr.next_ptr = htonl((u32)physaddr);
	/*Mark checksum is correct */
	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
				STATUS_UDP_CHECKSUM_CORRECT |
				STATUS_TCP_CHECKSUM_CORRECT |
				STATUS_UNICAST_HASH_MATCH |
				STATUS_CUMULATIVE_ARC_HIT));

	/* header first, payload after the fixed LMEM header area */
	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
		     sizeof(local_hdr));

	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
		     0x40);

	/* hand the LMEM buffer to the CLASS input queue */
	writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
}
/* Bring the HIF RX block to an idle state (LS1012A reset workaround):
 * stop descriptor fetching, mask interrupts, and feed dummy packets
 * until the RX DMA goes quiescent or DUMMY_PKT_COUNT attempts elapse.
 *
 * @param[in] hif	HIF context
 */
void
pfe_hif_rx_idle(struct pfe_hif *hif)
{
	int hif_stop_loop = DUMMY_PKT_COUNT;
	u32 rx_status;

	pfe_hif_disable_rx_desc(hif);
	PFE_PMD_INFO("Bringing hif to idle state...");
	writel(0, HIF_INT_ENABLE);

	/*If HIF Rx BDP is busy send a dummy packet */
	do {
		rx_status = readl(HIF_RX_STATUS);
		/* Exit as soon as the DMA is idle; the old loop slept for
		 * all DUMMY_PKT_COUNT (128) seconds unconditionally.
		 */
		if (!(rx_status & BDP_CSR_RX_DMA_ACTV))
			break;
		send_dummy_pkt_to_hif();
		sleep(1);
	} while (--hif_stop_loop);

	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
		PFE_PMD_ERR("Failed\n");
	else
		PFE_PMD_INFO("Done\n");
}
#endif
/*
 * pfe_hif_init
 * This function initializes the baseaddresses and irq, etc.
 *
 * Allocates the descriptor rings, enables the host GPI, and - when the
 * PFE_INTR_SUPPORT environment variable is set - wires up eventfd/epoll
 * based interrupt notification through the PFE character device.
 *
 * @param[in,out] pfe	PFE device context
 * @return		0 on success, negative/-1 on failure; all resources
 *			acquired here are released on the error paths.
 */
int
pfe_hif_init(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;
	struct epoll_event epoll_ev;
	int event_fd = -1, epoll_fd = -1, pfe_cdev_fd = -1;
	int err;

	PMD_INIT_FUNC_TRACE();

#if defined(LS1012A_PFE_RESET_WA)
	pfe_hif_rx_idle(hif);
#endif

	err = pfe_hif_alloc_descr(hif);
	if (err)
		return err;

	rte_spinlock_init(&hif->tx_lock);
	rte_spinlock_init(&hif->lock);

	gpi_enable(HGPI_BASE_ADDR);

	if (getenv("PFE_INTR_SUPPORT")) {
		pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
		if (pfe_cdev_fd < 0) {
			PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
				     PFE_CDEV_PATH);
			pfe->cdev_fd = PFE_CDEV_INVALID_FD;
			err = -1;
			goto err_free;
		}
		pfe->cdev_fd = pfe_cdev_fd;

		event_fd = eventfd(0, EFD_NONBLOCK);
		/* the original code never checked eventfd() */
		if (event_fd < 0) {
			PFE_PMD_ERR("eventfd failed with err: %d\n", errno);
			err = -1;
			goto err_close;
		}

		/* hif interrupt enable */
		err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
		if (err) {
			PFE_PMD_ERR("\nioctl failed for intr enable err: %d\n",
				    errno);
			goto err_close;
		}

		epoll_fd = epoll_create(1);
		if (epoll_fd < 0) {
			PFE_PMD_ERR("epoll_create failed with err = %d\n",
				    errno);
			err = -1;
			goto err_close;
		}
		epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
		epoll_ev.data.fd = event_fd;
		err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
		if (err < 0) {
			PFE_PMD_ERR("epoll_ctl failed with err = %d\n", errno);
			goto err_close;
		}
		pfe->hif.epoll_fd = epoll_fd;
	}

	return 0;

err_close:
	/* close fds opened above; the original paths leaked them */
	if (epoll_fd >= 0)
		close(epoll_fd);
	if (event_fd >= 0)
		close(event_fd);
	close(pfe_cdev_fd);
	pfe->cdev_fd = PFE_CDEV_INVALID_FD;
err_free:
	gpi_disable(HGPI_BASE_ADDR);
	pfe_hif_free_descr(hif);
	return err;
}
/* pfe_hif_exit-
 *
 * Tear down the HIF: mark both logical clients disabled (under the HIF
 * lock), quiesce and disable the RX/TX paths if they were set up, free
 * the descriptor rings, and disable the host GPI.
 *
 * @param[in,out] pfe	PFE device context
 */
void
pfe_hif_exit(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->lock);
	hif->shm->g_client_status[0] = 0;
	/* Make sure all clients are disabled*/
	hif->shm->g_client_status[1] = 0;

	rte_spinlock_unlock(&hif->lock);

	if (hif->setuped) {
#if defined(LS1012A_PFE_RESET_WA)
		/* must idle the RX DMA before disabling it (reset WA) */
		pfe_hif_rx_idle(hif);
#endif
		/*Disable Rx/Tx */
		hif_rx_disable();
		hif_tx_disable();

		pfe_hif_free_descr(hif);
		pfe->hif.setuped = 0;
	}
	gpi_disable(HGPI_BASE_ADDR);
}

106
drivers/net/pfe/pfe_hif.h Normal file
View File

@ -0,0 +1,106 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2019 NXP
*/
#ifndef _PFE_HIF_H_
#define _PFE_HIF_H_
#define HIF_CLIENT_QUEUES_MAX 16
#define HIF_RX_PKT_MIN_SIZE RTE_CACHE_LINE_SIZE
/*
 * HIF_TX_DESC_NT value should always be greater than 4,
 * otherwise HIF_TX_POLL_MARK will become zero.
 */
#define HIF_RX_DESC_NT 64
#define HIF_TX_DESC_NT 2048
/* HIF client identifiers — presumably one logical client per GEM (MAC)
 * port; HIF_CLIENTS_MAX is the table size. TODO confirm against users.
 */
enum {
	PFE_CL_GEM0 = 0,
	PFE_CL_GEM1,
	HIF_CLIENTS_MAX
};
/* Per-client rx queue bookkeeping kept on the HIF driver side. */
struct hif_rx_queue {
	struct rx_queue_desc *base;	/* descriptor ring base */
	u32 size;			/* number of entries in the ring */
	u32 write_idx;			/* producer (write) index */
};
/* Per-client tx queue bookkeeping kept on the HIF driver side. */
struct hif_tx_queue {
	struct tx_queue_desc *base;	/* descriptor ring base */
	u32 size;			/* number of entries in the ring */
	u32 ack_idx;			/* next entry to acknowledge — TODO confirm */
};
/* Per-client state (rx and tx queue sets) registered with the HIF driver. */
struct hif_client {
	unsigned int rx_qn;	/* number of rx queues in use */
	struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
	unsigned int tx_qn;	/* number of tx queues in use */
	struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
};
/* HIF hardware buffer descriptor: four 32-bit words as seen by the HIF
 * block, including a link to the next descriptor.
 */
struct hif_desc {
	u32 ctrl;	/* control flags */
	u32 status;	/* completion status */
	u32 data;	/* buffer address — presumably bus/physical; confirm */
	u32 next;	/* address of the next descriptor in the ring */
};
/* Same layout as struct hif_desc but without the 'next' link word. */
struct __hif_desc {
	u32 ctrl;
	u32 status;
	u32 data;
};
/* Software shadow of a tx descriptor (see pfe_hif.tx_sw_queue): what the
 * driver needs in order to clean up a transmitted buffer.
 */
struct hif_desc_sw {
	dma_addr_t data;	/* buffer DMA address */
	u16 len;		/* buffer length in bytes */
	u8 client_id;		/* owning client id */
	u8 q_no;		/* client queue number */
	u16 flags;
};
/* Top-level host-interface state: client table, shared memory, rx/tx
 * descriptor rings and their indices, plus the locks protecting them.
 */
struct pfe_hif {
	/* To store registered clients in hif layer */
	struct hif_client client[HIF_CLIENTS_MAX];
	struct hif_shm *shm;			/* shared memory with clients */
	void *descr_baseaddr_v;			/* descriptor area, virtual */
	unsigned long descr_baseaddr_p;		/* descriptor area, physical */
	struct hif_desc *rx_base;		/* rx descriptor ring */
	u32 rx_ring_size;
	u32 rxtoclean_index;			/* next rx entry to clean */
	void *rx_buf_addr[HIF_RX_DESC_NT];
	void *rx_buf_vaddr[HIF_RX_DESC_NT];
	int rx_buf_len[HIF_RX_DESC_NT];
	unsigned int qno;
	unsigned int client_id;
	unsigned int client_ctrl;
	unsigned int started;
	unsigned int setuped;	/* non-zero once rings are set up; cleared in pfe_hif_exit() */
	struct hif_desc *tx_base;		/* tx descriptor ring */
	u32 tx_ring_size;
	u32 txtosend;				/* next tx entry to send */
	u32 txtoclean;				/* next tx entry to clean */
	u32 txavail;				/* free tx entries */
	u32 txtoflush;
	struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
	int32_t epoll_fd; /**< File descriptor created for interrupt polling */
	/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
	rte_spinlock_t tx_lock;
	/* lock synchronizes hif rx queue processing */
	rte_spinlock_t lock;
	struct rte_device *dev;
};
int pfe_hif_init(struct pfe *pfe);
void pfe_hif_exit(struct pfe *pfe);
void pfe_hif_rx_idle(struct pfe_hif *hif);
#endif /* _PFE_HIF_H_ */

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2019 NXP
*/
#include "pfe_logs.h"
#include "pfe_mod.h"
/*
 * pfe_hif_lib_init
 * Placeholder initialisation of the HIF library layer: currently only
 * traces entry and reports success.
 *
 * Always returns 0.
 */
int
pfe_hif_lib_init(__rte_unused struct pfe *pfe)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}
/*
 * pfe_hif_lib_exit
 * Placeholder teardown of the HIF library layer: currently only traces
 * entry; nothing is allocated by pfe_hif_lib_init() yet.
 */
void
pfe_hif_lib_exit(__rte_unused struct pfe *pfe)
{
	PMD_INIT_FUNC_TRACE();
}

View File

@ -0,0 +1,162 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2019 NXP
*/
#ifndef _PFE_HIF_LIB_H_
#define _PFE_HIF_LIB_H_
#define HIF_CL_REQ_TIMEOUT 10
#define GFP_DMA_PFE 0
/* Client (de)registration request codes for the HIF driver. */
enum {
	REQUEST_CL_REGISTER = 0,
	REQUEST_CL_UNREGISTER,
	HIF_REQUEST_MAX
};
enum {
/* Event to indicate that client rx queue is reached water mark level */
EVENT_HIGH_RX_WM = 0,
/* Event to indicate that, packet received for client */
EVENT_RX_PKT_IND,
/* Event to indicate that, packet tx done for client */
EVENT_TXDONE_IND,
HIF_EVENT_MAX
};
/* Structure to store client queue info */
/* Client-side view of an rx queue shared with the HIF driver. */
struct hif_client_rx_queue {
	struct rx_queue_desc *base;	/* descriptor ring base */
	u32 size;			/* ring size in entries */
	u32 read_idx;			/* consumer index */
	u32 write_idx;			/* producer index */
	u16 queue_id;
	u16 port_id;
	void *priv;			/* opaque per-queue client context */
};
/* Client-side view of a tx queue shared with the HIF driver. */
struct hif_client_tx_queue {
	struct tx_queue_desc *base;	/* descriptor ring base */
	u32 size;			/* ring size in entries */
	u32 read_idx;			/* consumer index */
	u32 write_idx;			/* producer index */
	u32 tx_pending;			/* descriptors sent, not yet cleaned */
	unsigned long jiffies_last_packet;
	u32 nocpy_flag;
	u32 prev_tmu_tx_pkts;		/* TMU tx counters — TODO confirm units */
	u32 done_tmu_tx_pkts;
	u16 queue_id;
	u16 port_id;
	void *priv;			/* opaque per-queue client context */
};
/* Per-client handle used by the HIF library: queue geometry, queue
 * state, and the callback invoked on HIF events (see HIF_EVENT_MAX).
 */
struct hif_client_s {
	int id;				/* client id (see HIF client enum) */
	unsigned int tx_qn;		/* number of tx queues */
	unsigned int rx_qn;		/* number of rx queues */
	void *rx_qbase;			/* rx queue descriptor memory */
	void *tx_qbase;			/* tx queue descriptor memory */
	int tx_qsize;
	int rx_qsize;
	int cpu_id;
	int port_id;
	struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
	struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
	/* called with (priv, event, queue number); semantics per HIF event */
	int (*event_handler)(void *data, int event, int qno);
	unsigned long queue_mask[HIF_EVENT_MAX];
	struct pfe *pfe;
	void *priv;			/* opaque client context */
};
/*
 * Client specific shared memory
 * It contains number of Rx/Tx queues, base addresses and queue sizes.
 * The ctrl field is packed/unpacked with the CLIENT_CTRL_* macros below.
 */
struct hif_client_shm {
	u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
	unsigned long rx_qbase; /*Rx queue base address */
	u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
	unsigned long tx_qbase; /* Tx queue base address */
	u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
};
/*Client shared memory ctrl bit description */
#define CLIENT_CTRL_RX_Q_CNT_OFST 0
#define CLIENT_CTRL_TX_Q_CNT_OFST 8
#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
& 0xFF)
#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
& 0xFF)
/*
 * Shared memory used to communicate between HIF driver and host/client
 * drivers. Before starting the hif driver, rx_buf_pool and rx_buf_pool_cnt
 * should be initialized with host buffers and the buffer count in the pool.
 * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
 */
/* Memory shared between the HIF driver and its clients (see comment above). */
struct hif_shm {
	u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
	/*Rx buffers required to initialize HIF rx descriptors */
	struct rte_mempool *pool;	/* mempool backing the rx buffers */
	void *rx_buf_pool[HIF_RX_DESC_NT];
	unsigned long g_client_status[2]; /*Global client status bit mask */
	/* Client specific shared memory */
	struct hif_client_shm client[HIF_CLIENTS_MAX];
};
#define CL_DESC_OWN BIT(31)
/* This sets ownership to HIF driver */
#define CL_DESC_LAST BIT(30)
/* This indicates last packet for multi buffers handling */
#define CL_DESC_FIRST BIT(29)
/* This indicates first packet for multi buffers handling */
#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
/* Rx queue ring entry exchanged between HIF driver and client;
 * ctrl is encoded/decoded with the CL_DESC_* macros above.
 */
struct rx_queue_desc {
	void *data;
	u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
	u32 client_ctrl;
};
/* Tx queue ring entry exchanged between client and HIF driver;
 * ctrl is encoded/decoded with the CL_DESC_* macros above.
 */
struct tx_queue_desc {
	void *data;
	u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
};
/* HIF Rx is not working properly for 2-byte aligned buffers and
 * ip_header should be 4-byte aligned for better performance.
 * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4-byte aligned.
 * In case of HW parse support:
 * "ip_header = 64 + 6(hif_header) + 16 (parse) + 14 (MAC Header)" will be
 * 4-byte aligned.
 */
#define PFE_HIF_SIZE sizeof(struct hif_hdr)
#ifdef RTE_LIBRTE_PFE_SW_PARSE
#define PFE_PKT_HEADER_SZ PFE_HIF_SIZE
#else
#define PFE_PKT_HEADER_SZ (PFE_HIF_SIZE + sizeof(struct pfe_parse))
#endif
#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
+ MAX_L4_HDR_SIZE)
/* Used in page mode to clamp packet size to the maximum supported by the hif
*hw interface (<16KiB)
*/
#define MAX_PFE_PKT_SIZE 16380UL
extern unsigned int emac_txq_cnt;
int pfe_hif_lib_init(struct pfe *pfe);
void pfe_hif_lib_exit(struct pfe *pfe);
#endif /* _PFE_HIF_LIB_H_ */

View File

@ -7,6 +7,9 @@
struct pfe;
#include "pfe.h"
#include "pfe_hif.h"
#include "pfe_hif_lib.h"
#include "pfe_eth.h"
#define PHYID_MAX_VAL 32
@ -42,6 +45,8 @@ struct pfe {
uint64_t ddr_size;
void *cbus_baseaddr;
uint64_t cbus_size;
struct ls1012a_pfe_platform_data platform_data;
struct pfe_hif hif;
struct pfe_eth eth;
int mdio_muxval[PHYID_MAX_VAL];
uint8_t nb_devs;