cgem: add 64-bit support

Add 64-bit address support to the Cadence CGEM Ethernet driver for use
in other SoCs such as the Zynq UltraScale+ and the SiFive HiFive
Unleashed.

Reviewed by:	philip, 0mp (manpages)
Differential Revision: https://reviews.freebsd.org/D24304
Author:		Thomas Skibo
Date:		2021-01-10 16:18:41 -04:00
Committed by:	Mitchell Horne
Commit:		facdd1cd20 (parent 5a181b8bce)

10 changed files with 323 additions and 126 deletions

@@ -106,6 +106,7 @@ MAN= aac.4 \
     cdceem.4 \
     cfi.4 \
     cfumass.4 \
+    ${_cgem.4} \
     ch.4 \
     chromebook_platform.4 \
     ${_chvgpio.4} \
@@ -903,6 +904,12 @@ _virtio_scsi.4= virtio_scsi.4
 _vtnet.4= vtnet.4
 .endif
+.if ${MACHINE_CPUARCH} == "arm" || ${MACHINE_CPUARCH} == "aarch64" || \
+    ${MACHINE_CPUARCH} == "riscv"
+_cgem.4= cgem.4
+MLINKS+=cgem.4 if_cgem.4
+.endif
 .if empty(MAN_ARCH)
 __arches= ${MACHINE} ${MACHINE_ARCH} ${MACHINE_CPUARCH}
 .elif ${MAN_ARCH} == "all"

@@ -24,7 +24,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd August 26, 2014
+.Dd January 10, 2021
 .Dt CGEM 4
 .Os
 .Sh NAME
@@ -44,7 +44,8 @@ The
 .Nm
 driver provides support for the Cadence GEM (Gigabit Ethernet MAC).
 The Cadence GEM is used in some SoC (System on a Chip) devices such as
-the Xilinx Zynq-7000 and the Atmel SAMA5D3.
+the Xilinx Zynq-7000, the Xilinx Zynq UltraScale+, and the SiFive
+HiFive Unleashed.
 .Pp
 The
 .Nm
@@ -284,15 +285,18 @@ There are
 variables that count
 packets discarded by the hardware (see below).
 .Pp
 The GEM used in the Zynq-7000 has a bug such that the receiver can
 potentially freeze up under a high load.
 The issue is described in sec. 16.7
 "Known Issues" of the Zynq-7000 SoC Technical Reference Manual (Xilinx
 UG585 v1.7).
 The
 .Nm
 driver implements the work-around suggested in the manual.
-If the bug does not exist in other versions of this device, the
-work-around can be disabled by setting the dev.cgem.%d.rxhangwar
+It is believed that the bug does not exist in the Zynq UltraScale+ and
+SiFive SoCs so the work-around is disabled in those instances and enabled
+in all others.
+The work-around can be disabled by setting the
+.Va dev.cgem.%d.rxhangwar
 .Xr sysctl 8
 variable to 0.
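For reference, the toggle described above is a plain integer sysctl, so it can also be flipped programmatically with sysctlbyname(3). A minimal sketch, assuming device instance 0 (not part of this commit):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>

/* Disable the Zynq-7000 receiver-hang work-around on instance 0;
 * equivalent to running `sysctl dev.cgem.0.rxhangwar=0`. */
int
main(void)
{
    int zero = 0;

    if (sysctlbyname("dev.cgem.0.rxhangwar", NULL, NULL, &zero,
        sizeof(zero)) != 0)
        err(1, "sysctlbyname");
    return (0);
}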

@@ -8,7 +8,6 @@ MAN= \
     aw_spi.4 \
     aw_syscon.4 \
     bcm283x_pwm.4 \
-    cgem.4 \
     devcfg.4 \
     imx6_ahci.4 \
     imx6_snvs.4 \
@@ -16,8 +15,7 @@ MAN= \
     mge.4 \
     ti_adc.4
 
-MLINKS= cgem.4 if_cgem.4
-MLINKS+= imx_wdog.4 imxwdt.4
+MLINKS= imx_wdog.4 imxwdt.4
 MLINKS+= mge.4 if_mge.4
 
 MANSUBDIR=/arm

@@ -228,8 +228,8 @@ device e6000sw
 device miibus
 device awg      # 10/100/1000 integrated EMAC controller
+device cgem     # Cadence GEM Gigabit Ethernet device
 device cpsw     # TI Common Platform Ethernet Switch (CPSW)
-device cgem     # Zynq-7000 gig ethernet device
 device dwc      # 10/100/1000 integrated GMAC controller
 device emac     # 10/100 integrated EMAC controller
 device ffec     # Freescale Fast Ethernet Controller

@@ -49,7 +49,7 @@ device mpcore_timer
 device loop
 device ether
-device cgem     # Zynq-7000 gig ethernet device
+device cgem     # Cadence GEM Gigabit Ethernet device
 device mii
 device e1000phy
 device rgephy   # Zybo uses Realtek RTL8211E

@@ -278,6 +278,7 @@ device miibus      # MII bus support
 device al_eth       # Annapurna Alpine Ethernet NIC
 device awg          # Allwinner EMAC Gigabit Ethernet
 device axa          # AMD Opteron A1100 integrated NIC
+device cgem         # Cadence GEM Gigabit Ethernet device
 device dwc_rk       # Rockchip Designware
 device dwc_socfpga  # Altera SOCFPGA Ethernet MAC
 device ffec         # iMX FFEC

@@ -77,6 +77,14 @@ __FBSDID("$FreeBSD$");
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>
 
+#ifdef EXT_RESOURCES
+#include <dev/extres/clk/clk.h>
+#endif
+
+#if INTPTR_MAX == INT64_MAX
+#define CGEM64
+#endif
+
 #include <dev/cadence/if_cgem_hw.h>
 
 #include "miibus_if.h"
@@ -86,9 +94,6 @@ __FBSDID("$FreeBSD$");
 #define CGEM_NUM_RX_DESCS 512 /* size of receive descriptor ring */
 #define CGEM_NUM_TX_DESCS 512 /* size of transmit descriptor ring */
 
-#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
-    CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))
-
 /* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */
 #define DEFAULT_NUM_RX_BUFS 256 /* number of receive bufs to queue. */
@@ -97,11 +102,18 @@ __FBSDID("$FreeBSD$");
 #define CGEM_CKSUM_ASSIST (CSUM_IP | CSUM_TCP | CSUM_UDP | \
     CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
 
+#define HWTYPE_GENERIC_GEM 1
+#define HWTYPE_ZYNQ 2
+#define HWTYPE_ZYNQMP 3
+#define HWTYPE_SIFIVE_FU540 4
+
 static struct ofw_compat_data compat_data[] = {
-    { "cadence,gem", 1 },
-    { "cdns,macb", 1 },
-    { "sifive,fu540-c000-gem", 1 },
-    { NULL, 0 },
+    { "cdns,zynq-gem", HWTYPE_ZYNQ },
+    { "cdns,zynqmp-gem", HWTYPE_ZYNQMP },
+    { "sifive,fu540-c000-gem", HWTYPE_SIFIVE_FU540 },
+    { "cdns,gem", HWTYPE_GENERIC_GEM },
+    { "cadence,gem", HWTYPE_GENERIC_GEM },
+    { NULL, 0 }
 };
 
 struct cgem_softc {
@@ -116,8 +128,13 @@ struct cgem_softc {
     void *intrhand;
     struct callout tick_ch;
     uint32_t net_ctl_shadow;
+    uint32_t net_cfg_shadow;
+#ifdef EXT_RESOURCES
+    clk_t ref_clk;
+#else
     int ref_clk_num;
-    u_char eaddr[6];
+#endif
+    int neednullqs;
 
     bus_dma_tag_t desc_dma_tag;
     bus_dma_tag_t mbuf_dma_tag;
@@ -146,12 +163,15 @@ struct cgem_softc {
     int txring_hd_ptr;  /* where to put next xmits */
     int txring_tl_ptr;  /* next xmit mbuf to free */
     int txring_queued;  /* num xmits segs queued */
-    bus_dmamap_t txring_dma_map;
     u_int txfull;       /* tx ring full events */
     u_int txdefrags;    /* tx calls to m_defrag() */
     u_int txdefragfails;    /* tx m_defrag() failures */
     u_int txdmamapfails;    /* tx dmamap failures */
 
+    /* null descriptor rings */
+    void *null_qs;
+    bus_addr_t null_qs_physaddr;
+
     /* hardware provided statistics */
     struct cgem_hw_stats {
         uint64_t tx_bytes;
@@ -274,9 +294,9 @@ cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
 /*
  * cgem_mac_hash(): map 48-bit address to a 6-bit hash. The 6-bit hash
- * corresponds to a bit in a 64-bit hash register. Setting that bit in the hash
- * register enables reception of all frames with a destination address that
- * hashes to that 6-bit value.
+ * corresponds to a bit in a 64-bit hash register. Setting that bit in the
+ * hash register enables reception of all frames with a destination address
+ * that hashes to that 6-bit value.
  *
  * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
  * Reference Manual. Bits 0-5 in the hash are the exclusive-or of
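The folding the comment above describes is small enough to write out. A sketch of the 6-bit hash, assuming the standard GEM scheme in which hash bit j is the exclusive-or of destination-address bits j, j+6, ..., j+42; this helper is illustrative only, not the driver's actual cgem_mac_hash():

#include <stdint.h>

/* Fold a 48-bit MAC address into a 6-bit hash: bit j of the result is
 * the XOR of address bits j, j+6, j+12, ..., j+42. The value (0-63)
 * selects one bit across the CGEM_HASH_TOP/CGEM_HASH_BOT pair. */
static unsigned int
gem_hash_sketch(const uint8_t addr[6])
{
    uint64_t da = 0;
    unsigned int hash = 0;
    int i, j;

    for (i = 0; i < 6; i++)
        da |= (uint64_t)addr[i] << (8 * i); /* byte 0 in the low bits */

    for (j = 0; j < 6; j++) {
        unsigned int bit = 0;

        for (i = 0; i < 8; i++)
            bit ^= (da >> (j + 6 * i)) & 1;
        hash |= bit << j;
    }
    return (hash);
}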
@@ -321,18 +341,15 @@ cgem_rx_filter(struct cgem_softc *sc)
 {
     if_t ifp = sc->ifp;
     uint32_t hashes[2] = { 0, 0 };
-    uint32_t net_cfg;
 
-    net_cfg = RD4(sc, CGEM_NET_CFG);
-    net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
+    sc->net_cfg_shadow &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
         CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL);
 
     if ((if_getflags(ifp) & IFF_PROMISC) != 0)
-        net_cfg |= CGEM_NET_CFG_COPY_ALL;
+        sc->net_cfg_shadow |= CGEM_NET_CFG_COPY_ALL;
     else {
         if ((if_getflags(ifp) & IFF_BROADCAST) == 0)
-            net_cfg |= CGEM_NET_CFG_NO_BCAST;
+            sc->net_cfg_shadow |= CGEM_NET_CFG_NO_BCAST;
         if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
             hashes[0] = 0xffffffff;
             hashes[1] = 0xffffffff;
@@ -340,12 +357,12 @@ cgem_rx_filter(struct cgem_softc *sc)
         if_foreach_llmaddr(ifp, cgem_hash_maddr, hashes);
 
         if (hashes[0] != 0 || hashes[1] != 0)
-            net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
+            sc->net_cfg_shadow |= CGEM_NET_CFG_MULTI_HASH_EN;
     }
 
     WR4(sc, CGEM_HASH_TOP, hashes[0]);
     WR4(sc, CGEM_HASH_BOT, hashes[1]);
-    WR4(sc, CGEM_NET_CFG, net_cfg);
+    WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 }
 
 /* For bus_dmamap_load() callback. */
@@ -358,40 +375,92 @@ cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
     *(bus_addr_t *)arg = segs[0].ds_addr;
 }
 
+/* Set up null queues for priority queues we actually can't disable. */
+static void
+cgem_null_qs(struct cgem_softc *sc)
+{
+    struct cgem_rx_desc *rx_desc;
+    struct cgem_tx_desc *tx_desc;
+    uint32_t queue_mask;
+    int n;
+
+    /* Read design config register 6 to determine number of queues. */
+    queue_mask = (RD4(sc, CGEM_DESIGN_CFG6) &
+        CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK) >> 1;
+    if (queue_mask == 0)
+        return;
+
+    /* Create empty RX queue and empty TX buf queues. */
+    memset(sc->null_qs, 0, sizeof(struct cgem_rx_desc) +
+        sizeof(struct cgem_tx_desc));
+    rx_desc = sc->null_qs;
+    rx_desc->addr = CGEM_RXDESC_OWN | CGEM_RXDESC_WRAP;
+    tx_desc = (struct cgem_tx_desc *)(rx_desc + 1);
+    tx_desc->ctl = CGEM_TXDESC_USED | CGEM_TXDESC_WRAP;
+
+    /* Point all valid ring base pointers to the null queues. */
+    for (n = 1; (queue_mask & 1) != 0; n++, queue_mask >>= 1) {
+        WR4(sc, CGEM_RX_QN_BAR(n), sc->null_qs_physaddr);
+        WR4(sc, CGEM_TX_QN_BAR(n), sc->null_qs_physaddr +
+            sizeof(struct cgem_rx_desc));
+    }
+}
+
 /* Create DMA'able descriptor rings. */
 static int
 cgem_setup_descs(struct cgem_softc *sc)
 {
     int i, err;
+    int desc_rings_size = CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc) +
+        CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);
+
+    if (sc->neednullqs)
+        desc_rings_size += sizeof(struct cgem_rx_desc) +
+            sizeof(struct cgem_tx_desc);
 
     sc->txring = NULL;
     sc->rxring = NULL;
 
     /* Allocate non-cached DMA space for RX and TX descriptors. */
-    err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
-        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
-        MAX_DESC_RING_SIZE, 1, MAX_DESC_RING_SIZE, 0,
+    err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1,
+#ifdef CGEM64
+        1ULL << 32, /* Do not cross a 4G boundary. */
+#else
+        0,
+#endif
+        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+        desc_rings_size, 1, desc_rings_size, 0,
         busdma_lock_mutex, &sc->sc_mtx, &sc->desc_dma_tag);
     if (err)
         return (err);
 
     /* Set up a bus_dma_tag for mbufs. */
     err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
-        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
-        MCLBYTES, TX_MAX_DMA_SEGS, MCLBYTES, 0,
-        busdma_lock_mutex, &sc->sc_mtx, &sc->mbuf_dma_tag);
+        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
+        TX_MAX_DMA_SEGS, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
+        &sc->mbuf_dma_tag);
     if (err)
         return (err);
 
-    /* Allocate DMA memory in non-cacheable space. */
+    /*
+     * Allocate DMA memory in non-cacheable space. We allocate transmit,
+     * receive and null descriptor queues all at once because the
+     * hardware only provides one register for the upper 32 bits of
+     * rx and tx descriptor queues hardware addresses.
+     */
     err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->rxring,
-        BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rxring_dma_map);
+#ifdef __arm__
+        BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO,
+#else
+        BUS_DMA_NOWAIT | BUS_DMA_NOCACHE | BUS_DMA_ZERO,
+#endif
+        &sc->rxring_dma_map);
     if (err)
         return (err);
 
     /* Load descriptor DMA memory. */
     err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
-        (void *)sc->rxring, CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
+        (void *)sc->rxring, desc_rings_size,
         cgem_getaddr, &sc->rxring_physaddr, BUS_DMA_NOWAIT);
     if (err)
         return (err);
@@ -409,18 +478,9 @@ cgem_setup_descs(struct cgem_softc *sc)
     sc->rxring_tl_ptr = 0;
     sc->rxring_queued = 0;
 
-    /* Allocate DMA memory for TX descriptors in non-cacheable space. */
-    err = bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->txring,
-        BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->txring_dma_map);
-    if (err)
-        return (err);
-
-    /* Load TX descriptor DMA memory. */
-    err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
-        (void *)sc->txring, CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
-        cgem_getaddr, &sc->txring_physaddr, BUS_DMA_NOWAIT);
-    if (err)
-        return (err);
+    sc->txring = (struct cgem_tx_desc *)(sc->rxring + CGEM_NUM_RX_DESCS);
+    sc->txring_physaddr = sc->rxring_physaddr + CGEM_NUM_RX_DESCS *
+        sizeof(struct cgem_rx_desc);
 
     /* Initialize TX descriptor ring. */
     for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
@@ -435,6 +495,14 @@ cgem_setup_descs(struct cgem_softc *sc)
     sc->txring_tl_ptr = 0;
     sc->txring_queued = 0;
 
+    if (sc->neednullqs) {
+        sc->null_qs = (void *)(sc->txring + CGEM_NUM_TX_DESCS);
+        sc->null_qs_physaddr = sc->txring_physaddr +
+            CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc);
+
+        cgem_null_qs(sc);
+    }
+
     return (0);
 }
@@ -484,6 +552,9 @@ cgem_fill_rqueue(struct cgem_softc *sc)
         /* Write rx descriptor and increment head pointer. */
         sc->rxring[sc->rxring_hd_ptr].ctl = 0;
+#ifdef CGEM64
+        sc->rxring[sc->rxring_hd_ptr].addrhi = segs[0].ds_addr >> 32;
+#endif
         if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
             sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
                 CGEM_RXDESC_WRAP;
@@ -509,7 +580,7 @@ cgem_recv(struct cgem_softc *sc)
     m_hd = NULL;
     m_tl = &m_hd;
     while (sc->rxring_queued > 0 &&
-           (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
+        (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
         ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
 
         /* Grab filled mbuf. */
@@ -629,9 +700,16 @@ cgem_clean_tx(struct cgem_softc *sc)
         /* Check the status. */
         if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
             /* Serious bus error. log to console. */
+#ifdef CGEM64
+            device_printf(sc->dev,
+                "cgem_clean_tx: AHB error, addr=0x%x%08x\n",
+                sc->txring[sc->txring_tl_ptr].addrhi,
+                sc->txring[sc->txring_tl_ptr].addr);
+#else
             device_printf(sc->dev,
                 "cgem_clean_tx: AHB error, addr=0x%x\n",
                 sc->txring[sc->txring_tl_ptr].addr);
+#endif
         } else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
             CGEM_TXDESC_LATE_COLL)) != 0) {
             if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
@@ -640,8 +718,8 @@ cgem_clean_tx(struct cgem_softc *sc)
         /*
          * If the packet spanned more than one tx descriptor, skip
-         * descriptors until we find the end so that only start-of-frame
-         * descriptors are processed.
+         * descriptors until we find the end so that only
+         * start-of-frame descriptors are processed.
          */
         while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
             if ((ctl & CGEM_TXDESC_WRAP) != 0)
@@ -759,7 +837,10 @@ cgem_start_locked(if_t ifp)
             /* Descriptor address. */
             sc->txring[sc->txring_hd_ptr + i].addr =
                 segs[i].ds_addr;
+#ifdef CGEM64
+            sc->txring[sc->txring_hd_ptr + i].addrhi =
+                segs[i].ds_addr >> 32;
+#endif
 
             /* Descriptor control word. */
             ctl = segs[i].ds_len;
             if (i == nsegs - 1) {
@@ -961,8 +1042,21 @@ cgem_reset(struct cgem_softc *sc)
 
     CGEM_ASSERT_LOCKED(sc);
 
+    /* Determine data bus width from design configuration register. */
+    switch (RD4(sc, CGEM_DESIGN_CFG1) &
+        CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK) {
+    case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64:
+        sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_64;
+        break;
+    case CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128:
+        sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_128;
+        break;
+    default:
+        sc->net_cfg_shadow = CGEM_NET_CFG_DBUS_WIDTH_32;
+    }
+
     WR4(sc, CGEM_NET_CTRL, 0);
-    WR4(sc, CGEM_NET_CFG, 0);
+    WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
     WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
     WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
     WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
@@ -973,8 +1067,8 @@ cgem_reset(struct cgem_softc *sc)
     WR4(sc, CGEM_RX_QBAR, 0);
 
     /* Get management port running even if interface is down. */
-    WR4(sc, CGEM_NET_CFG, CGEM_NET_CFG_DBUS_WIDTH_32 |
-        CGEM_NET_CFG_MDC_CLK_DIV_64);
+    sc->net_cfg_shadow |= CGEM_NET_CFG_MDC_CLK_DIV_48;
+    WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 
     sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
     WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
@@ -985,33 +1079,33 @@ static void
 cgem_config(struct cgem_softc *sc)
 {
     if_t ifp = sc->ifp;
-    uint32_t net_cfg;
     uint32_t dma_cfg;
     u_char *eaddr = if_getlladdr(ifp);
 
     CGEM_ASSERT_LOCKED(sc);
 
     /* Program Net Config Register. */
-    net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
-        CGEM_NET_CFG_MDC_CLK_DIV_64 |
-        CGEM_NET_CFG_FCS_REMOVE |
+    sc->net_cfg_shadow &= (CGEM_NET_CFG_MDC_CLK_DIV_MASK |
+        CGEM_NET_CFG_DBUS_WIDTH_MASK);
+    sc->net_cfg_shadow |= (CGEM_NET_CFG_FCS_REMOVE |
         CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
-        CGEM_NET_CFG_GIGE_EN |
-        CGEM_NET_CFG_1536RXEN |
-        CGEM_NET_CFG_FULL_DUPLEX |
-        CGEM_NET_CFG_SPEED100;
+        CGEM_NET_CFG_GIGE_EN | CGEM_NET_CFG_1536RXEN |
+        CGEM_NET_CFG_FULL_DUPLEX | CGEM_NET_CFG_SPEED100);
 
     /* Enable receive checksum offloading? */
     if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
-        net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
+        sc->net_cfg_shadow |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
 
-    WR4(sc, CGEM_NET_CFG, net_cfg);
+    WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 
     /* Program DMA Config Register. */
     dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
         CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
         CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
         CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
+#ifdef CGEM64
+        CGEM_DMA_CFG_ADDR_BUS_64 |
+#endif
         CGEM_DMA_CFG_DISC_WHEN_NO_AHB;
@@ -1021,8 +1115,12 @@ cgem_config(struct cgem_softc *sc)
     WR4(sc, CGEM_DMA_CFG, dma_cfg);
 
     /* Write the rx and tx descriptor ring addresses to the QBAR regs. */
-    WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
-    WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);
+    WR4(sc, CGEM_RX_QBAR, (uint32_t)sc->rxring_physaddr);
+    WR4(sc, CGEM_TX_QBAR, (uint32_t)sc->txring_physaddr);
+#ifdef CGEM64
+    WR4(sc, CGEM_RX_QBAR_HI, (uint32_t)(sc->rxring_physaddr >> 32));
+    WR4(sc, CGEM_TX_QBAR_HI, (uint32_t)(sc->txring_physaddr >> 32));
+#endif
 
     /* Enable rx and tx. */
     sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
@@ -1055,8 +1153,10 @@ cgem_init_locked(struct cgem_softc *sc)
     if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
 
-    mii = device_get_softc(sc->miibus);
-    mii_mediachg(mii);
+    if (sc->miibus != NULL) {
+        mii = device_get_softc(sc->miibus);
+        mii_mediachg(mii);
+    }
 
     callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
 }
@@ -1085,9 +1185,9 @@ cgem_stop(struct cgem_softc *sc)
     cgem_reset(sc);
 
     /* Clear out transmit queue. */
+    memset(sc->txring, 0, CGEM_NUM_TX_DESCS * sizeof(struct cgem_tx_desc));
     for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
         sc->txring[i].ctl = CGEM_TXDESC_USED;
-        sc->txring[i].addr = 0;
         if (sc->txring_m[i]) {
             /* Unload and destroy dmamap. */
             bus_dmamap_unload(sc->mbuf_dma_tag,
@@ -1106,9 +1206,9 @@ cgem_stop(struct cgem_softc *sc)
     sc->txring_queued = 0;
 
     /* Clear out receive queue. */
+    memset(sc->rxring, 0, CGEM_NUM_RX_DESCS * sizeof(struct cgem_rx_desc));
     for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
         sc->rxring[i].addr = CGEM_RXDESC_OWN;
-        sc->rxring[i].ctl = 0;
         if (sc->rxring_m[i]) {
             /* Unload and destroy dmamap. */
             bus_dmamap_unload(sc->mbuf_dma_tag,
@@ -1171,6 +1271,8 @@ cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
     case SIOCSIFMEDIA:
     case SIOCGIFMEDIA:
+        if (sc->miibus == NULL)
+            return (ENXIO);
         mii = device_get_softc(sc->miibus);
         error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
         break;
@@ -1205,16 +1307,16 @@ cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
                 /* Turn on RX checksumming. */
                 if_setcapenablebit(ifp, IFCAP_RXCSUM |
                     IFCAP_RXCSUM_IPV6, 0);
-                WR4(sc, CGEM_NET_CFG,
-                    RD4(sc, CGEM_NET_CFG) |
-                    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
+                sc->net_cfg_shadow |=
+                    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
+                WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
             } else {
                 /* Turn off RX checksumming. */
                 if_setcapenablebit(ifp, 0, IFCAP_RXCSUM |
                     IFCAP_RXCSUM_IPV6);
-                WR4(sc, CGEM_NET_CFG,
-                    RD4(sc, CGEM_NET_CFG) &
-                    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
+                sc->net_cfg_shadow &=
+                    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
+                WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
             }
         }
         if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
@@ -1235,15 +1337,6 @@ cgem_ioctl(if_t ifp, u_long cmd, caddr_t data)
 /* MII bus support routines.
  */
 
-static void
-cgem_child_detached(device_t dev, device_t child)
-{
-    struct cgem_softc *sc = device_get_softc(dev);
-
-    if (child == sc->miibus)
-        sc->miibus = NULL;
-}
-
 static int
 cgem_ifmedia_upd(if_t ifp)
 {
@@ -1380,24 +1473,22 @@ __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
 static void
 cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
 {
-    uint32_t net_cfg;
     int ref_clk_freq;
 
     CGEM_ASSERT_LOCKED(sc);
 
     /* Update hardware to reflect media. */
-    net_cfg = RD4(sc, CGEM_NET_CFG);
-    net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
+    sc->net_cfg_shadow &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
         CGEM_NET_CFG_FULL_DUPLEX);
 
     switch (IFM_SUBTYPE(mii->mii_media_active)) {
     case IFM_1000_T:
-        net_cfg |= (CGEM_NET_CFG_SPEED100 |
+        sc->net_cfg_shadow |= (CGEM_NET_CFG_SPEED100 |
             CGEM_NET_CFG_GIGE_EN);
         ref_clk_freq = 125000000;
         break;
     case IFM_100_TX:
-        net_cfg |= CGEM_NET_CFG_SPEED100;
+        sc->net_cfg_shadow |= CGEM_NET_CFG_SPEED100;
        ref_clk_freq = 25000000;
         break;
     default:
@@ -1405,15 +1496,25 @@ cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
     }
 
     if ((mii->mii_media_active & IFM_FDX) != 0)
-        net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
+        sc->net_cfg_shadow |= CGEM_NET_CFG_FULL_DUPLEX;
 
-    WR4(sc, CGEM_NET_CFG, net_cfg);
+    WR4(sc, CGEM_NET_CFG, sc->net_cfg_shadow);
 
+#ifdef EXT_RESOURCES
+    if (sc->ref_clk != NULL) {
+        CGEM_UNLOCK(sc);
+        if (clk_set_freq(sc->ref_clk, ref_clk_freq, 0))
+            device_printf(sc->dev, "could not set ref clk to %d\n",
+                ref_clk_freq);
+        CGEM_LOCK(sc);
+    }
+#else
     /* Set the reference clock if necessary. */
     if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
         device_printf(sc->dev,
             "cgem_mediachange: could not set ref clk%d to %d.\n",
             sc->ref_clk_num, ref_clk_freq);
+#endif
 
     sc->mii_media_active = mii->mii_media_active;
 }
@@ -1640,19 +1741,46 @@ cgem_attach(device_t dev)
 {
     struct cgem_softc *sc = device_get_softc(dev);
     if_t ifp = NULL;
-    phandle_t node;
-    pcell_t cell;
     int rid, err;
     u_char eaddr[ETHER_ADDR_LEN];
+    int hwtype;
+#ifndef EXT_RESOURCES
+    phandle_t node;
+    pcell_t cell;
+#endif
 
     sc->dev = dev;
     CGEM_LOCK_INIT(sc);
 
+    /* Key off of compatible string and set hardware-specific options. */
+    hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+    if (hwtype == HWTYPE_ZYNQMP)
+        sc->neednullqs = 1;
+    if (hwtype == HWTYPE_ZYNQ)
+        sc->rxhangwar = 1;
+
+#ifdef EXT_RESOURCES
+    if (hwtype == HWTYPE_ZYNQ || hwtype == HWTYPE_ZYNQMP) {
+        if (clk_get_by_ofw_name(dev, 0, "tx_clk", &sc->ref_clk) != 0)
+            device_printf(dev,
+                "could not retrieve reference clock.\n");
+        else if (clk_enable(sc->ref_clk) != 0)
+            device_printf(dev, "could not enable clock.\n");
+    }
+    else if (hwtype == HWTYPE_SIFIVE_FU540) {
+        if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->ref_clk) != 0)
+            device_printf(dev,
+                "could not retrieve reference clock.\n");
+        else if (clk_enable(sc->ref_clk) != 0)
+            device_printf(dev, "could not enable clock.\n");
+    }
+#else
     /* Get reference clock number and base divider from fdt. */
     node = ofw_bus_get_node(dev);
     sc->ref_clk_num = 0;
     if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
         sc->ref_clk_num = fdt32_to_cpu(cell);
+#endif
 
     /* Get memory resource. */
     rid = 0;
@@ -1665,7 +1793,8 @@ cgem_attach(device_t dev)
 
     /* Get IRQ resource. */
     rid = 0;
-    sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
+    sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+        RF_ACTIVE);
     if (sc->irq_res == NULL) {
         device_printf(dev, "could not allocate interrupt resource.\n");
         cgem_detach(dev);
@@ -1697,7 +1826,6 @@ cgem_attach(device_t dev)
     sc->if_old_flags = if_getflags(ifp);
     sc->rxbufs = DEFAULT_NUM_RX_BUFS;
-    sc->rxhangwar = 1;
 
     /* Reset hardware. */
     CGEM_LOCK(sc);
@@ -1708,11 +1836,8 @@ cgem_attach(device_t dev)
     err = mii_attach(dev, &sc->miibus, ifp,
         cgem_ifmedia_upd, cgem_ifmedia_sts, BMSR_DEFCAPMASK,
         MII_PHY_ANY, MII_OFFSET_ANY, 0);
-    if (err) {
-        device_printf(dev, "attaching PHYs failed\n");
-        cgem_detach(dev);
-        return (err);
-    }
+    if (err)
+        device_printf(dev, "warning: attaching PHYs failed\n");
 
     /* Set up TX and RX descriptor area. */
     err = cgem_setup_descs(sc);
@@ -1787,26 +1912,21 @@ cgem_detach(device_t dev)
             bus_dmamap_unload(sc->desc_dma_tag,
                 sc->rxring_dma_map);
             sc->rxring_physaddr = 0;
+            sc->txring_physaddr = 0;
+            sc->null_qs_physaddr = 0;
         }
         bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
             sc->rxring_dma_map);
         sc->rxring = NULL;
+        sc->txring = NULL;
+        sc->null_qs = NULL;
         for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
             if (sc->rxring_m_dmamap[i] != NULL) {
                 bus_dmamap_destroy(sc->mbuf_dma_tag,
                     sc->rxring_m_dmamap[i]);
                 sc->rxring_m_dmamap[i] = NULL;
             }
-    }
-    if (sc->txring != NULL) {
-        if (sc->txring_physaddr != 0) {
-            bus_dmamap_unload(sc->desc_dma_tag,
-                sc->txring_dma_map);
-            sc->txring_physaddr = 0;
-        }
-        bus_dmamem_free(sc->desc_dma_tag, sc->txring,
-            sc->txring_dma_map);
-        sc->txring = NULL;
+
         for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
             if (sc->txring_m_dmamap[i] != NULL) {
                 bus_dmamap_destroy(sc->mbuf_dma_tag,
@@ -1823,6 +1943,13 @@ cgem_detach(device_t dev)
         sc->mbuf_dma_tag = NULL;
     }
 
+#ifdef EXT_RESOURCES
+    if (sc->ref_clk != NULL) {
+        clk_release(sc->ref_clk);
+        sc->ref_clk = NULL;
+    }
+#endif
+
     bus_generic_detach(dev);
 
     CGEM_LOCK_DESTROY(sc);
@@ -1836,9 +1963,6 @@ static device_method_t cgem_methods[] = {
     DEVMETHOD(device_attach, cgem_attach),
     DEVMETHOD(device_detach, cgem_detach),
 
-    /* Bus interface */
-    DEVMETHOD(bus_child_detached, cgem_child_detached),
-
     /* MII interface */
     DEVMETHOD(miibus_readreg, cgem_miibus_readreg),
     DEVMETHOD(miibus_writereg, cgem_miibus_writereg),
@@ -1858,3 +1982,4 @@ DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
 DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
 MODULE_DEPEND(cgem, miibus, 1, 1, 1);
 MODULE_DEPEND(cgem, ether, 1, 1, 1);
+SIMPLEBUS_PNP_INFO(compat_data);

@@ -35,6 +35,10 @@
  * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
  * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16
  * and register definitions are in appendix B.18.
+ *
+ * Additional Reference: Zynq UltraScale+ Device Register Reference
+ * (UG1087 v1.7 Feb 8, 2019):
+ * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers.html
  */
 
 #ifndef _IF_CGEM_HW_H_
@@ -113,6 +117,7 @@
 #define CGEM_USER_IO 0x00C /* User I/O */
 
 #define CGEM_DMA_CFG 0x010 /* DMA Config */
+#define CGEM_DMA_CFG_ADDR_BUS_64 (1 << 30)
 #define CGEM_DMA_CFG_DISC_WHEN_NO_AHB (1 << 24)
 #define CGEM_DMA_CFG_RX_BUF_SIZE_SHIFT 16
 #define CGEM_DMA_CFG_RX_BUF_SIZE_MASK (0xff << 16)
@@ -290,6 +295,29 @@
 #define CGEM_PTP_PEER_RX_S 0x1F8 /* PTP Peer Event rcv'd s */
 #define CGEM_PTP_PEER_RX_NS 0x1FC /* PTP Peer Event rcv'd ns */
 
+#define CGEM_DESIGN_CFG1 0x280 /* Design Configuration 1 */
+#define CGEM_DESIGN_CFG1_AXI_CACHE_WIDTH_MASK (0xfU << 28)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_MASK (7 << 25)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_32 (1 << 25)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_64 (2 << 25)
+#define CGEM_DESIGN_CFG1_DMA_BUS_WIDTH_128 (4 << 25)
+#define CGEM_DESIGN_CFG1_IRQ_READ_CLR (1 << 23)
+#define CGEM_DESIGN_CFG1_NO_SNAPSHOT (1 << 22)
+#define CGEM_DESIGN_CFG1_NO_STATS (1 << 21)
+#define CGEM_DESIGN_CFG1_NO_SCAN_PINS (1 << 20)
+#define CGEM_DESIGN_CFG1_USER_IN_WIDTH_MASK (0x1f << 15)
+#define CGEM_DESIGN_CFG1_USER_OUT_WIDTH_MASK (0x1f << 10)
+#define CGEM_DESIGN_CFG1_USER_IO (1 << 9)
+#define CGEM_DESIGN_CFG1_APB_REV2 (1 << 8)
+#define CGEM_DESIGN_CFG1_APB_REV1 (1 << 7)
+#define CGEM_DESIGN_CFG1_EXT_FIFO_INTERFACE (1 << 6)
+#define CGEM_DESIGN_CFG1_NO_INT_LOOPBACK (1 << 5)
+#define CGEM_DESIGN_CFG1_INT_LOOPBACK (1 << 4)
+#define CGEM_DESIGN_CFG1_TDC_50 (1 << 3)
+#define CGEM_DESIGN_CFG1_RDC_50 (1 << 2)
+#define CGEM_DESIGN_CFG1_SERDES (1 << 1)
+#define CGEM_DESIGN_CFG1_NO_PCS (1 << 0)
+
 #define CGEM_DESIGN_CFG2 0x284 /* Design Configuration 2 */
 #define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_SHIFT 26
 #define CGEM_DESIGN_CFG2_TX_PBUF_ADDR_MASK (0xf << 26)
@@ -330,7 +358,25 @@
 #define CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_MASK (0xf << 4)
 #define CGEM_DESIGN_CFG5_RX_FIFO_CNT_WIDTH_MASK 0xf
 
-/* Transmit Descriptors */
+#define CGEM_DESIGN_CFG6 0x294 /* Design Configuration 6 */
+#define CGEM_DESIGN_CFG6_ADDR_64B (1 << 23) /* 64-bit addr cap */
+#define CGEM_DESIGN_CFG6_DMA_PRIO_Q_MASK 0xfffe
+#define CGEM_DESIGN_CFG6_DMA_PRIO_Q(n) (1 << (n))
+
+#define CGEM_TX_QN_BAR(n) (0x440 + ((n) - 1) * 4)
+#define CGEM_RX_QN_BAR(n) (0x480 + ((n) - 1) * 4)
+#define CGEM_TX_QBAR_HI 0x4C8
+#define CGEM_RX_QBAR_HI 0x4D4
+
+/*
+ * Transmit Descriptors: two or four 32-bit words:
+ *   word0: address
+ *   word1: length and control
+ *   word2: address upper 32 bits (64-bit mode)
+ *   word3: unused (64-bit mode)
+ */
 struct cgem_tx_desc {
     uint32_t addr;
     uint32_t ctl;
@@ -350,8 +396,20 @@ struct cgem_tx_desc {
 #define CGEM_TXDESC_NO_CRC_APPENDED (1 << 16)
 #define CGEM_TXDESC_LAST_BUF (1 << 15) /* last in frame */
 #define CGEM_TXDESC_LENGTH_MASK 0x3fff
+#ifdef CGEM64
+    uint32_t addrhi;
+    uint32_t unused;
+#endif
 };
 
+/*
+ * Receive Descriptors: two or four 32-bit words:
+ *   word0: address | WRAP and OWN flags
+ *   word1: length and control
+ *   word2: address upper 32 bits (64-bit mode)
+ *   word3: unused
+ */
 struct cgem_rx_desc {
     uint32_t addr;
 #define CGEM_RXDESC_WRAP (1 << 1) /* goes in addr! */
@@ -379,6 +437,10 @@ struct cgem_rx_desc {
 #define CGEM_RXDESC_SOF (1 << 14) /* start of frame */
 #define CGEM_RXDESC_BAD_FCS (1 << 13)
 #define CGEM_RXDESC_LENGTH_MASK 0x1fff
+#ifdef CGEM64
+    uint32_t addrhi;
+    uint32_t unused;
+#endif
 };
 
 #endif /* _IF_CGEM_HW_H_ */
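To make the two-or-four-word layout concrete: in 64-bit mode each descriptor grows from 8 to 16 bytes, and the upper address word is written separately from the low word. A sketch mirroring what cgem_fill_rqueue() does in the driver above; cgem_write_rx_addr() is a hypothetical helper, not part of the commit:

/* Program a 64-bit buffer address into an rx descriptor. The low word
 * carries the WRAP flag; the controller sets CGEM_RXDESC_OWN in word0
 * once it has filled the buffer. */
static inline void
cgem_write_rx_addr(struct cgem_rx_desc *desc, bus_addr_t paddr, int wrap)
{
#ifdef CGEM64
    desc->addrhi = (uint32_t)(paddr >> 32);
    desc->unused = 0;
#endif
    desc->addr = (uint32_t)paddr | (wrap ? CGEM_RXDESC_WRAP : 0);
}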

@@ -176,20 +176,20 @@
         // Gigabit Ethernet controllers
         eth0: eth@b000 {
             device_type = "network";
-            compatible = "cadence,gem";
+            compatible = "cdns,zynq-gem", "cadence,gem";
             status = "disabled";
             reg = <0xb000 0x1000>;
             interrupts = <0 22 4>;
             interrupt-parent = <&GIC>;
             ref-clock-num = <0>;
         };
 
         eth1: eth@c000 {
             device_type = "network";
-            compatible = "cadence,gem";
+            compatible = "cdns,zynq-gem", "cadence,gem";
             status = "disabled";
             reg = <0xc000 0x1000>;
             interrupts = <0 45 4>;
             interrupt-parent = <&GIC>;
             ref-clock-num = <1>;

@@ -114,8 +114,8 @@ device uart_ns8250   # ns8250-type UART driver
 device goldfish_rtc  # QEMU RTC
 
 # Ethernet drivers
+device cgem          # Cadence GEM Gigabit Ethernet device
 device miibus        # MII bus support
-device cgem          # Cadence Gigabit Ethernet MAC
 device xae           # Xilinx AXI Ethernet MAC
 
 # DMA support