Port the Linux AMD 10G network driver to FreeBSD as axgbe. It is unlikely
we will import a newer version of the Linux code, so the linuxkpi was not
used.

This is still missing 10G support, and multicast has not been tested.

Reviewed by:	gnn
Obtained from:	ABT Systems Ltd
Sponsored by:	SoftIron Inc
Differential Revision:	https://reviews.freebsd.org/D8549
Andrew Turner 2017-02-15 13:56:04 +00:00
parent 44b781cfe0
commit 9c6d6488fa
10 changed files with 1341 additions and 2704 deletions

sys/arm64/conf/GENERIC

@@ -119,6 +119,7 @@ options 	PCI_IOV		# PCI SR-IOV support
 device		mii
 device		miibus		# MII bus support
 device		awg		# Allwinner EMAC Gigabit Ethernet
+device		axgbe		# AMD Opteron A1100 integrated NIC
 device		em		# Intel PRO/1000 Gigabit Ethernet Family
 device		ix		# Intel 10Gb Ethernet Family
 device		msk		# Marvell/SysKonnect Yukon II Gigabit Ethernet

sys/conf/files.arm64

@@ -146,6 +146,11 @@ crypto/blowfish/bf_enc.c	optional crypto | ipsec | ipsec_support
 crypto/des/des_enc.c		optional crypto | ipsec | ipsec_support | netsmb
 dev/acpica/acpi_if.m		optional acpi
 dev/ahci/ahci_generic.c		optional ahci
+dev/axgbe/if_axgbe.c		optional axgbe
+dev/axgbe/xgbe-desc.c		optional axgbe
+dev/axgbe/xgbe-dev.c		optional axgbe
+dev/axgbe/xgbe-drv.c		optional axgbe
+dev/axgbe/xgbe-mdio.c		optional axgbe
 dev/cpufreq/cpufreq_dt.c	optional cpufreq fdt
 dev/hwpmc/hwpmc_arm64.c		optional hwpmc
 dev/hwpmc/hwpmc_arm64_md.c	optional hwpmc

sys/dev/axgbe/if_axgbe.c Normal file

@@ -0,0 +1,619 @@
/*-
* Copyright (c) 2016,2017 SoftIron Inc.
* All rights reserved.
*
* This software was developed by Andrew Turner under
* the sponsorship of SoftIron Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <machine/bus.h>
#include "miibus_if.h"
#include "xgbe.h"
#include "xgbe-common.h"
static device_probe_t	axgbe_probe;
static device_attach_t	axgbe_attach;

struct axgbe_softc {
	/* Must be first */
	struct xgbe_prv_data	prv;

	uint8_t			mac_addr[ETHER_ADDR_LEN];
	struct ifmedia		media;
};

static struct ofw_compat_data compat_data[] = {
	{ "amd,xgbe-seattle-v1a",	true },
	{ NULL,				false }
};

static struct resource_spec old_phy_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Rx/Tx regs */
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* Integration regs */
	{ SYS_RES_MEMORY,	2,	RF_ACTIVE },	/* Integration regs */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* Interrupt */
	{ -1, 0 }
};

static struct resource_spec old_mac_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* MAC regs */
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* PCS regs */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* Device interrupt */
	/* Per-channel interrupts */
	{ SYS_RES_IRQ,		1,	RF_ACTIVE | RF_OPTIONAL },
	{ SYS_RES_IRQ,		2,	RF_ACTIVE | RF_OPTIONAL },
	{ SYS_RES_IRQ,		3,	RF_ACTIVE | RF_OPTIONAL },
	{ SYS_RES_IRQ,		4,	RF_ACTIVE | RF_OPTIONAL },
	{ -1, 0 }
};

static struct resource_spec mac_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* MAC regs */
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* PCS regs */
	{ SYS_RES_MEMORY,	2,	RF_ACTIVE },	/* Rx/Tx regs */
	{ SYS_RES_MEMORY,	3,	RF_ACTIVE },	/* Integration regs */
	{ SYS_RES_MEMORY,	4,	RF_ACTIVE },	/* Integration regs */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* Device interrupt */
	/* Per-channel and auto-negotiation interrupts */
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		2,	RF_ACTIVE | RF_OPTIONAL },
	{ SYS_RES_IRQ,		3,	RF_ACTIVE | RF_OPTIONAL },
	{ SYS_RES_IRQ,		4,	RF_ACTIVE | RF_OPTIONAL },
	{ SYS_RES_IRQ,		5,	RF_ACTIVE | RF_OPTIONAL },
	{ -1, 0 }
};

MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");
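
/*
 * axgbe_init only marks the interface as running; the hardware itself is
 * brought up by xgbe_open() at the end of axgbe_attach() below.
 */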
static void
axgbe_init(void *p)
{
	struct axgbe_softc *sc;
	struct ifnet *ifp;

	sc = p;
	ifp = sc->prv.netdev;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
}

static int
axgbe_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct axgbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
			error = EINVAL;
		else
			error = xgbe_change_mtu(ifp, ifr->ifr_mtu);
		break;
	case SIOCSIFFLAGS:
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
axgbe_qflush(struct ifnet *ifp)
{

	if_qflush(ifp);
}

static int
axgbe_media_change(struct ifnet *ifp)
{
	struct axgbe_softc *sc;
	int cur_media;

	sc = ifp->if_softc;

	sx_xlock(&sc->prv.an_mutex);
	cur_media = sc->media.ifm_cur->ifm_media;

	switch (IFM_SUBTYPE(cur_media)) {
	case IFM_10G_KR:
		sc->prv.phy.speed = SPEED_10000;
		sc->prv.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_2500_KX:
		sc->prv.phy.speed = SPEED_2500;
		sc->prv.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_1000_KX:
		sc->prv.phy.speed = SPEED_1000;
		sc->prv.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_AUTO:
		sc->prv.phy.autoneg = AUTONEG_ENABLE;
		break;
	}
	sx_xunlock(&sc->prv.an_mutex);

	return (-sc->prv.phy_if.phy_config_aneg(&sc->prv));
}

static void
axgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axgbe_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	if (!sc->prv.phy.link)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->prv.phy.duplex == DUPLEX_FULL)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	switch (sc->prv.phy.speed) {
	case SPEED_10000:
		ifmr->ifm_active |= IFM_10G_KR;
		break;
	case SPEED_2500:
		ifmr->ifm_active |= IFM_2500_KX;
		break;
	case SPEED_1000:
		ifmr->ifm_active |= IFM_1000_KX;
		break;
	}
}

static uint64_t
axgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct xgbe_prv_data *pdata = ifp->if_softc;
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	switch(c) {
	case IFCOUNTER_IPACKETS:
		return (pstats->rxframecount_gb);
	case IFCOUNTER_IERRORS:
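		/*
		 * Derive input errors from the MMC counters: total Rx
		 * frames minus the good broadcast, multicast and unicast
		 * frame counts.
		 */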
		return (pstats->rxframecount_gb -
		    pstats->rxbroadcastframes_g -
		    pstats->rxmulticastframes_g -
		    pstats->rxunicastframes_g);
	case IFCOUNTER_OPACKETS:
		return (pstats->txframecount_gb);
	case IFCOUNTER_OERRORS:
		return (pstats->txframecount_gb - pstats->txframecount_g);
	case IFCOUNTER_IBYTES:
		return (pstats->rxoctetcount_gb);
	case IFCOUNTER_OBYTES:
		return (pstats->txoctetcount_gb);
	default:
		return (if_get_counter_default(ifp, c));
	}
}

static int
axgbe_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "AMD 10 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
axgbe_get_optional_prop(device_t dev, phandle_t node, const char *name,
    int *data, size_t len)
{

	if (!OF_hasprop(node, name))
		return (-1);

	if (OF_getencprop(node, name, data, len) <= 0) {
		device_printf(dev, "%s property is invalid\n", name);
		return (ENXIO);
	}

	return (0);
}

static int
axgbe_attach(device_t dev)
{
	struct axgbe_softc *sc;
	struct ifnet *ifp;
	pcell_t phy_handle;
	device_t phydev;
	phandle_t node, phy_node;
	struct resource *mac_res[11];
	struct resource *phy_res[4];
	ssize_t len;
	int error, i, j;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
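
	/*
	 * The old DT binding puts the SerDes (Rx/Tx and integration)
	 * registers on a separate PHY node referenced by phy-handle;
	 * without that property all of the MAC, PCS and SerDes resources
	 * are expected on this node.
	 */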
	if (OF_getencprop(node, "phy-handle", &phy_handle,
	    sizeof(phy_handle)) <= 0) {
		phy_node = node;

		if (bus_alloc_resources(dev, mac_spec, mac_res)) {
			device_printf(dev,
			    "could not allocate phy resources\n");
			return (ENXIO);
		}

		sc->prv.xgmac_res = mac_res[0];
		sc->prv.xpcs_res = mac_res[1];
		sc->prv.rxtx_res = mac_res[2];
		sc->prv.sir0_res = mac_res[3];
		sc->prv.sir1_res = mac_res[4];
		sc->prv.dev_irq_res = mac_res[5];
		sc->prv.per_channel_irq = OF_hasprop(node,
		    XGBE_DMA_IRQS_PROPERTY);
		for (i = 0, j = 6; j < nitems(mac_res) - 1 &&
		    mac_res[j + 1] != NULL; i++, j++) {
			if (sc->prv.per_channel_irq) {
				sc->prv.chan_irq_res[i] = mac_res[j];
			}
		}

		/* The last entry is the auto-negotiation interrupt */
		sc->prv.an_irq_res = mac_res[j];
	} else {
		phydev = OF_device_from_xref(phy_handle);
		phy_node = ofw_bus_get_node(phydev);

		if (bus_alloc_resources(phydev, old_phy_spec, phy_res)) {
			device_printf(dev,
			    "could not allocate phy resources\n");
			return (ENXIO);
		}

		if (bus_alloc_resources(dev, old_mac_spec, mac_res)) {
			device_printf(dev,
			    "could not allocate mac resources\n");
			return (ENXIO);
		}

		sc->prv.rxtx_res = phy_res[0];
		sc->prv.sir0_res = phy_res[1];
		sc->prv.sir1_res = phy_res[2];
		sc->prv.an_irq_res = phy_res[3];

		sc->prv.xgmac_res = mac_res[0];
		sc->prv.xpcs_res = mac_res[1];
		sc->prv.dev_irq_res = mac_res[2];
		sc->prv.per_channel_irq = OF_hasprop(node,
		    XGBE_DMA_IRQS_PROPERTY);
		if (sc->prv.per_channel_irq) {
			for (i = 0, j = 3; i < nitems(sc->prv.chan_irq_res) &&
			    mac_res[j] != NULL; i++, j++) {
				sc->prv.chan_irq_res[i] = mac_res[j];
			}
		}
	}

	if ((len = OF_getproplen(node, "mac-address")) < 0) {
		device_printf(dev, "No mac-address property\n");
		return (EINVAL);
	}
	if (len != ETHER_ADDR_LEN)
		return (EINVAL);
	OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN);

	sc->prv.netdev = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot alloc ifnet\n");
		return (ENXIO);
	}

	sc->prv.dev = dev;
	sc->prv.dmat = bus_get_dma_tag(dev);
	sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
	    ADVERTISED_1000baseKX_Full;

	/*
	 * Read the needed properties from the phy node.
	 */

	/* This is documented as optional, but Linux requires it */
	if (OF_getencprop(phy_node, XGBE_SPEEDSET_PROPERTY, &sc->prv.speed_set,
	    sizeof(sc->prv.speed_set)) <= 0) {
		device_printf(dev, "%s property is missing\n",
		    XGBE_SPEEDSET_PROPERTY);
		return (EINVAL);
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_BLWC_PROPERTY,
	    sc->prv.serdes_blwc, sizeof(sc->prv.serdes_blwc));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_blwc[0] = XGBE_SPEED_1000_BLWC;
		sc->prv.serdes_blwc[1] = XGBE_SPEED_2500_BLWC;
		sc->prv.serdes_blwc[2] = XGBE_SPEED_10000_BLWC;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_CDR_RATE_PROPERTY,
	    sc->prv.serdes_cdr_rate, sizeof(sc->prv.serdes_cdr_rate));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_cdr_rate[0] = XGBE_SPEED_1000_CDR;
		sc->prv.serdes_cdr_rate[1] = XGBE_SPEED_2500_CDR;
		sc->prv.serdes_cdr_rate[2] = XGBE_SPEED_10000_CDR;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_PQ_SKEW_PROPERTY,
	    sc->prv.serdes_pq_skew, sizeof(sc->prv.serdes_pq_skew));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_pq_skew[0] = XGBE_SPEED_1000_PQ;
		sc->prv.serdes_pq_skew[1] = XGBE_SPEED_2500_PQ;
		sc->prv.serdes_pq_skew[2] = XGBE_SPEED_10000_PQ;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_TX_AMP_PROPERTY,
	    sc->prv.serdes_tx_amp, sizeof(sc->prv.serdes_tx_amp));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_tx_amp[0] = XGBE_SPEED_1000_TXAMP;
		sc->prv.serdes_tx_amp[1] = XGBE_SPEED_2500_TXAMP;
		sc->prv.serdes_tx_amp[2] = XGBE_SPEED_10000_TXAMP;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_CFG_PROPERTY,
	    sc->prv.serdes_dfe_tap_cfg, sizeof(sc->prv.serdes_dfe_tap_cfg));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_dfe_tap_cfg[0] = XGBE_SPEED_1000_DFE_TAP_CONFIG;
		sc->prv.serdes_dfe_tap_cfg[1] = XGBE_SPEED_2500_DFE_TAP_CONFIG;
		sc->prv.serdes_dfe_tap_cfg[2] = XGBE_SPEED_10000_DFE_TAP_CONFIG;
	}

	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_ENA_PROPERTY,
	    sc->prv.serdes_dfe_tap_ena, sizeof(sc->prv.serdes_dfe_tap_ena));
	if (error > 0) {
		return (error);
	} else if (error < 0) {
		sc->prv.serdes_dfe_tap_ena[0] = XGBE_SPEED_1000_DFE_TAP_ENABLE;
		sc->prv.serdes_dfe_tap_ena[1] = XGBE_SPEED_2500_DFE_TAP_ENABLE;
		sc->prv.serdes_dfe_tap_ena[2] = XGBE_SPEED_10000_DFE_TAP_ENABLE;
	}

	/* Check if the NIC is DMA coherent */
	sc->prv.coherent = OF_hasprop(node, "dma-coherent");
	if (sc->prv.coherent) {
		sc->prv.axdomain = XGBE_DMA_OS_AXDOMAIN;
		sc->prv.arcache = XGBE_DMA_OS_ARCACHE;
		sc->prv.awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		sc->prv.axdomain = XGBE_DMA_SYS_AXDOMAIN;
		sc->prv.arcache = XGBE_DMA_SYS_ARCACHE;
		sc->prv.awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Create the lock & workqueues */
	spin_lock_init(&sc->prv.xpcs_lock);
	sc->prv.dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->prv.dev_workqueue);
	taskqueue_start_threads(&sc->prv.dev_workqueue, 1, PI_NET,
	    "axgbe taskq");

	/* Set the needed pointers */
	xgbe_init_function_ptrs_phy(&sc->prv.phy_if);
	xgbe_init_function_ptrs_dev(&sc->prv.hw_if);
	xgbe_init_function_ptrs_desc(&sc->prv.desc_if);

	/* Reset the hardware */
	sc->prv.hw_if.exit(&sc->prv);

	/* Read the hardware features */
	xgbe_get_all_hw_features(&sc->prv);

	/* Set default values */
	sc->prv.pblx8 = DMA_PBL_X8_ENABLE;
	sc->prv.tx_desc_count = XGBE_TX_DESC_CNT;
	sc->prv.tx_sf_mode = MTL_TSF_ENABLE;
	sc->prv.tx_threshold = MTL_TX_THRESHOLD_64;
	sc->prv.tx_pbl = DMA_PBL_16;
	sc->prv.tx_osp_mode = DMA_OSP_ENABLE;
	sc->prv.rx_desc_count = XGBE_RX_DESC_CNT;
	sc->prv.rx_sf_mode = MTL_RSF_DISABLE;
	sc->prv.rx_threshold = MTL_RX_THRESHOLD_64;
	sc->prv.rx_pbl = DMA_PBL_16;
	sc->prv.pause_autoneg = 1;
	sc->prv.tx_pause = 1;
	sc->prv.rx_pause = 1;
	sc->prv.phy_speed = SPEED_UNKNOWN;
	sc->prv.power_down = 0;

	/* TODO: Limit to min(ncpus, hw rings) */
	sc->prv.tx_ring_count = 1;
	sc->prv.tx_q_count = 1;
	sc->prv.rx_ring_count = 1;
	sc->prv.rx_q_count = sc->prv.hw_feat.rx_q_cnt;

	/* Init the PHY */
	sc->prv.phy_if.phy_init(&sc->prv);

	/* Set the coalescing */
	xgbe_init_rx_coalesce(&sc->prv);
	xgbe_init_tx_coalesce(&sc->prv);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = axgbe_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = axgbe_ioctl;
	ifp->if_transmit = xgbe_xmit;
	ifp->if_qflush = axgbe_qflush;
	ifp->if_get_counter = axgbe_get_counter;

	/* TODO: Support HW offload */
	ifp->if_capabilities = 0;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	ether_ifattach(ifp, sc->mac_addr);

	ifmedia_init(&sc->media, IFM_IMASK, axgbe_media_change,
	    axgbe_media_status);
#ifdef notyet
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
#endif
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
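
	/* Mark the device down until xgbe_open() below brings it up */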
	set_bit(XGBE_DOWN, &sc->prv.dev_state);

	if (xgbe_open(ifp) < 0) {
		device_printf(dev, "ndo_open failed\n");
		return (ENXIO);
	}

	return (0);
}

static device_method_t axgbe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		axgbe_probe),
	DEVMETHOD(device_attach,	axgbe_attach),

	{ 0, 0 }
};

static devclass_t axgbe_devclass;

DEFINE_CLASS_0(axgbe, axgbe_driver, axgbe_methods,
    sizeof(struct axgbe_softc));
DRIVER_MODULE(axgbe, simplebus, axgbe_driver, axgbe_devclass, 0, 0);

static struct ofw_compat_data phy_compat_data[] = {
	{ "amd,xgbe-phy-seattle-v1a",	true },
	{ NULL,				false }
};

static int
axgbephy_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, phy_compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "AMD 10 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
axgbephy_attach(device_t dev)
{
	phandle_t node;

	node = ofw_bus_get_node(dev);
	OF_device_register_xref(OF_xref_from_node(node), dev);

	return (0);
}

static device_method_t axgbephy_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		axgbephy_probe),
	DEVMETHOD(device_attach,	axgbephy_attach),

	{ 0, 0 }
};

static devclass_t axgbephy_devclass;

DEFINE_CLASS_0(axgbephy, axgbephy_driver, axgbephy_methods, 0);
EARLY_DRIVER_MODULE(axgbephy, simplebus, axgbephy_driver, axgbephy_devclass,
    0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE);

sys/dev/axgbe/xgbe-common.h

@@ -112,11 +112,16 @@
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
  */
 
 #ifndef __XGBE_COMMON_H__
 #define __XGBE_COMMON_H__
 
+#include <sys/bus.h>
+#include <sys/rman.h>
+
 /* DMA register offsets */
 #define DMA_MR				0x3000
 #define DMA_SBMR			0x3004
 
@@ -1123,7 +1128,7 @@ do { \
  * register definitions formed using the input names
  */
 #define XGMAC_IOREAD(_pdata, _reg) \
-	ioread32((_pdata)->xgmac_regs + _reg)
+	bus_read_4((_pdata)->xgmac_res, _reg)
 
 #define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
 	GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
@@ -1131,7 +1136,7 @@ do { \
 		 _reg##_##_field##_WIDTH)
 
 #define XGMAC_IOWRITE(_pdata, _reg, _val) \
-	iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+	bus_write_4((_pdata)->xgmac_res, _reg, (_val))
 
 #define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
 do { \
@@ -1147,7 +1152,7 @@ do { \
  * base register value is calculated by the queue or traffic class number
  */
 #define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
-	ioread32((_pdata)->xgmac_regs + \
+	bus_read_4((_pdata)->xgmac_res, \
		  MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
 
 #define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
@@ -1156,8 +1161,8 @@ do { \
		 _reg##_##_field##_WIDTH)
 
 #define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
-	iowrite32((_val), (_pdata)->xgmac_regs + \
-		  MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+	bus_write_4((_pdata)->xgmac_res, \
+		  MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg, (_val))
 
 #define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
 do { \
@@ -1173,7 +1178,7 @@ do { \
  * base register value is obtained from the ring
  */
 #define XGMAC_DMA_IOREAD(_channel, _reg) \
-	ioread32((_channel)->dma_regs + _reg)
+	bus_space_read_4((_channel)->dma_tag, (_channel)->dma_handle, _reg)
 
 #define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
 	GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
@@ -1181,7 +1186,8 @@ do { \
		 _reg##_##_field##_WIDTH)
 
 #define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
-	iowrite32((_val), (_channel)->dma_regs + _reg)
+	bus_space_write_4((_channel)->dma_tag, (_channel)->dma_handle, \
+	    _reg, (_val))
 
 #define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
 do { \
@@ -1196,10 +1202,10 @@ do { \
  * within the register values of XPCS registers.
  */
 #define XPCS_IOWRITE(_pdata, _off, _val) \
-	iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+	bus_write_4((_pdata)->xpcs_res, (_off), _val)
 
 #define XPCS_IOREAD(_pdata, _off) \
-	ioread32((_pdata)->xpcs_regs + (_off))
+	bus_read_4((_pdata)->xpcs_res, (_off))
 
 /* Macros for building, reading or writing register values or bits
  * within the register values of SerDes integration registers.
@@ -1215,7 +1221,7 @@ do { \
		   _prefix##_##_field##_WIDTH, (_val))
 
 #define XSIR0_IOREAD(_pdata, _reg) \
-	ioread16((_pdata)->sir0_regs + _reg)
+	bus_read_2((_pdata)->sir0_res, _reg)
 
 #define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
 	GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
@@ -1223,7 +1229,7 @@ do { \
		 _reg##_##_field##_WIDTH)
 
 #define XSIR0_IOWRITE(_pdata, _reg, _val) \
-	iowrite16((_val), (_pdata)->sir0_regs + _reg)
+	bus_write_2((_pdata)->sir0_res, _reg, (_val))
 
 #define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
 do { \
@@ -1235,7 +1241,7 @@ do { \
 } while (0)
 
 #define XSIR1_IOREAD(_pdata, _reg) \
-	ioread16((_pdata)->sir1_regs + _reg)
+	bus_read_2((_pdata)->sir1_res, _reg)
 
 #define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
 	GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
@@ -1243,7 +1249,7 @@ do { \
		 _reg##_##_field##_WIDTH)
 
 #define XSIR1_IOWRITE(_pdata, _reg, _val) \
-	iowrite16((_val), (_pdata)->sir1_regs + _reg)
+	bus_write_2((_pdata)->sir1_res, _reg, (_val))
 
 #define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
 do { \
@@ -1258,7 +1264,7 @@ do { \
  * within the register values of SerDes RxTx registers.
  */
 #define XRXTX_IOREAD(_pdata, _reg) \
-	ioread16((_pdata)->rxtx_regs + _reg)
+	bus_read_2((_pdata)->rxtx_res, _reg)
 
 #define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
 	GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
@@ -1266,7 +1272,7 @@ do { \
		 _reg##_##_field##_WIDTH)
 
 #define XRXTX_IOWRITE(_pdata, _reg, _val) \
-	iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+	bus_write_2((_pdata)->rxtx_res, _reg, (_val))
 
 #define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
 do { \

sys/dev/axgbe/xgbe-desc.c

@@ -114,6 +114,9 @@
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -128,46 +131,30 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 	if (!ring)
 		return;
 
+	bus_dmamap_destroy(ring->mbuf_dmat, ring->mbuf_map);
+	bus_dma_tag_destroy(ring->mbuf_dmat);
+	ring->mbuf_map = NULL;
+	ring->mbuf_dmat = NULL;
+
 	if (ring->rdata) {
 		for (i = 0; i < ring->rdesc_count; i++) {
 			rdata = XGBE_GET_DESC_DATA(ring, i);
 			xgbe_unmap_rdata(pdata, rdata);
 		}
-		kfree(ring->rdata);
+		free(ring->rdata, M_AXGBE);
 		ring->rdata = NULL;
 	}
 
-	if (ring->rx_hdr_pa.pages) {
-		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
-			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
-		put_page(ring->rx_hdr_pa.pages);
-
-		ring->rx_hdr_pa.pages = NULL;
-		ring->rx_hdr_pa.pages_len = 0;
-		ring->rx_hdr_pa.pages_offset = 0;
-		ring->rx_hdr_pa.pages_dma = 0;
-	}
-
-	if (ring->rx_buf_pa.pages) {
-		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
-			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
-		put_page(ring->rx_buf_pa.pages);
-
-		ring->rx_buf_pa.pages = NULL;
-		ring->rx_buf_pa.pages_len = 0;
-		ring->rx_buf_pa.pages_offset = 0;
-		ring->rx_buf_pa.pages_dma = 0;
-	}
-
-	if (ring->rdesc) {
-		dma_free_coherent(pdata->dev,
-				  (sizeof(struct xgbe_ring_desc) *
-				  ring->rdesc_count),
-				  ring->rdesc, ring->rdesc_dma);
+	bus_dmamap_unload(ring->rdesc_dmat, ring->rdesc_map);
+	bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
+	bus_dma_tag_destroy(ring->rdesc_dmat);
+
+	ring->rdesc_map = NULL;
+	ring->rdesc_dmat = NULL;
 	ring->rdesc = NULL;
-	}
 }
 
 static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
 {
@@ -185,32 +172,71 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
 	DBGPR("<--xgbe_free_ring_resources\n");
 }
 
+static void xgbe_ring_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg,
+    int error)
+{
+
+	if (error)
+		return;
+	*(bus_addr_t *) arg = segs->ds_addr;
+}
+
 static int xgbe_init_ring(struct xgbe_prv_data *pdata,
 			  struct xgbe_ring *ring, unsigned int rdesc_count)
 {
+	bus_size_t len;
+	int err, flags;
+
 	DBGPR("-->xgbe_init_ring\n");
 
 	if (!ring)
 		return 0;
 
+	flags = 0;
+	if (pdata->coherent)
+		flags = BUS_DMA_COHERENT;
+
 	/* Descriptors */
 	ring->rdesc_count = rdesc_count;
-	ring->rdesc = dma_alloc_coherent(pdata->dev,
-					 (sizeof(struct xgbe_ring_desc) *
-					 rdesc_count), &ring->rdesc_dma,
-					 GFP_KERNEL);
-	if (!ring->rdesc)
-		return -ENOMEM;
+	len = sizeof(struct xgbe_ring_desc) * rdesc_count;
+	err = bus_dma_tag_create(pdata->dmat, 512, 0, BUS_SPACE_MAXADDR,
+	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, flags, NULL, NULL,
+	    &ring->rdesc_dmat);
+	if (err != 0) {
+		printf("Unable to create the DMA tag: %d\n", err);
+		return -err;
+	}
+
+	err = bus_dmamem_alloc(ring->rdesc_dmat, (void **)&ring->rdesc,
+	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &ring->rdesc_map);
+	if (err != 0) {
+		bus_dma_tag_destroy(ring->rdesc_dmat);
+		printf("Unable to allocate DMA memory: %d\n", err);
+		return -err;
+	}
+	err = bus_dmamap_load(ring->rdesc_dmat, ring->rdesc_map, ring->rdesc,
+	    len, xgbe_ring_dmamap_cb, &ring->rdesc_paddr, 0);
+	if (err != 0) {
+		bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
+		bus_dma_tag_destroy(ring->rdesc_dmat);
+		printf("Unable to load DMA memory\n");
+		return -err;
+	}
 
 	/* Descriptor information */
-	ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
-			      GFP_KERNEL);
-	if (!ring->rdata)
-		return -ENOMEM;
-
-	netif_dbg(pdata, drv, pdata->netdev,
-		  "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
-		  ring->rdesc, &ring->rdesc_dma, ring->rdata);
+	ring->rdata = malloc(rdesc_count * sizeof(struct xgbe_ring_data),
+	    M_AXGBE, M_WAITOK | M_ZERO);
+
+	/* Create the space DMA tag for mbufs */
+	err = bus_dma_tag_create(pdata->dmat, 1, 0, BUS_SPACE_MAXADDR,
+	    BUS_SPACE_MAXADDR, NULL, NULL, XGBE_TX_MAX_BUF_SIZE * rdesc_count,
+	    rdesc_count, XGBE_TX_MAX_BUF_SIZE, flags, NULL, NULL,
+	    &ring->mbuf_dmat);
+	if (err != 0)
+		return -err;
+
+	err = bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
+	if (err != 0)
+		return -err;
 
 	DBGPR("<--xgbe_init_ring\n");
@@ -227,25 +253,17 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
-		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
-			  channel->name);
-
 		ret = xgbe_init_ring(pdata, channel->tx_ring,
 				     pdata->tx_desc_count);
 		if (ret) {
-			netdev_alert(pdata->netdev,
-				     "error initializing Tx ring\n");
+			printf("error initializing Tx ring\n");
 			goto err_ring;
 		}
 
-		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
-			  channel->name);
-
 		ret = xgbe_init_ring(pdata, channel->rx_ring,
 				     pdata->rx_desc_count);
 		if (ret) {
-			netdev_alert(pdata->netdev,
-				     "error initializing Rx ring\n");
+			printf("error initializing Rx ring\n");
 			goto err_ring;
 		}
 	}
@@ -260,93 +278,58 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
 	return ret;
 }
 
-static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
-			    struct xgbe_page_alloc *pa, gfp_t gfp, int order)
-{
-	struct page *pages = NULL;
-	dma_addr_t pages_dma;
-	int ret;
-
-	/* Try to obtain pages, decreasing order if necessary */
-	gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
-	while (order >= 0) {
-		pages = alloc_pages(gfp, order);
-		if (pages)
-			break;
-
-		order--;
-	}
-	if (!pages)
-		return -ENOMEM;
-
-	/* Map the pages */
-	pages_dma = dma_map_page(pdata->dev, pages, 0,
-				 PAGE_SIZE << order, DMA_FROM_DEVICE);
-	ret = dma_mapping_error(pdata->dev, pages_dma);
-	if (ret) {
-		put_page(pages);
-		return ret;
-	}
-
-	pa->pages = pages;
-	pa->pages_len = PAGE_SIZE << order;
-	pa->pages_offset = 0;
-	pa->pages_dma = pages_dma;
-
-	return 0;
-}
-
-static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
-				 struct xgbe_page_alloc *pa,
-				 unsigned int len)
-{
-	get_page(pa->pages);
-	bd->pa = *pa;
-
-	bd->dma_base = pa->pages_dma;
-	bd->dma_off = pa->pages_offset;
-	bd->dma_len = len;
-
-	pa->pages_offset += len;
-	if ((pa->pages_offset + len) > pa->pages_len) {
-		/* This data descriptor is responsible for unmapping page(s) */
-		bd->pa_unmap = *pa;
-
-		/* Get a new allocation next time */
-		pa->pages = NULL;
-		pa->pages_len = 0;
-		pa->pages_offset = 0;
-		pa->pages_dma = 0;
-	}
-}
-
 static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
 			      struct xgbe_ring *ring,
 			      struct xgbe_ring_data *rdata)
 {
-	int order, ret;
+	bus_dmamap_t mbuf_map;
+	bus_dma_segment_t segs[2];
+	struct mbuf *m0, *m1;
+	int err, nsegs;
 
-	if (!ring->rx_hdr_pa.pages) {
-		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
-		if (ret)
-			return ret;
-	}
+	m0 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
+	if (m0 == NULL)
+		return (-ENOBUFS);
 
-	if (!ring->rx_buf_pa.pages) {
-		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
-		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
-				       order);
-		if (ret)
-			return ret;
-	}
+	m1 = m_getjcl(M_NOWAIT, MT_DATA, 0, MCLBYTES);
+	if (m1 == NULL) {
+		m_freem(m0);
+		return (-ENOBUFS);
+	}
 
-	/* Set up the header page info */
-	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
-			     XGBE_SKB_ALLOC_SIZE);
+	m0->m_next = m1;
+	m0->m_flags |= M_PKTHDR;
+	m0->m_len = MHLEN;
+	m0->m_pkthdr.len = MHLEN + MCLBYTES;
+
+	m1->m_len = MCLBYTES;
+	m1->m_next = NULL;
+	m1->m_pkthdr.len = MCLBYTES;
 
-	/* Set up the buffer page info */
-	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
-			     pdata->rx_buf_size);
+	err = bus_dmamap_create(ring->mbuf_dmat, 0, &mbuf_map);
+	if (err != 0) {
+		m_freem(m0);
+		return (-err);
+	}
+
+	err = bus_dmamap_load_mbuf_sg(ring->mbuf_dmat, mbuf_map, m0, segs,
+	    &nsegs, BUS_DMA_NOWAIT);
+	if (err != 0) {
+		m_freem(m0);
+		bus_dmamap_destroy(ring->mbuf_dmat, mbuf_map);
+		return (-err);
+	}
+
+	KASSERT(nsegs == 2,
+	    ("xgbe_map_rx_buffer: Unable to handle multiple segments %d",
+	    nsegs));
+
+	rdata->mb = m0;
+	rdata->mbuf_free = 0;
+	rdata->mbuf_dmat = ring->mbuf_dmat;
+	rdata->mbuf_map = mbuf_map;
+	rdata->mbuf_hdr_paddr = segs[0].ds_addr;
+	rdata->mbuf_data_paddr = segs[1].ds_addr;
 
 	return 0;
 }
@@ -358,7 +341,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 	struct xgbe_ring *ring;
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
-	dma_addr_t rdesc_dma;
+	bus_addr_t rdesc_paddr;
 	unsigned int i, j;
 
 	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
@@ -370,16 +353,16 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 			break;
 
 		rdesc = ring->rdesc;
-		rdesc_dma = ring->rdesc_dma;
+		rdesc_paddr = ring->rdesc_paddr;
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
 
 			rdata->rdesc = rdesc;
-			rdata->rdesc_dma = rdesc_dma;
+			rdata->rdata_paddr = rdesc_paddr;
 
 			rdesc++;
-			rdesc_dma += sizeof(struct xgbe_ring_desc);
+			rdesc_paddr += sizeof(struct xgbe_ring_desc);
 		}
 
 		ring->cur = 0;
@@ -399,7 +382,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 	struct xgbe_ring *ring;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_ring_data *rdata;
-	dma_addr_t rdesc_dma;
+	bus_addr_t rdesc_paddr;
 	unsigned int i, j;
 
 	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -411,19 +394,19 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 			break;
 
 		rdesc = ring->rdesc;
-		rdesc_dma = ring->rdesc_dma;
+		rdesc_paddr = ring->rdesc_paddr;
 
 		for (j = 0; j < ring->rdesc_count; j++) {
 			rdata = XGBE_GET_DESC_DATA(ring, j);
 
 			rdata->rdesc = rdesc;
-			rdata->rdesc_dma = rdesc_dma;
+			rdata->rdata_paddr = rdesc_paddr;
 
 			if (xgbe_map_rx_buffer(pdata, ring, rdata))
 				break;
 
 			rdesc++;
-			rdesc_dma += sizeof(struct xgbe_ring_desc);
+			rdesc_paddr += sizeof(struct xgbe_ring_desc);
 		}
 
 		ring->cur = 0;
@@ -431,78 +414,81 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 		hw_if->rx_desc_init(channel);
 	}
 
 	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
 }
 
 static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
 			     struct xgbe_ring_data *rdata)
 {
-	if (rdata->skb_dma) {
-		if (rdata->mapped_as_page) {
-			dma_unmap_page(pdata->dev, rdata->skb_dma,
-				       rdata->skb_dma_len, DMA_TO_DEVICE);
-		} else {
-			dma_unmap_single(pdata->dev, rdata->skb_dma,
-					 rdata->skb_dma_len, DMA_TO_DEVICE);
-		}
-		rdata->skb_dma = 0;
-		rdata->skb_dma_len = 0;
-	}
-
-	if (rdata->skb) {
-		dev_kfree_skb_any(rdata->skb);
-		rdata->skb = NULL;
-	}
-
-	if (rdata->rx.hdr.pa.pages)
-		put_page(rdata->rx.hdr.pa.pages);
-
-	if (rdata->rx.hdr.pa_unmap.pages) {
-		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
-			       rdata->rx.hdr.pa_unmap.pages_len,
-			       DMA_FROM_DEVICE);
-		put_page(rdata->rx.hdr.pa_unmap.pages);
-	}
-
-	if (rdata->rx.buf.pa.pages)
-		put_page(rdata->rx.buf.pa.pages);
-
-	if (rdata->rx.buf.pa_unmap.pages) {
-		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
-			       rdata->rx.buf.pa_unmap.pages_len,
-			       DMA_FROM_DEVICE);
-		put_page(rdata->rx.buf.pa_unmap.pages);
-	}
+	if (rdata->mbuf_map != NULL)
+		bus_dmamap_destroy(rdata->mbuf_dmat, rdata->mbuf_map);
+
+	if (rdata->mbuf_free)
+		m_freem(rdata->mb);
+
+	rdata->mb = NULL;
+	rdata->mbuf_free = 0;
+	rdata->mbuf_hdr_paddr = 0;
+	rdata->mbuf_data_paddr = 0;
+	rdata->mbuf_len = 0;
 
 	memset(&rdata->tx, 0, sizeof(rdata->tx));
 	memset(&rdata->rx, 0, sizeof(rdata->rx));
-
-	rdata->mapped_as_page = 0;
-
-	if (rdata->state_saved) {
-		rdata->state_saved = 0;
-		rdata->state.skb = NULL;
-		rdata->state.len = 0;
-		rdata->state.error = 0;
-	}
 }
 
-static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+struct xgbe_map_tx_skb_data {
+	struct xgbe_ring *ring;
+	struct xgbe_packet_data *packet;
+	unsigned int cur_index;
+};
+
+static void xgbe_map_tx_skb_cb(void *callback_arg, bus_dma_segment_t *segs,
+    int nseg, bus_size_t mapsize, int error)
+{
+	struct xgbe_map_tx_skb_data *data;
+	struct xgbe_ring_data *rdata;
+	struct xgbe_ring *ring;
+	int i;
+
+	if (error != 0)
+		return;
+
+	data = callback_arg;
+	ring = data->ring;
+
+	for (i = 0; i < nseg; i++) {
+		rdata = XGBE_GET_DESC_DATA(ring, data->cur_index);
+
+		KASSERT(segs[i].ds_len <= XGBE_TX_MAX_BUF_SIZE,
+		    ("%s: Segment size is too large %ld > %d", __func__,
+		    segs[i].ds_len, XGBE_TX_MAX_BUF_SIZE));
+
+		if (i == 0) {
+			rdata->mbuf_dmat = ring->mbuf_dmat;
+			bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
+		}
+
+		rdata->mbuf_hdr_paddr = 0;
+		rdata->mbuf_data_paddr = segs[i].ds_addr;
+		rdata->mbuf_len = segs[i].ds_len;
+
+		data->packet->length += rdata->mbuf_len;
+
+		data->cur_index++;
+	}
+}
+
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct mbuf *m)
 {
-	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->tx_ring;
+	struct xgbe_map_tx_skb_data cbdata;
 	struct xgbe_ring_data *rdata;
 	struct xgbe_packet_data *packet;
-	struct skb_frag_struct *frag;
-	dma_addr_t skb_dma;
 	unsigned int start_index, cur_index;
-	unsigned int offset, tso, vlan, datalen, len;
-	unsigned int i;
+	int err;
 
 	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
 
-	offset = 0;
 	start_index = ring->cur;
 	cur_index = ring->cur;
@@ -510,105 +496,24 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 	packet->rdesc_count = 0;
 	packet->length = 0;
 
-	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
-			     TSO_ENABLE);
-	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
-			      VLAN_CTAG);
-
-	/* Save space for a context descriptor if needed */
-	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
-	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
-		cur_index++;
-	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
-
-	if (tso) {
-		/* Map the TSO header */
-		skb_dma = dma_map_single(pdata->dev, skb->data,
-					 packet->header_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(pdata->dev, skb_dma)) {
-			netdev_alert(pdata->netdev, "dma_map_single failed\n");
-			goto err_out;
-		}
-		rdata->skb_dma = skb_dma;
-		rdata->skb_dma_len = packet->header_len;
-		netif_dbg(pdata, tx_queued, pdata->netdev,
-			  "skb header: index=%u, dma=%pad, len=%u\n",
-			  cur_index, &skb_dma, packet->header_len);
-
-		offset = packet->header_len;
-		packet->length += packet->header_len;
-
-		cur_index++;
-		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
-	}
-
-	/* Map the (remainder of the) packet */
-	for (datalen = skb_headlen(skb) - offset; datalen; ) {
-		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
-
-		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(pdata->dev, skb_dma)) {
-			netdev_alert(pdata->netdev, "dma_map_single failed\n");
-			goto err_out;
-		}
-		rdata->skb_dma = skb_dma;
-		rdata->skb_dma_len = len;
-		netif_dbg(pdata, tx_queued, pdata->netdev,
-			  "skb data: index=%u, dma=%pad, len=%u\n",
-			  cur_index, &skb_dma, len);
-
-		datalen -= len;
-		offset += len;
-		packet->length += len;
-
-		cur_index++;
-		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
-	}
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		netif_dbg(pdata, tx_queued, pdata->netdev,
-			  "mapping frag %u\n", i);
-
-		frag = &skb_shinfo(skb)->frags[i];
-		offset = 0;
-
-		for (datalen = skb_frag_size(frag); datalen; ) {
-			len = min_t(unsigned int, datalen,
-				    XGBE_TX_MAX_BUF_SIZE);
-
-			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
-						   len, DMA_TO_DEVICE);
-			if (dma_mapping_error(pdata->dev, skb_dma)) {
-				netdev_alert(pdata->netdev,
-					     "skb_frag_dma_map failed\n");
-				goto err_out;
-			}
-			rdata->skb_dma = skb_dma;
-			rdata->skb_dma_len = len;
-			rdata->mapped_as_page = 1;
-			netif_dbg(pdata, tx_queued, pdata->netdev,
-				  "skb frag: index=%u, dma=%pad, len=%u\n",
-				  cur_index, &skb_dma, len);
-
-			datalen -= len;
-			offset += len;
-			packet->length += len;
-
-			cur_index++;
-			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
-		}
-	}
-
-	/* Save the skb address in the last entry. We always have some data
+	cbdata.ring = ring;
+	cbdata.packet = packet;
+	cbdata.cur_index = cur_index;
+
+	err = bus_dmamap_load_mbuf(ring->mbuf_dmat, ring->mbuf_map, m,
+	    xgbe_map_tx_skb_cb, &cbdata, BUS_DMA_NOWAIT);
+	if (err != 0)	/* TODO: Undo the mapping */
+		return (-err);
+
+	cur_index = cbdata.cur_index;
+
+	/* Save the mbuf address in the last entry. We always have some data
 	 * that has been mapped so rdata is always advanced past the last
 	 * piece of mapped data - use the entry pointed to by cur_index - 1.
 	 */
 	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
-	rdata->skb = skb;
+	rdata->mb = m;
+	rdata->mbuf_free = 1;
 
 	/* Save the number of descriptor entries used */
 	packet->rdesc_count = cur_index - start_index;
 
@@ -616,16 +521,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
 
 	return packet->rdesc_count;
-
-err_out:
-	while (start_index < cur_index) {
-		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
-		xgbe_unmap_rdata(pdata, rdata);
-	}
-
-	DBGPR("<--xgbe_map_tx_skb: count=0\n");
-
-	return 0;
 }
 
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)

sys/dev/axgbe/xgbe-dev.c: file diff suppressed because it is too large

sys/dev/axgbe/xgbe-drv.c: file diff suppressed because it is too large

sys/dev/axgbe/xgbe-mdio.c

@@ -114,17 +114,17 @@
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/of.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
 
+static void xgbe_an_state_machine(struct xgbe_prv_data *pdata);
+
 static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
 {
 	unsigned int reg;
@@ -154,7 +154,7 @@ static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
 	reg |= MDIO_CTRL1_LPOWER;
 	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
 
-	usleep_range(75, 100);
+	DELAY(75);
 
 	reg &= ~MDIO_CTRL1_LPOWER;
 	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
 
@@ -177,7 +177,7 @@ static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
 	/* Wait for Rx and Tx ready */
 	wait = XGBE_RATECHANGE_COUNT;
 	while (wait--) {
-		usleep_range(50, 75);
+		DELAY(50);
 
 		status = XSIR0_IOREAD(pdata, SIR0_STATUS);
 		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
@@ -185,9 +185,6 @@ static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
 			goto rx_reset;
 	}
 
-	netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
-		  status);
-
 rx_reset:
 	/* Perform Rx reset for the DFE changes */
 	XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
 
@@ -238,8 +235,6 @@ static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
 			    pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
 
 	xgbe_serdes_complete_ratechange(pdata);
-
-	netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
 }
 
 static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
@@ -286,8 +281,6 @@ static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
 			    pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
 
 	xgbe_serdes_complete_ratechange(pdata);
-
-	netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
 }
 
 static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
@@ -334,8 +327,6 @@ static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
 			    pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
 
 	xgbe_serdes_complete_ratechange(pdata);
-
-	netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
 }
 
 static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
@@ -440,15 +431,11 @@ static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
 static void xgbe_restart_an(struct xgbe_prv_data *pdata)
 {
 	xgbe_set_an(pdata, true, true);
-
-	netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
 }
 
 static void xgbe_disable_an(struct xgbe_prv_data *pdata)
 {
 	xgbe_set_an(pdata, false, false);
-
-	netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
 }
 
 static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
@@ -483,9 +470,6 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
 			    reg);
 
 		XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
-
-		netif_dbg(pdata, link, pdata->netdev,
-			  "KR training initiated\n");
 	}
 
 	return XGBE_AN_PAGE_RECEIVED;
@@ -554,19 +538,16 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
 	enum xgbe_an ret;
 
 	if (!pdata->an_start) {
-		pdata->an_start = jiffies;
+		pdata->an_start = ticks;
 	} else {
 		an_timeout = pdata->an_start +
-			     msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
-		if (time_after(jiffies, an_timeout)) {
+		    ((uint64_t)XGBE_AN_MS_TIMEOUT * (uint64_t)hz) / 1000ull;
+		if ((int)(ticks - an_timeout) > 0) {
 			/* Auto-negotiation timed out, reset state */
 			pdata->kr_state = XGBE_RX_BPA;
 			pdata->kx_state = XGBE_RX_BPA;
 
-			pdata->an_start = jiffies;
-
-			netif_dbg(pdata, link, pdata->netdev,
-				  "AN timed out, resetting state\n");
+			pdata->an_start = ticks;
 		}
 	}
 
@@ -620,12 +601,10 @@ static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
 	return XGBE_AN_INCOMPAT_LINK;
 }
 
-static irqreturn_t xgbe_an_isr(int irq, void *data)
+static void xgbe_an_isr(void *data)
 {
 	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
 
-	netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
-
 	/* Disable AN interrupts */
 	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
@@ -636,57 +615,19 @@ static void xgbe_an_isr(void *data)
 		/* Clear the interrupt(s) that fired and process them */
 		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
 
-		queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+		xgbe_an_state_machine(pdata);
 	} else {
 		/* Enable AN interrupts */
 		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
 			    XGBE_AN_INT_MASK);
 	}
-
-	return IRQ_HANDLED;
 }
 
-static void xgbe_an_irq_work(struct work_struct *work)
-{
-	struct xgbe_prv_data *pdata = container_of(work,
-						   struct xgbe_prv_data,
-						   an_irq_work);
-
-	/* Avoid a race between enabling the IRQ and exiting the work by
-	 * waiting for the work to finish and then queueing it
-	 */
-	flush_work(&pdata->an_work);
-	queue_work(pdata->an_workqueue, &pdata->an_work);
-}
-
-static const char *xgbe_state_as_string(enum xgbe_an state)
-{
-	switch (state) {
-	case XGBE_AN_READY:
-		return "Ready";
-	case XGBE_AN_PAGE_RECEIVED:
-		return "Page-Received";
-	case XGBE_AN_INCOMPAT_LINK:
-		return "Incompatible-Link";
-	case XGBE_AN_COMPLETE:
-		return "Complete";
-	case XGBE_AN_NO_LINK:
-		return "No-Link";
-	case XGBE_AN_ERROR:
-		return "Error";
-	default:
-		return "Undefined";
-	}
-}
-
-static void xgbe_an_state_machine(struct work_struct *work)
+static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
 {
-	struct xgbe_prv_data *pdata = container_of(work,
-						   struct xgbe_prv_data,
-						   an_work);
 	enum xgbe_an cur_state = pdata->an_state;
 
-	mutex_lock(&pdata->an_mutex);
+	sx_xlock(&pdata->an_mutex);
 
 	if (!pdata->an_int)
 		goto out;
@@ -708,9 +649,6 @@ static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
 	pdata->an_result = pdata->an_state;
 
 again:
-	netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
-		  xgbe_state_as_string(pdata->an_state));
-
 	cur_state = pdata->an_state;
 
 	switch (pdata->an_state) {
@@ -731,9 +669,6 @@ static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
 	case XGBE_AN_COMPLETE:
 		pdata->parallel_detect = pdata->an_supported ? 0 : 1;
-
-		netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
-			  pdata->an_supported ? "Auto negotiation"
-					      : "Parallel detection");
 		break;
 
 	case XGBE_AN_NO_LINK:
@@ -747,10 +682,6 @@ static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
 		pdata->an_int = 0;
 		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
 	} else if (pdata->an_state == XGBE_AN_ERROR) {
-		netdev_err(pdata->netdev,
-			   "error during auto-negotiation, state=%u\n",
-			   cur_state);
-
 		pdata->an_int = 0;
 		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
 	}
@@ -761,9 +692,6 @@ static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
 		pdata->kr_state = XGBE_RX_BPA;
 		pdata->kx_state = XGBE_RX_BPA;
 		pdata->an_start = 0;
-
-		netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
-			  xgbe_state_as_string(pdata->an_result));
 	}
 
 	if (cur_state != pdata->an_state)
@@ -776,7 +704,7 @@ static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
 	/* Enable AN interrupts on the way out */
 	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
 
-	mutex_unlock(&pdata->an_mutex);
+	sx_xunlock(&pdata->an_mutex);
 }
 
 static void xgbe_an_init(struct xgbe_prv_data *pdata)
@@ -785,9 +713,6 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
 	/* Set up Advertisement register 3 first */
 	reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
-	if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
-		reg |= 0xc000;
-	else
 	reg &= ~0xc000;
 
 	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
@@ -823,48 +748,6 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
 	reg &= ~XGBE_XNP_NP_EXCHANGE;
 	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
-
-	netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
 }
-
-static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
-{
-	if (pdata->tx_pause && pdata->rx_pause)
-		return "rx/tx";
-	else if (pdata->rx_pause)
-		return "rx";
-	else if (pdata->tx_pause)
-		return "tx";
-	else
-		return "off";
-}
-
-static const char *xgbe_phy_speed_string(int speed)
-{
-	switch (speed) {
-	case SPEED_1000:
-		return "1Gbps";
-	case SPEED_2500:
-		return "2.5Gbps";
-	case SPEED_10000:
-		return "10Gbps";
-	case SPEED_UNKNOWN:
-		return "Unknown";
-	default:
-		return "Unsupported";
-	}
-}
-
-static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
-{
-	if (pdata->phy.link)
-		netdev_info(pdata->netdev,
-			    "Link is Up - %s/%s - flow control %s\n",
-			    xgbe_phy_speed_string(pdata->phy.speed),
-			    pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
-			    xgbe_phy_fc_string(pdata));
-	else
-		netdev_info(pdata->netdev, "Link is Down\n");
-}
 
 static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
@@ -902,14 +785,10 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
 		pdata->phy_link = 0;
 		pdata->phy_speed = SPEED_UNKNOWN;
 	}
-
-	if (new_state && netif_msg_link(pdata))
-		xgbe_phy_print_status(pdata);
 }
 
 static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
 {
-	netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
-
 	/* Disable auto-negotiation */
 	xgbe_disable_an(pdata);
@@ -939,15 +818,16 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
 	set_bit(XGBE_LINK_INIT, &pdata->dev_state);
-	pdata->link_check = jiffies;
+	pdata->link_check = ticks;
 
 	if (pdata->phy.autoneg != AUTONEG_ENABLE)
 		return xgbe_phy_config_fixed(pdata);
 
-	netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
-
 	/* Disable auto-negotiation interrupt */
-	disable_irq(pdata->an_irq);
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+	/* Clear any auto-negotiation interrupts */
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
 
 	/* Start auto-negotiation in a supported mode */
 	if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
@@ -956,7 +836,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
 	    (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
 		xgbe_set_mode(pdata, XGBE_MODE_KX);
 	} else {
-		enable_irq(pdata->an_irq);
+		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
 		return -EINVAL;
 	}
 
@@ -972,7 +852,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
 	pdata->kx_state = XGBE_RX_BPA;
 
 	/* Re-enable auto-negotiation interrupt */
-	enable_irq(pdata->an_irq);
+	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
 
 	/* Set up advertisement registers based on current settings */
 	xgbe_an_init(pdata);
@ -987,7 +867,7 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
{ {
int ret; int ret;
mutex_lock(&pdata->an_mutex); sx_xlock(&pdata->an_mutex);
ret = __xgbe_phy_config_aneg(pdata); ret = __xgbe_phy_config_aneg(pdata);
if (ret) if (ret)
@ -995,7 +875,7 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
else else
clear_bit(XGBE_LINK_ERR, &pdata->dev_state); clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
mutex_unlock(&pdata->an_mutex); sx_unlock(&pdata->an_mutex);
return ret; return ret;
} }
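
The Linux struct mutex guarding auto-negotiation becomes an sx(9) shared/exclusive lock, which can likewise be held across sleeps. A minimal sketch of the pattern, not part of the commit and with illustrative names:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx an_lock;

static void
an_lock_init(void)
{

	sx_init(&an_lock, "an lock");		/* cf. mutex_init() */
}

static int
an_config(void)
{
	int ret;

	sx_xlock(&an_lock);		/* cf. mutex_lock(); may sleep while held */
	ret = 0;			/* ... serialized AN work ... */
	sx_unlock(&an_lock);		/* cf. mutex_unlock() */

	return (ret);
}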
@@ -1009,9 +889,8 @@ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
 {
 	unsigned long link_timeout;
 
-	link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
-	if (time_after(jiffies, link_timeout)) {
-		netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+	link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * hz);
+	if ((int)(ticks - link_timeout) >= 0) {
 		xgbe_phy_config_aneg(pdata);
 	}
 }
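
The jiffies-based time_after() test becomes a signed comparison against FreeBSD's ticks counter, which increments hz times per second and wraps. A minimal sketch of the wrap-safe idiom used above, not part of the commit:

#include <sys/param.h>
#include <sys/kernel.h>		/* ticks, hz */

/* True once `deadline` has passed, correct across the counter wrapping;
 * this is the same trick Linux's time_after() applies to jiffies. */
static int
deadline_expired(unsigned long deadline)
{

	return ((int)(ticks - deadline) >= 0);
}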
@ -1109,10 +988,6 @@ static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
/* Compare Advertisement and Link Partner register 3 */ /* Compare Advertisement and Link Partner register 3 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
if (lp_reg & 0xc000)
pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
pdata->phy.duplex = DUPLEX_FULL;
} }
static void xgbe_phy_status(struct xgbe_prv_data *pdata) static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@ -1120,8 +995,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
unsigned int reg, link_aneg; unsigned int reg, link_aneg;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) { if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
netif_carrier_off(pdata->netdev);
pdata->phy.link = 0; pdata->phy.link = 0;
goto adjust_link; goto adjust_link;
} }
@ -1145,8 +1018,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state); clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
netif_carrier_on(pdata->netdev);
} else { } else {
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) { if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
xgbe_check_link_timeout(pdata); xgbe_check_link_timeout(pdata);
@ -1156,8 +1027,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
} }
xgbe_phy_status_aneg(pdata); xgbe_phy_status_aneg(pdata);
netif_carrier_off(pdata->netdev);
} }
adjust_link: adjust_link:
@ -1166,7 +1035,6 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
static void xgbe_phy_stop(struct xgbe_prv_data *pdata) static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{ {
netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
/* Disable auto-negotiation */ /* Disable auto-negotiation */
xgbe_disable_an(pdata); xgbe_disable_an(pdata);
@ -1174,27 +1042,22 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation interrupts */ /* Disable auto-negotiation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
devm_free_irq(pdata->dev, pdata->an_irq, pdata); bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
pdata->phy.link = 0; pdata->phy.link = 0;
netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata); xgbe_phy_adjust_link(pdata);
} }
static int xgbe_phy_start(struct xgbe_prv_data *pdata) static int xgbe_phy_start(struct xgbe_prv_data *pdata)
{ {
struct net_device *netdev = pdata->netdev;
int ret; int ret;
netif_dbg(pdata, link, pdata->netdev, "starting PHY\n"); ret = bus_setup_intr(pdata->dev, pdata->an_irq_res,
INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_an_isr, pdata,
ret = devm_request_irq(pdata->dev, pdata->an_irq, &pdata->an_irq_tag);
xgbe_an_isr, 0, pdata->an_name,
pdata);
if (ret) { if (ret) {
netdev_err(netdev, "phy irq request failed\n"); return -ret;
return ret;
} }
/* Set initial mode - call the mode setting routines /* Set initial mode - call the mode setting routines
@ -1220,7 +1083,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
return xgbe_phy_config_aneg(pdata); return xgbe_phy_config_aneg(pdata);
err_irq: err_irq:
devm_free_irq(pdata->dev, pdata->an_irq, pdata); bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
return ret; return ret;
} }
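
devm_request_irq()/devm_free_irq() map onto bus_setup_intr(9) and bus_teardown_intr(9) against the struct resource allocated at attach time, with the handler cookie kept in an_irq_tag. Note that bus_setup_intr() returns a positive errno value, so the `return -ret` above preserves the driver's Linux-style negative error returns. A sketch, not part of the commit, with illustrative names:

#include <sys/param.h>
#include <sys/bus.h>

static int
example_intr_attach(device_t dev, struct resource *irq_res,
    driver_intr_t handler, void *softc, void **tagp)
{

	/* NULL filter: run `handler` in an ithread; INTR_MPSAFE says the
	 * handler does its own locking and does not need Giant. */
	return (bus_setup_intr(dev, irq_res, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, handler, softc, tagp));
}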
@@ -1235,7 +1098,7 @@ static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
 	count = 50;
 	do {
-		msleep(20);
+		DELAY(20);
 		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
 	} while ((reg & MDIO_CTRL1_RESET) && --count);
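
One behavioural difference worth noting: Linux msleep(20) sleeps for at least 20 milliseconds, whereas FreeBSD DELAY(20) busy-waits for 20 microseconds, so the reset loop above polls far more tightly than the original. If a full 20 ms pause per iteration were intended, closer equivalents would be (sketch only; the wmesg string is illustrative):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* hz */

static void
example_reset_pause(void)
{

	DELAY(20 * 1000);			/* 20 ms busy-wait; any context */
	pause("xgbrst", MAX(1, hz / 50));	/* 20 ms sleep; process context */
}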
@@ -1251,50 +1114,9 @@ static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
 	return 0;
 }
 
-static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
-{
-	struct device *dev = pdata->dev;
-
-	dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
-
-	dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
-		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
-	dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
-		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
-	dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
-		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
-	dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
-		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
-	dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
-		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
-	dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
-		XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
-
-	dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
-		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
-	dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
-		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
-	dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
-		MDIO_AN_ADVERTISE,
-		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
-	dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
-		MDIO_AN_ADVERTISE + 1,
-		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
-	dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
-		MDIO_AN_ADVERTISE + 2,
-		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
-	dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
-		MDIO_AN_COMP_STAT,
-		XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
-
-	dev_dbg(dev, "\n*************************************************\n");
-}
-
 static void xgbe_phy_init(struct xgbe_prv_data *pdata)
 {
-	mutex_init(&pdata->an_mutex);
-	INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
-	INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+	sx_init(&pdata->an_mutex, "axgbe AN lock");
 
 	pdata->mdio_mmd = MDIO_MMD_PCS;
 
 	/* Initialize supported features */
@@ -1343,9 +1165,6 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata)
 	if (pdata->tx_pause)
 		pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
-
-	if (netif_msg_drv(pdata))
-		xgbe_dump_phy_registers(pdata);
 }
 
 void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)

sys/dev/axgbe/xgbe.h

@@ -112,21 +112,17 @@
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
  */
 
 #ifndef __XGBE_H__
 #define __XGBE_H__
 
-#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/workqueue.h>
-#include <linux/phy.h>
-#include <linux/if_vlan.h>
-#include <linux/bitops.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <net/dcbnl.h>
+#include "xgbe_osdep.h"
+
+/* From linux/dcbnl.h */
+#define IEEE_8021QAZ_MAX_TCS	8
 
 #define XGBE_DRV_NAME		"amd-xgbe"
 #define XGBE_DRV_VERSION	"1.0.2"
@@ -151,7 +147,7 @@
  */
 #define XGBE_TX_MAX_DESCS	(MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
 
-#define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_MIN_BUF_SIZE	1522
 #define XGBE_RX_BUF_ALIGN	64
 #define XGBE_SKB_ALLOC_SIZE	256
 #define XGBE_SPH_HDSMS_SIZE	2	/* Keep in sync with SKB_ALLOC_SIZE */
@@ -288,7 +284,7 @@
 struct xgbe_prv_data;
 
 struct xgbe_packet_data {
-	struct sk_buff *skb;
+	struct mbuf *m;
 
 	unsigned int attributes;
@@ -297,18 +293,8 @@ struct xgbe_packet_data {
 	unsigned int rdesc_count;
 	unsigned int length;
 
-	unsigned int header_len;
-	unsigned int tcp_header_len;
-	unsigned int tcp_payload_len;
-	unsigned short mss;
-
-	unsigned short vlan_ctag;
-
 	u64 rx_tstamp;
 
-	u32 rss_hash;
-	enum pkt_hash_types rss_hash_type;
-
 	unsigned int tx_packets;
 	unsigned int tx_bytes;
 };
@@ -321,25 +307,6 @@ struct xgbe_ring_desc {
 	__le32 desc3;
 };
 
-/* Page allocation related values */
-struct xgbe_page_alloc {
-	struct page *pages;
-	unsigned int pages_len;
-	unsigned int pages_offset;
-
-	dma_addr_t pages_dma;
-};
-
-/* Ring entry buffer data */
-struct xgbe_buffer_data {
-	struct xgbe_page_alloc pa;
-	struct xgbe_page_alloc pa_unmap;
-
-	dma_addr_t dma_base;
-	unsigned long dma_off;
-	unsigned int dma_len;
-};
-
 /* Tx-related ring data */
 struct xgbe_tx_ring_data {
 	unsigned int packets;		/* BQL packet count */
@@ -348,9 +315,6 @@ struct xgbe_tx_ring_data {
 
 /* Rx-related ring data */
 struct xgbe_rx_ring_data {
-	struct xgbe_buffer_data hdr;	/* Header locations */
-	struct xgbe_buffer_data buf;	/* Payload locations */
-
 	unsigned short hdr_len;		/* Length of received header */
 	unsigned short len;		/* Length of received packet */
 };
@@ -361,28 +325,19 @@ struct xgbe_rx_ring_data {
  */
 struct xgbe_ring_data {
 	struct xgbe_ring_desc *rdesc;	/* Virtual address of descriptor */
-	dma_addr_t rdesc_dma;		/* DMA address of descriptor */
-
-	struct sk_buff *skb;		/* Virtual address of SKB */
-	dma_addr_t skb_dma;		/* DMA address of SKB data */
-	unsigned int skb_dma_len;	/* Length of SKB DMA area */
+	bus_addr_t rdata_paddr;
+
+	bus_dma_tag_t mbuf_dmat;
+	bus_dmamap_t mbuf_map;
+	bus_addr_t mbuf_hdr_paddr;
+	bus_addr_t mbuf_data_paddr;
+	bus_size_t mbuf_len;
+	int mbuf_free;
+	struct mbuf *mb;
 
 	struct xgbe_tx_ring_data tx;	/* Tx-related data */
 	struct xgbe_rx_ring_data rx;	/* Rx-related data */
-
-	unsigned int mapped_as_page;
-
-	/* Incomplete receive save location.  If the budget is exhausted
-	 * or the last descriptor (last normal descriptor or a following
-	 * context descriptor) has not been DMA'd yet the current state
-	 * of the receive processing needs to be saved.
-	 */
-	unsigned int state_saved;
-	struct {
-		struct sk_buff *skb;
-		unsigned int len;
-		unsigned int error;
-	} state;
 };
 
 struct xgbe_ring {
@@ -394,18 +349,19 @@ struct xgbe_ring {
 	/* Virtual/DMA addresses and count of allocated descriptor memory */
 	struct xgbe_ring_desc *rdesc;
-	dma_addr_t rdesc_dma;
+	bus_dmamap_t rdesc_map;
+	bus_dma_tag_t rdesc_dmat;
+	bus_addr_t rdesc_paddr;
 	unsigned int rdesc_count;
 
+	bus_dma_tag_t mbuf_dmat;
+	bus_dmamap_t mbuf_map;
+
 	/* Array of descriptor data corresponding the descriptor memory
 	 * (always use the XGBE_GET_DESC_DATA macro to access this data)
 	 */
 	struct xgbe_ring_data *rdata;
 
-	/* Page allocation for RX buffers */
-	struct xgbe_page_alloc rx_hdr_pa;
-	struct xgbe_page_alloc rx_buf_pa;
-
 	/* Ring index values
 	 *  cur   - Tx: index of descriptor to be used for current transfer
 	 *          Rx: index of descriptor to check for packet availability
@@ -426,7 +382,7 @@ struct xgbe_ring {
 			unsigned short cur_vlan_ctag;
 		} tx;
 	};
-} ____cacheline_aligned;
+} __aligned(CACHE_LINE_SIZE);
 
 /* Structure used to describe the descriptor rings associated with
  * a DMA channel.
@@ -439,23 +395,18 @@ struct xgbe_channel {
 	/* Queue index and base address of queue's DMA registers */
 	unsigned int queue_index;
-	void __iomem *dma_regs;
+	bus_space_tag_t dma_tag;
+	bus_space_handle_t dma_handle;
 
 	/* Per channel interrupt irq number */
-	int dma_irq;
-	char dma_irq_name[IFNAMSIZ + 32];
-
-	/* Netdev related settings */
-	struct napi_struct napi;
+	struct resource *dma_irq_res;
+	void *dma_irq_tag;
 
 	unsigned int saved_ier;
 
-	unsigned int tx_timer_active;
-	struct timer_list tx_timer;
-
 	struct xgbe_ring *tx_ring;
 	struct xgbe_ring *rx_ring;
-} ____cacheline_aligned;
+} __aligned(CACHE_LINE_SIZE);
 
 enum xgbe_state {
 	XGBE_DOWN,
@@ -664,24 +615,8 @@ struct xgbe_hw_if {
 	void (*tx_mmc_int)(struct xgbe_prv_data *);
 	void (*read_mmc_stats)(struct xgbe_prv_data *);
 
-	/* For Timestamp config */
-	int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
-	void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
-	void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
-				unsigned int nsec);
-	u64 (*get_tstamp_time)(struct xgbe_prv_data *);
-	u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
-	/* For Data Center Bridging config */
-	void (*config_tc)(struct xgbe_prv_data *);
-	void (*config_dcb_tc)(struct xgbe_prv_data *);
-	void (*config_dcb_pfc)(struct xgbe_prv_data *);
-
 	/* For Receive Side Scaling */
-	int (*enable_rss)(struct xgbe_prv_data *);
 	int (*disable_rss)(struct xgbe_prv_data *);
-	int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
-	int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
 };
 
 struct xgbe_phy_if {
@@ -701,7 +636,7 @@ struct xgbe_phy_if {
 struct xgbe_desc_if {
 	int (*alloc_ring_resources)(struct xgbe_prv_data *);
 	void (*free_ring_resources)(struct xgbe_prv_data *);
-	int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+	int (*map_tx_skb)(struct xgbe_channel *, struct mbuf *);
 	int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
 			     struct xgbe_ring_data *);
 	void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
@@ -756,34 +691,33 @@ struct xgbe_hw_features {
 };
 
 struct xgbe_prv_data {
-	struct net_device *netdev;
+	struct ifnet *netdev;
 	struct platform_device *pdev;
 	struct acpi_device *adev;
-	struct device *dev;
+	device_t dev;
 
 	/* ACPI or DT flag */
 	unsigned int use_acpi;
 
 	/* XGMAC/XPCS related mmio registers */
-	void __iomem *xgmac_regs;	/* XGMAC CSRs */
-	void __iomem *xpcs_regs;	/* XPCS MMD registers */
-	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
-	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
-	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */
+	struct resource *xgmac_res;	/* XGMAC CSRs */
+	struct resource *xpcs_res;	/* XPCS MMD registers */
+	struct resource *rxtx_res;	/* SerDes Rx/Tx CSRs */
+	struct resource *sir0_res;	/* SerDes integration registers (1/2) */
+	struct resource *sir1_res;	/* SerDes integration registers (2/2) */
 
-	/* Overall device lock */
-	spinlock_t lock;
+	/* DMA tag */
+	bus_dma_tag_t dmat;
 
 	/* XPCS indirect addressing lock */
 	spinlock_t xpcs_lock;
 
-	/* RSS addressing mutex */
-	struct mutex rss_mutex;
-
 	/* Flags representing xgbe_state */
 	unsigned long dev_state;
 
-	int dev_irq;
+	struct resource *dev_irq_res;
+	struct resource *chan_irq_res[4];
+	void *dev_irq_tag;
 	unsigned int per_channel_irq;
 
 	struct xgbe_hw_if hw_if;
@@ -797,9 +731,9 @@ struct xgbe_prv_data {
 	unsigned int awcache;
 
 	/* Service routine support */
-	struct workqueue_struct *dev_workqueue;
-	struct work_struct service_work;
-	struct timer_list service_timer;
+	struct taskqueue *dev_workqueue;
+	struct task service_work;
+	struct callout service_timer;
 
 	/* Rings for Tx/Rx on a DMA channel */
 	struct xgbe_channel *channel;
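
The Linux workqueue plus timer_list pair becomes a taskqueue(9) task driven by a callout(9). A minimal sketch of that pairing, not part of the commit and with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* hz */
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct task service_task;
static struct callout service_timer;
static struct taskqueue *service_tq;

static void
service_work(void *arg, int pending)
{
	/* Housekeeping formerly done in the Linux work function. */
}

static void
service_tick(void *arg)
{

	taskqueue_enqueue(service_tq, &service_task);	/* defer to a thread */
	callout_schedule(&service_timer, hz);		/* re-arm, 1 second */
}

static void
service_start(void *sc)
{

	TASK_INIT(&service_task, 0, service_work, sc);
	service_tq = taskqueue_create("svc_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &service_tq);
	taskqueue_start_threads(&service_tq, 1, PI_NET, "svc taskq");
	callout_init(&service_timer, 1);		/* MPSAFE callout */
	callout_reset(&service_timer, hz, service_tick, sc);
}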
@@ -850,35 +784,16 @@ struct xgbe_prv_data {
 	/* Netdev related settings */
 	unsigned char mac_addr[ETH_ALEN];
-	netdev_features_t netdev_features;
-
-	struct napi_struct napi;
 
 	struct xgbe_mmc_stats mmc_stats;
 	struct xgbe_ext_stats ext_stats;
 
-	/* Filtering support */
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
 	/* Device clocks */
 	struct clk *sysclk;
 	unsigned long sysclk_rate;
 	struct clk *ptpclk;
 	unsigned long ptpclk_rate;
 
-	/* Timestamp support */
-	spinlock_t tstamp_lock;
-	struct ptp_clock_info ptp_clock_info;
-	struct ptp_clock *ptp_clock;
-	struct hwtstamp_config tstamp_config;
-	struct cyclecounter tstamp_cc;
-	struct timecounter tstamp_tc;
-	unsigned int tstamp_addend;
-	struct work_struct tx_tstamp_work;
-	struct sk_buff *tx_tstamp_skb;
-	u64 tx_tstamp;
-
 	/* DCB support */
-	struct ieee_ets *ets;
-	struct ieee_pfc *pfc;
 	unsigned int q2tc_map[XGBE_MAX_QUEUES];
 	unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
 	u8 num_tcs;
@@ -887,7 +802,7 @@ struct xgbe_prv_data {
 	struct xgbe_hw_features hw_feat;
 
 	/* Device restart work structure */
-	struct work_struct restart_work;
+	struct task restart_work;
 
 	/* Keeps track of power mode */
 	unsigned int power_down;
@@ -896,7 +811,6 @@ struct xgbe_prv_data {
 	u32 msg_enable;
 
 	/* Current PHY settings */
-	phy_interface_t phy_mode;
 	int phy_link;
 	int phy_speed;
@@ -906,10 +820,9 @@ struct xgbe_prv_data {
 	unsigned long link_check;
 
 	char an_name[IFNAMSIZ + 32];
-	struct workqueue_struct *an_workqueue;
 
-	int an_irq;
-	struct work_struct an_irq_work;
+	struct resource *an_irq_res;
+	void *an_irq_tag;
 
 	unsigned int speed_set;
@@ -928,61 +841,32 @@ struct xgbe_prv_data {
 	/* Auto-negotiation state machine support */
 	unsigned int an_int;
-	struct mutex an_mutex;
+	struct sx an_mutex;
 	enum xgbe_an an_result;
 	enum xgbe_an an_state;
 	enum xgbe_rx kr_state;
 	enum xgbe_rx kx_state;
-	struct work_struct an_work;
 	unsigned int an_supported;
 	unsigned int parallel_detect;
 	unsigned int fec_ability;
 	unsigned long an_start;
 
 	unsigned int lpm_ctrl;		/* CTRL1 for resume */
-
-#ifdef CONFIG_DEBUG_FS
-	struct dentry *xgbe_debugfs;
-
-	unsigned int debugfs_xgmac_reg;
-
-	unsigned int debugfs_xpcs_mmd;
-	unsigned int debugfs_xpcs_reg;
-#endif
 };
 
 /* Function prototypes*/
+int xgbe_open(struct ifnet *);
+int xgbe_close(struct ifnet *);
+int xgbe_xmit(struct ifnet *, struct mbuf *);
+int xgbe_change_mtu(struct ifnet *, int);
 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
 void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
-struct net_device_ops *xgbe_get_netdev_ops(void);
-struct ethtool_ops *xgbe_get_ethtool_ops(void);
-#ifdef CONFIG_AMD_XGBE_DCB
-const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
-#endif
 
-void xgbe_ptp_register(struct xgbe_prv_data *);
-void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
-		       unsigned int, unsigned int, unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
-		       unsigned int);
-void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
 void xgbe_get_all_hw_features(struct xgbe_prv_data *);
-int xgbe_powerup(struct net_device *, unsigned int);
-int xgbe_powerdown(struct net_device *, unsigned int);
 void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
 
-#ifdef CONFIG_DEBUG_FS
-void xgbe_debugfs_init(struct xgbe_prv_data *);
-void xgbe_debugfs_exit(struct xgbe_prv_data *);
-#else
-static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
-static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
-#endif /* CONFIG_DEBUG_FS */
-
 /* NOTE: Uncomment for function trace log messages in KERNEL LOG */
 #if 0
 #define YDEBUG
@@ -991,13 +875,13 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
 /* For debug prints */
 #ifdef YDEBUG
-#define DBGPR(x...) pr_alert(x)
+#define DBGPR(x...) printf(x)
 #else
 #define DBGPR(x...) do { } while (0)
 #endif
 
 #ifdef YDEBUG_MDIO
-#define DBGPR_MDIO(x...) pr_alert(x)
+#define DBGPR_MDIO(x...) printf(x)
 #else
 #define DBGPR_MDIO(x...) do { } while (0)
 #endif

sys/dev/axgbe/xgbe_osdep.h (new file, 188 lines)

@@ -0,0 +1,188 @@
/*-
* Copyright (c) 2016,2017 SoftIron Inc.
* All rights reserved.
*
* This software was developed by Andrew Turner under
* the sponsorship of SoftIron Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _XGBE_OSDEP_H_
#define _XGBE_OSDEP_H_
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t __le32;
typedef uint32_t u32;
typedef uint64_t u64;
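
/*
 * Minimal stand-in for the Linux spinlock API: spin_lock_irqsave()
 * disables interrupts on the local CPU and then takes an mtx(9) mutex.
 */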
typedef struct {
struct mtx lock;
} spinlock_t;
static inline void
spin_lock_init(spinlock_t *spinlock)
{
mtx_init(&spinlock->lock, "axgbe_spin", NULL, MTX_DEF);
}
#define spin_lock_irqsave(spinlock, flags) \
do { \
(flags) = intr_disable(); \
mtx_lock(&(spinlock)->lock); \
} while (0)
#define spin_unlock_irqrestore(spinlock, flags) \
do { \
mtx_unlock(&(spinlock)->lock); \
intr_restore(flags); \
} while (0)
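
/*
 * Linux-style bit operations: set_bit() and clear_bit() are atomic
 * (built on atomic(9)); test_bit() is a plain unlocked read, as in Linux.
 */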
#define BIT(pos) (1ul << pos)
static inline void
clear_bit(int pos, unsigned long *p)
{
atomic_clear_long(p, 1ul << pos);
}
static inline int
test_bit(int pos, unsigned long *p)
{
unsigned long val;
val = *p;
return ((val & 1ul << pos) != 0);
}
static inline void
set_bit(int pos, unsigned long *p)
{
atomic_set_long(p, 1ul << pos);
}
#define lower_32_bits(x) ((x) & 0xffffffffu)
#define upper_32_bits(x) (((x) >> 32) & 0xffffffffu)
#define cpu_to_le32(x) htole32(x)
#define le32_to_cpu(x) le32toh(x)
MALLOC_DECLARE(M_AXGBE);
#define ADVERTISED_Pause 0x01
#define ADVERTISED_Asym_Pause 0x02
#define ADVERTISED_Autoneg 0x04
#define ADVERTISED_Backplane 0x08
#define ADVERTISED_10000baseKR_Full 0x10
#define ADVERTISED_2500baseX_Full 0x20
#define ADVERTISED_1000baseKX_Full 0x40
#define AUTONEG_DISABLE 0
#define AUTONEG_ENABLE 1
#define DUPLEX_UNKNOWN 1
#define DUPLEX_FULL 2
#define SPEED_UNKNOWN 1
#define SPEED_10000 2
#define SPEED_2500 3
#define SPEED_1000 4
#define SUPPORTED_Autoneg 0x01
#define SUPPORTED_Pause 0x02
#define SUPPORTED_Asym_Pause 0x04
#define SUPPORTED_Backplane 0x08
#define SUPPORTED_10000baseKR_Full 0x10
#define SUPPORTED_1000baseKX_Full 0x20
#define SUPPORTED_2500baseX_Full 0x40
#define SUPPORTED_10000baseR_FEC 0x80
#define BMCR_SPEED100 0x2000
#define MDIO_MMD_PMAPMD 1
#define MDIO_MMD_PCS 3
#define MDIO_MMD_AN 7
#define MDIO_PMA_10GBR_FECABLE 170
#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001
#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002
#define MII_ADDR_C45 (1<<30)
#define MDIO_CTRL1 0x00 /* MII_BMCR */
#define MDIO_CTRL1_RESET 0x8000 /* BMCR_RESET */
#define MDIO_CTRL1_SPEEDSELEXT 0x2040 /* BMCR_SPEED1000|BMCR_SPEED100*/
#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x3c)
#define MDIO_AN_CTRL1_ENABLE 0x1000 /* BMCR_AUTOEN */
#define MDIO_CTRL1_LPOWER 0x0800 /* BMCR_PDOWN */
#define MDIO_AN_CTRL1_RESTART 0x0200 /* BMCR_STARTNEG */
#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
#define MDIO_STAT1 1 /* MII_BMSR */
#define MDIO_STAT1_LSTATUS 0x0004 /* BMSR_LINK */
#define MDIO_CTRL2 0x07
#define MDIO_PCS_CTRL2_10GBR 0x0000
#define MDIO_PCS_CTRL2_10GBX 0x0001
#define MDIO_PCS_CTRL2_TYPE 0x0003
#define MDIO_AN_ADVERTISE 16
#define MDIO_AN_LPA 19
#define ETH_ALEN ETHER_ADDR_LEN
#define ETH_HLEN ETHER_HDR_LEN
#define ETH_FCS_LEN 4
#define VLAN_HLEN ETHER_VLAN_ENCAP_LEN
#define ARRAY_SIZE(x) nitems(x)
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
#define NSEC_PER_SEC 1000000000ul
#define min_t(t, a, b) MIN((t)(a), (t)(b))
#define max_t(t, a, b) MAX((t)(a), (t)(b))
#endif /* _XGBE_OSDEP_H_ */
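
As a usage illustration (not part of the commit), driver-style code written against these shims, with illustrative names throughout:

#include "xgbe_osdep.h"

static spinlock_t state_lock;
static unsigned long dev_state;

#define EXAMPLE_LINK_INIT	0	/* illustrative flag bit */

static void
example_init(void)
{

	spin_lock_init(&state_lock);
}

static void
example_link_event(void)
{
	register_t flags;	/* intr_disable() returns register_t */

	spin_lock_irqsave(&state_lock, flags);
	set_bit(EXAMPLE_LINK_INIT, &dev_state);
	spin_unlock_irqrestore(&state_lock, flags);

	if (test_bit(EXAMPLE_LINK_INIT, &dev_state))
		clear_bit(EXAMPLE_LINK_INIT, &dev_state);
}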