o) Remove some commented out or unimplemented code.

o) Remove some options that are configurable on Linux but not FreeBSD.
o) Centralize open/poll/stop routines for XAUI and SGMII and use the common
   uninit routine directly rather than providing a wrapper for it.  The init
   functions for these interfaces are now identical and the common init routine
   could merge in setting those function pointers except that some hardware
   seems to use no open/poll/stop method?
This commit is contained in:
Juli Mallett 2010-11-28 05:57:24 +00:00
parent aa54636620
commit 34e3f53b7c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=215974
9 changed files with 88 additions and 363 deletions

View File

@ -48,51 +48,6 @@ extern int octeon_is_simulation(void);
extern cvmx_bootinfo_t *octeon_bootinfo;
/**
* Get the low level ethernet statistics
*
* @param dev Device to get the statistics from
* @return Pointer to the statistics
*/
#if 0
static struct ifnet_stats *cvm_oct_common_get_stats(struct ifnet *ifp)
{
cvmx_pip_port_status_t rx_status;
cvmx_pko_port_status_t tx_status;
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
if (octeon_is_simulation()) {
/* The simulator doesn't support statistics */
memset(&rx_status, 0, sizeof(rx_status));
memset(&tx_status, 0, sizeof(tx_status));
} else {
cvmx_pip_get_port_status(priv->port, 1, &rx_status);
cvmx_pko_get_port_status(priv->port, 1, &tx_status);
}
priv->stats.rx_packets += rx_status.inb_packets;
priv->stats.tx_packets += tx_status.packets;
priv->stats.rx_bytes += rx_status.inb_octets;
priv->stats.tx_bytes += tx_status.octets;
priv->stats.multicast += rx_status.multicast_packets;
priv->stats.rx_crc_errors += rx_status.inb_errors;
priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
/* The drop counter must be incremented atomically since the RX
tasklet also increments it */
#ifdef CONFIG_64BIT
cvmx_atomic_add64_nosync(&priv->stats.rx_dropped, rx_status.dropped_packets);
#else
cvmx_atomic_add32_nosync((int32_t *)&priv->stats.rx_dropped, rx_status.dropped_packets);
#endif
}
return &priv->stats;
}
#endif
/**
* Set the multicast list. Currently unimplemented.
*
@ -218,6 +173,67 @@ int cvm_oct_common_change_mtu(struct ifnet *ifp, int new_mtu)
}
/**
 * Enable a port: set the GMX per-port enable bit, then report the
 * current link state to the ifnet layer.  The link query is skipped
 * under simulation, which has no real PHY/link.
 */
int cvm_oct_common_open(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_gmxx_prtx_cfg_t port_cfg;
	cvmx_helper_link_info_t link;

	/* Read-modify-write the per-port config register to enable TX/RX. */
	port_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	port_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), port_cfg.u64);

	if (!octeon_is_simulation()) {
		link = cvmx_helper_link_get(priv->port);
		if_link_state_change(ifp, link.s.link_up ?
		    LINK_STATE_UP : LINK_STATE_DOWN);
	}

	return 0;
}
/**
 * Disable a port by clearing the GMX per-port enable bit.
 */
int cvm_oct_common_stop(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_gmxx_prtx_cfg_t port_cfg;

	/* Read-modify-write the per-port config register to disable TX/RX. */
	port_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	port_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), port_cfg.u64);

	return 0;
}
/**
 * Poll for a link status change.  When the current link state differs
 * from the cached value, autoconfigure the link, cache the new state,
 * and flag that the ifnet layer needs a link update.
 */
void cvm_oct_common_poll(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	cvmx_helper_link_info_t link;

	link = cvmx_helper_link_get(priv->port);
	if (link.u64 != priv->link_info) {
		/* Link changed: renegotiate and remember the new state. */
		link = cvmx_helper_link_autoconf(priv->port);
		priv->link_info = link.u64;
		priv->need_link_update = 1;
	}
}
/**
* Per network device initialization
*
@ -240,24 +256,11 @@ int cvm_oct_common_init(struct ifnet *ifp)
count++;
#if 0
ifp->get_stats = cvm_oct_common_get_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
ifp->poll_controller = cvm_oct_poll_controller;
#endif
#endif
cvm_oct_mdio_setup_device(ifp);
cvm_oct_common_set_mac_address(ifp, mac);
cvm_oct_common_change_mtu(ifp, ifp->if_mtu);
#if 0
/* Zero out stats for port so we won't mistakenly show counters from the
bootloader */
memset(ifp->get_stats(ifp), 0, sizeof(struct ifnet_stats));
#endif
/*
* Do any last-minute board-specific initialization.
*/

View File

@ -28,6 +28,9 @@ AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR W
*************************************************************************/
/* $FreeBSD$ */
int cvm_oct_common_open(struct ifnet *ifp);
int cvm_oct_common_stop(struct ifnet *ifp);
void cvm_oct_common_poll(struct ifnet *ifp);
int cvm_oct_common_init(struct ifnet *ifp);
void cvm_oct_common_uninit(struct ifnet *ifp);

View File

@ -40,13 +40,6 @@ AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR W
* This kernel config option allows the user to control the number of
* packet and work queue buffers allocated by the driver. If this is zero,
* the driver uses the default from below.
* USE_MBUFS_IN_HW
* Tells the driver to populate the packet buffers with kernel mbufs.
* This allows the driver to receive packets without copying them. It also
* means that 32bit userspace can't access the packet buffers.
* USE_32BIT_SHARED
* This define tells the driver to allocate memory for buffers from the
32bit shared region instead of the kernel memory space.
* USE_HW_TCPUDP_CHECKSUM
* Controls if the Octeon TCP/UDP checksum engine is used for packet
* output. If this is zero, the kernel will perform the checksum in
@ -64,14 +57,6 @@ AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR W
#define CONFIG_CAVIUM_RESERVE32 0
#endif
#if CONFIG_CAVIUM_RESERVE32
#define USE_32BIT_SHARED 1
#define USE_MBUFS_IN_HW 0
#else
#define USE_32BIT_SHARED 0
#define USE_MBUFS_IN_HW 1
#endif
#define INTERRUPT_LIMIT 10000 /* Max interrupts per second per core */
/*#define INTERRUPT_LIMIT 0 *//* Don't limit the number of interrupts */
#define USE_HW_TCPUDP_CHECKSUM 1

View File

@ -70,10 +70,6 @@ int cvm_oct_mdio_read(struct ifnet *ifp, int phy_id, int location)
cvmx_write_csr(CVMX_SMI_CMD, smi_cmd.u64);
do {
#if 0
if (!in_interrupt())
yield();
#endif
smi_rd.u64 = cvmx_read_csr(CVMX_SMI_RD_DAT);
} while (smi_rd.s.pending);
@ -111,10 +107,6 @@ void cvm_oct_mdio_write(struct ifnet *ifp, int phy_id, int location, int val)
cvmx_write_csr(CVMX_SMI_CMD, smi_cmd.u64);
do {
#if 0
if (!in_interrupt())
yield();
#endif
smi_wr.u64 = cvmx_read_csr(CVMX_SMI_WR_DAT);
} while (smi_wr.s.pending);
MDIO_UNLOCK();

View File

@ -51,7 +51,7 @@ __FBSDID("$FreeBSD$");
* @param size Size of the buffer needed for the pool
* @param elements Number of buffers to allocate
*/
static int cvm_oct_fill_hw_mbuf(int pool, int size, int elements)
int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
{
int freed = elements;
while (freed) {
@ -79,7 +79,7 @@ static int cvm_oct_fill_hw_mbuf(int pool, int size, int elements)
* @param size Size of the buffer needed for the pool
* @param elements Number of buffers to allocate
*/
static void cvm_oct_free_hw_mbuf(int pool, int size, int elements)
void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
{
char *memory;
@ -97,111 +97,3 @@ static void cvm_oct_free_hw_mbuf(int pool, int size, int elements)
else if (elements > 0)
printf("Warning: Freeing of pool %u is missing %d mbufs\n", pool, elements);
}
/**
* This function fills a hardware pool with memory. Depending
* on the config defines, this memory might come from the
* kernel or global 32bit memory allocated with
* cvmx_bootmem_alloc.
*
* @param pool Pool to populate
* @param size Size of each buffer in the pool
* @param elements Number of buffers to allocate
*/
static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
{
char *memory;
int freed = elements;
if (USE_32BIT_SHARED) {
#if 0
extern uint64_t octeon_reserve32_memory;
memory = cvmx_bootmem_alloc_range(elements*size, 128, octeon_reserve32_memory,
octeon_reserve32_memory + (CONFIG_CAVIUM_RESERVE32<<20) - 1);
if (memory == NULL)
panic("Unable to allocate %u bytes for FPA pool %d\n", elements*size, pool);
printf("Memory range %p - %p reserved for hardware\n", memory, memory + elements*size - 1);
while (freed) {
cvmx_fpa_free(memory, pool, 0);
memory += size;
freed--;
}
#else
panic("%s: may need to implement using shared memory.", __func__);
#endif
} else {
while (freed) {
/* We need to force alignment to 128 bytes here */
#if 0
memory = kmalloc(size + 127, GFP_ATOMIC);
#else
panic("%s: not yet implemented.", __func__);
#endif
if (__predict_false(memory == NULL)) {
printf("Unable to allocate %u bytes for FPA pool %d\n", elements*size, pool);
break;
}
memory = (char *)(((unsigned long)memory+127) & -128);
cvmx_fpa_free(memory, pool, 0);
freed--;
}
}
return (elements - freed);
}
/**
* Free memory previously allocated with cvm_oct_fill_hw_memory
*
* @param pool FPA pool to free
* @param size Size of each buffer in the pool
* @param elements Number of buffers that should be in the pool
*/
static void cvm_oct_free_hw_memory(int pool, int size, int elements)
{
if (USE_32BIT_SHARED) {
printf("Warning: 32 shared memory is not freeable\n");
} else {
char *memory;
do {
memory = cvmx_fpa_alloc(pool);
if (memory) {
elements--;
#if 0
kfree(phys_to_virt(cvmx_ptr_to_phys(memory)));
#else
panic("%s: not yet implemented.", __func__);
#endif
}
} while (memory);
if (elements < 0)
printf("Freeing of pool %u had too many buffers (%d)\n", pool, elements);
else if (elements > 0)
printf("Warning: Freeing of pool %u is missing %d buffers\n", pool, elements);
}
}
int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
{
int freed;
if (USE_MBUFS_IN_HW)
freed = cvm_oct_fill_hw_mbuf(pool, size, elements);
else
freed = cvm_oct_fill_hw_memory(pool, size, elements);
return (freed);
}
void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
{
if (USE_MBUFS_IN_HW)
cvm_oct_free_hw_mbuf(pool, size, elements);
else
cvm_oct_free_hw_memory(pool, size, elements);
}

View File

@ -75,21 +75,6 @@ int cvm_oct_do_interrupt(void *dev_id)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* This is called when the kernel needs to manually poll the
* device. For Octeon, this is simply calling the interrupt
* handler. We actually poll all the devices, not just the
* one supplied.
*
* @param dev Device to poll. Unused
*/
void cvm_oct_poll_controller(struct ifnet *ifp)
{
taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
}
#endif
/**
* This is called on receive errors, and determines if the packet
* can be dropped early-on in cvm_oct_tasklet_rx().
@ -226,7 +211,7 @@ void cvm_oct_tasklet_rx(void *context, int pending)
}
}
mbuf_in_hw = USE_MBUFS_IN_HW && work->word2.s.bufs == 1;
mbuf_in_hw = work->word2.s.bufs == 1;
if ((mbuf_in_hw)) {
m = *(struct mbuf **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
CVMX_PREFETCH(m, offsetof(struct mbuf, m_data));
@ -356,7 +341,7 @@ void cvm_oct_tasklet_rx(void *context, int pending)
/* Check to see if the mbuf and work share
the same packet buffer */
if (USE_MBUFS_IN_HW && (packet_not_copied)) {
if ((packet_not_copied)) {
/* This buffer needs to be replaced, increment
the number of buffers we need to free by one */
cvmx_fau_atomic_add32(
@ -375,22 +360,20 @@ void cvm_oct_tasklet_rx(void *context, int pending)
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
}
if (USE_MBUFS_IN_HW) {
/* Refill the packet buffer pool */
number_to_free =
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
/* Refill the packet buffer pool */
number_to_free =
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
if (number_to_free > 0) {
if (number_to_free > 0) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
-number_to_free);
num_freed =
cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
CVMX_FPA_PACKET_POOL_SIZE,
number_to_free);
if (num_freed != number_to_free) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
-number_to_free);
num_freed =
cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
CVMX_FPA_PACKET_POOL_SIZE,
number_to_free);
if (num_freed != number_to_free) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
number_to_free - num_freed);
}
number_to_free - num_freed);
}
}
sched_unpin();

View File

@ -46,72 +46,16 @@ __FBSDID("$FreeBSD$");
extern int octeon_is_simulation(void);
static int cvm_oct_sgmii_open(struct ifnet *ifp)
{
cvmx_gmxx_prtx_cfg_t gmx_cfg;
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
cvmx_helper_link_info_t link_info;
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
if (!octeon_is_simulation()) {
link_info = cvmx_helper_link_get(priv->port);
if (!link_info.s.link_up)
if_link_state_change(ifp, LINK_STATE_DOWN);
else
if_link_state_change(ifp, LINK_STATE_UP);
}
return 0;
}
static int cvm_oct_sgmii_stop(struct ifnet *ifp)
{
cvmx_gmxx_prtx_cfg_t gmx_cfg;
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
return 0;
}
static void cvm_oct_sgmii_poll(struct ifnet *ifp)
{
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
cvmx_helper_link_info_t link_info;
link_info = cvmx_helper_link_get(priv->port);
if (link_info.u64 == priv->link_info)
return;
link_info = cvmx_helper_link_autoconf(priv->port);
priv->link_info = link_info.u64;
priv->need_link_update = 1;
}
int cvm_oct_sgmii_init(struct ifnet *ifp)
{
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
cvm_oct_common_init(ifp);
priv->open = cvm_oct_sgmii_open;
priv->stop = cvm_oct_sgmii_stop;
priv->open = cvm_oct_common_open;
priv->stop = cvm_oct_common_stop;
priv->stop(ifp);
if (!octeon_is_simulation())
priv->poll = cvm_oct_sgmii_poll;
priv->poll = cvm_oct_common_poll;
/* FIXME: Need autoneg logic */
return 0;
}
void cvm_oct_sgmii_uninit(struct ifnet *ifp)
{
cvm_oct_common_uninit(ifp);
}

View File

@ -46,71 +46,15 @@ __FBSDID("$FreeBSD$");
extern int octeon_is_simulation(void);
static int cvm_oct_xaui_open(struct ifnet *ifp)
{
cvmx_gmxx_prtx_cfg_t gmx_cfg;
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
cvmx_helper_link_info_t link_info;
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
if (!octeon_is_simulation()) {
link_info = cvmx_helper_link_get(priv->port);
if (!link_info.s.link_up)
if_link_state_change(ifp, LINK_STATE_DOWN);
else
if_link_state_change(ifp, LINK_STATE_UP);
}
return 0;
}
static int cvm_oct_xaui_stop(struct ifnet *ifp)
{
cvmx_gmxx_prtx_cfg_t gmx_cfg;
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
return 0;
}
static void cvm_oct_xaui_poll(struct ifnet *ifp)
{
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
cvmx_helper_link_info_t link_info;
link_info = cvmx_helper_link_get(priv->port);
if (link_info.u64 == priv->link_info)
return;
link_info = cvmx_helper_link_autoconf(priv->port);
priv->link_info = link_info.u64;
priv->need_link_update = 1;
}
int cvm_oct_xaui_init(struct ifnet *ifp)
{
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
cvm_oct_common_init(ifp);
priv->open = cvm_oct_xaui_open;
priv->stop = cvm_oct_xaui_stop;
priv->open = cvm_oct_common_open;
priv->stop = cvm_oct_common_stop;
priv->stop(ifp);
if (!octeon_is_simulation())
priv->poll = cvm_oct_xaui_poll;
priv->poll = cvm_oct_common_poll;
return 0;
}
void cvm_oct_xaui_uninit(struct ifnet *ifp)
{
cvm_oct_common_uninit(ifp);
}

View File

@ -74,13 +74,6 @@ TUNABLE_INT("hw.octe.pow_receive_group", &pow_receive_group);
"\t\tgroup. Also any other software can submit packets to this\n"
"\t\tgroup for the kernel to process." */
static int disable_core_queueing = 1;
TUNABLE_INT("hw.octe.disable_core_queueing", &disable_core_queueing);
/*
"\t\tWhen set the networking core's tx_queue_len is set to zero. This\n"
"\t\tallows packets to be sent without lock contention in the packet scheduler\n"
"\t\tresulting in some cases in improved throughput.\n" */
extern int octeon_is_simulation(void);
/**
@ -176,9 +169,6 @@ static void cvm_do_timer(void *arg)
priv->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
}
#if 0
cvm_oct_device[port]->get_stats(cvm_oct_device[port]);
#endif
}
port++;
/* Poll the next port in a 50th of a second.
@ -312,9 +302,6 @@ int cvm_oct_init_module(device_t bus)
printf("cavium-ethernet: %s\n", OCTEON_SDK_VERSION_STRING);
#if 0
cvm_oct_proc_initialize();
#endif
cvm_oct_rx_initialize();
cvm_oct_configure_common_hw(bus);
@ -364,11 +351,6 @@ int cvm_oct_init_module(device_t bus)
printf("\t\tFailed to allocate ethernet device for port %d\n", port);
continue;
}
/* XXX/juli set max send q len. */
#if 0
if (disable_core_queueing)
ifp->tx_queue_len = 0;
#endif
/* Initialize the device private structure. */
device_probe(dev);
@ -399,7 +381,7 @@ int cvm_oct_init_module(device_t bus)
case CVMX_HELPER_INTERFACE_MODE_XAUI:
priv->init = cvm_oct_xaui_init;
priv->uninit = cvm_oct_xaui_uninit;
priv->uninit = cvm_oct_common_uninit;
device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
break;
@ -411,7 +393,7 @@ int cvm_oct_init_module(device_t bus)
case CVMX_HELPER_INTERFACE_MODE_SGMII:
priv->init = cvm_oct_sgmii_init;
priv->uninit = cvm_oct_sgmii_uninit;
priv->uninit = cvm_oct_common_uninit;
device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
break;
@ -506,9 +488,6 @@ void cvm_oct_cleanup_module(void)
}
cvmx_pko_shutdown();
#if 0
cvm_oct_proc_shutdown();
#endif
cvmx_ipd_free_ptr();