Remove some compile-time options from the driver, particularly async IOBDMA
support, which is unused on FreeBSD and which complicates working on the code
now; it can easily be added back later by someone determined to use it.
Juli Mallett 2011-01-20 23:34:59 +00:00
parent 87c44314f9
commit 3e46966ed3
4 changed files with 17 additions and 112 deletions
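
For context on what is being removed: with USE_ASYNC_IOBDMA the driver issued POW work requests through Octeon's IOBDMA mechanism, parking the response in a CVMSEG scratch word and reading it back later, while the synchronous call simply waits for the POW to answer. The sketch below is not part of the diff; it contrasts the two paths using the cvmx SDK helpers that appear in the hunks that follow, and assumes the usual cvmx headers plus the driver's CVMX_SCR_SCRATCH scratch offset, with a caller that has already saved any scratch state userspace may be using.

/*
 * Minimal sketch, not part of the commit: fetch one work queue entry
 * either through the asynchronous IOBDMA path this change removes, or
 * with the plain synchronous request the driver keeps using.
 */
#include "cvmx.h"
#include "cvmx-pow.h"

static cvmx_wqe_t *
fetch_work_sketch(int use_async_iobdma)
{
	if (use_async_iobdma) {
		/* Start the request; it completes in the background while
		   the core does other work. */
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);

		/* ... overlapping work would go here ... */

		/* Pick up the response from the scratch location. */
		return (cvmx_pow_work_response_async(CVMX_SCR_SCRATCH));
	}

	/* Synchronous path: returns NULL when no work is available. */
	return (cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT));
}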

View File

@@ -30,43 +30,15 @@ AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR W
/*
* A few defines are used to control the operation of this driver:
* CONFIG_CAVIUM_RESERVE32
This kernel config option controls the amount of memory configured
* in a wired TLB entry for all processes to share. If this is set, the
* driver will use this memory instead of kernel memory for pools. This
allows 32-bit userspace applications to access the buffers, but also
* requires all received packets to be copied.
* CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
* This kernel config option allows the user to control the number of
* packet and work queue buffers allocated by the driver. If this is zero,
* the driver uses the default from below.
* USE_HW_TCPUDP_CHECKSUM
* Controls if the Octeon TCP/UDP checksum engine is used for packet
* output. If this is zero, the kernel will perform the checksum in
* software.
* USE_MULTICORE_RECEIVE
* Process receive interrupts on multiple cores. This spreads the network
load across the first 8 processors. If this is zero, only one core
processes incoming packets.
* USE_ASYNC_IOBDMA
* Use asynchronous IO access to hardware. This uses Octeon's asynchronous
* IOBDMAs to issue IO accesses without stalling. Set this to zero
* to disable this. Note that IOBDMAs require CVMSEG.
*/
#ifndef CONFIG_CAVIUM_RESERVE32
#define CONFIG_CAVIUM_RESERVE32 0
#endif
#define INTERRUPT_LIMIT 10000 /* Max interrupts per second per core */
/*#define INTERRUPT_LIMIT 0 *//* Don't limit the number of interrupts */
#define USE_HW_TCPUDP_CHECKSUM 1
#define USE_MULTICORE_RECEIVE 1
#define USE_RED 1 /* Enable Random Early Dropping under load */
#if 0
#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
#else
#define USE_ASYNC_IOBDMA 0
#endif
#define USE_10MBPS_PREAMBLE_WORKAROUND 1 /* Allow SW based preamble removal at 10Mbps to work around PHYs giving us bad preambles */
#define DONT_WRITEBACK(x) (x) /* Use this to have all FPA frees also tell the L2 not to write data to memory */
/*#define DONT_WRITEBACK(x) 0 *//* Use this to not have FPA frees control L2 */
@@ -74,11 +46,6 @@ AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR W
#define MAX_RX_PACKETS 120 /* Maximum number of packets to process per interrupt. */
#define MAX_OUT_QUEUE_DEPTH 1000
#ifndef SMP
#undef USE_MULTICORE_RECEIVE
#define USE_MULTICORE_RECEIVE 0
#endif
#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
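
The knobs in this header are ordinary integer constants rather than #ifdef switches, so the code tests them with plain if statements and the compiler discards the dead branch when a knob is 0. As a concrete illustration (a sketch, not part of the diff), this is how INTERRUPT_LIMIT and MAX_RX_PACKETS combine in the receive loop shown in the next file, with rx_count counting packets handled so far in this pass:

/*
 * Sketch of the per-pass cap in cvm_oct_tasklet_rx(): with interrupt
 * limiting enabled, a core stops requesting new work from the POW after
 * MAX_RX_PACKETS packets so that receive processing cannot starve
 * transmit completion.
 */
cvmx_wqe_t *work;

if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
	work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
else
	work = NULL;		/* End this pass; remaining work waits. */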

View File

@@ -164,7 +164,6 @@ void cvm_oct_tasklet_rx(void *context, int pending)
{
int coreid;
uint64_t old_group_mask;
uint64_t old_scratch;
int rx_count = 0;
int number_to_free;
int num_freed;
@@ -176,49 +175,24 @@ void cvm_oct_tasklet_rx(void *context, int pending)
/* Prefetch cvm_oct_device since we know we need it soon */
CVMX_PREFETCH(cvm_oct_device, 0);
if (USE_ASYNC_IOBDMA) {
/* Save scratch in case userspace is using it */
CVMX_SYNCIOBDMA;
old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
}
/* Only allow work for our group (and preserve priorities) */
old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
(old_group_mask & ~0xFFFFull) | 1<<pow_receive_group);
if (USE_ASYNC_IOBDMA)
cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
while (1) {
struct mbuf *m = NULL;
int mbuf_in_hw;
cvmx_wqe_t *work;
if (USE_ASYNC_IOBDMA) {
work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
} else {
if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
else
work = NULL;
}
if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
else
work = NULL;
CVMX_PREFETCH(work, 0);
if (work == NULL)
break;
/* Limit each core to processing MAX_RX_PACKETS packets without a break.
This way the RX can't starve the TX task. */
if (USE_ASYNC_IOBDMA) {
if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
else {
cvmx_scratch_write64(CVMX_SCR_SCRATCH, 0x8000000000000000ull);
cvmx_pow_tag_sw_null_nocheck();
}
}
mbuf_in_hw = work->word2.s.bufs == 1;
if ((mbuf_in_hw)) {
m = *(struct mbuf **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
@@ -376,10 +350,6 @@ void cvm_oct_tasklet_rx(void *context, int pending)
/* Restore the original POW group mask */
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
if (USE_ASYNC_IOBDMA) {
/* Restore the scratch area */
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
}
/* Refill the packet buffer pool */
number_to_free =
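
With the IOBDMA branches deleted, the receive routine reduces to the shape below (a condensed outline, not part of the diff; mbuf conversion and the buffer-pool refill are elided): bind the core's POW group mask to the receive group, drain work synchronously up to the per-pass cap, then restore the mask.

/* Condensed outline of cvm_oct_tasklet_rx() after this change. */
old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
    (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

while (1) {
	cvmx_wqe_t *work;

	if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
		work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
	else
		work = NULL;
	if (work == NULL)
		break;

	/* ... turn the work entry into an mbuf and pass it up ... */
	rx_count++;
}

/* Restore the original POW group mask for this core. */
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);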

View File

@@ -66,8 +66,6 @@ int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
cvmx_pko_command_word0_t pko_command;
cvmx_buf_ptr_t hw_buffer;
uint64_t old_scratch;
uint64_t old_scratch2;
int dropped;
int qos;
cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
@@ -95,18 +93,6 @@ int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
} else
qos = 0;
if (USE_ASYNC_IOBDMA) {
/* Save scratch in case userspace is using it */
CVMX_SYNCIOBDMA;
old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH+8);
/* Assume we're going to be able to send this packet. Fetch and increment
the number of pending packets for output */
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH+8, FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, priv->fau+qos*4, 1);
}
/* The CN3XXX series of parts has an erratum (GMX-401) which causes the
GMX block to hang if a collision occurs towards the end of a
<68 byte packet. As a workaround for this, we pad packets to be
@@ -195,8 +181,7 @@ int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
pko_command.s.subone0 = 1;
/* Check if we can use the hardware checksumming */
if (USE_HW_TCPUDP_CHECKSUM &&
(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
/* Use hardware checksum calc */
pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
}
@@ -207,16 +192,9 @@ int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
* core instead of per QoS, to reduce contention here.
*/
IF_LOCK(&priv->tx_free_queue[qos]);
if (USE_ASYNC_IOBDMA) {
/* Get the number of mbufs in use by the hardware */
CVMX_SYNCIOBDMA;
in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH+8);
} else {
/* Get the number of mbufs in use by the hardware */
in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
/* Get the number of mbufs in use by the hardware */
in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);
@@ -231,12 +209,6 @@ int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
dropped = 1;
}
if (USE_ASYNC_IOBDMA) {
/* Restore the scratch area */
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH+8, old_scratch2);
}
if (__predict_false(dropped)) {
m_freem(m);
cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
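
On the transmit side the same removal leaves plain fetch-and-add accounting against the FAU registers. A minimal sketch of the pattern that remains (not part of the diff; command construction and the PKO send are elided): the per-queue depth counter is bumped optimistically and rolled back if the packet ends up dropped.

/*
 * Sketch of the synchronous accounting kept in cvm_oct_xmit():
 * priv->fau + qos*4 counts packets in flight for this port/QoS queue,
 * FAU_NUM_PACKET_BUFFERS_TO_FREE counts hardware-owned buffers to reclaim.
 */
int32_t in_use, buffers_to_free;

/* Fetch-and-add returns the old value; the +1 counts this packet as queued. */
in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
/* Adding 0 just reads the counter. */
buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

/* ... build pko_command and hand it to the PKO; set dropped on failure ... */

if (__predict_false(dropped)) {
	m_freem(m);
	/* Undo the optimistic increment above. */
	cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
}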

View File

@@ -232,22 +232,18 @@ static void cvm_oct_configure_common_hw(device_t bus)
#ifdef SMP
if (USE_MULTICORE_RECEIVE) {
critical_enter();
{
cvmx_ciu_intx0_t en;
int core;
{
cvmx_ciu_intx0_t en;
int core;
CPU_FOREACH(core) {
if (core == PCPU_GET(cpuid))
continue;
CPU_FOREACH(core) {
if (core == PCPU_GET(cpuid))
continue;
en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core*2));
en.s.workq |= (1<<pow_receive_group);
cvmx_write_csr(CVMX_CIU_INTX_EN0(core*2), en.u64);
}
en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core*2));
en.s.workq |= (1<<pow_receive_group);
cvmx_write_csr(CVMX_CIU_INTX_EN0(core*2), en.u64);
}
critical_exit();
}
#endif
}
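
The core*2 indexing above reflects how the CIU is laid out: each core has a pair of interrupt-enable words, one for its IP2 line and one for IP3, so CVMX_CIU_INTX_EN0(core*2) selects the IP2 enable word for that core. A small sketch of the per-core enable applied in the loop above; the helper name is made up and not part of the driver:

/* Hypothetical helper: enable the POW work-queue interrupt for one core. */
static void
enable_rx_group_on_core(int core, int group)
{
	cvmx_ciu_intx0_t en;

	en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core * 2));	/* IP2 word */
	en.s.workq |= (1 << group);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(core * 2), en.u64);
}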