/***********************license start***************
* Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of Cavium Networks nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
* OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
* RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
* POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
* OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
*
*
* For any questions regarding licensing please contact marketing@caviumnetworks.com
*
***********************license end**************************************/
/**
* @file
*
* Small helper utilities.
*
*
* $Revision: 42493 $
*/
#include "executive-config.h"
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-bootmem.h"
#include "cvmx-fpa.h"
#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-ipd.h"
#include "cvmx-asx.h"
#include "cvmx-gmx.h"
#include "cvmx-spi.h"
#include "cvmx-sysinfo.h"
#include "cvmx-helper.h"
#include "cvmx-helper-util.h"
#include "cvmx-version.h"
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
/**
* Get the version of the CVMX libraries.
*
* @return Version string. Note this buffer is allocated statically
* and will be shared by all callers.
*/
const char *cvmx_helper_get_version(void)
{
return OCTEON_SDK_VERSION_STRING;
}
/**
* Convert an interface mode into a human-readable string
*
* @param mode Mode to convert
*
* @return String
*/
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode)
{
switch (mode)
{
case CVMX_HELPER_INTERFACE_MODE_DISABLED: return "DISABLED";
case CVMX_HELPER_INTERFACE_MODE_RGMII: return "RGMII";
case CVMX_HELPER_INTERFACE_MODE_GMII: return "GMII";
case CVMX_HELPER_INTERFACE_MODE_SPI: return "SPI";
case CVMX_HELPER_INTERFACE_MODE_PCIE: return "PCIE";
case CVMX_HELPER_INTERFACE_MODE_XAUI: return "XAUI";
case CVMX_HELPER_INTERFACE_MODE_SGMII: return "SGMII";
case CVMX_HELPER_INTERFACE_MODE_PICMG: return "PICMG";
case CVMX_HELPER_INTERFACE_MODE_NPI: return "NPI";
case CVMX_HELPER_INTERFACE_MODE_LOOP: return "LOOP";
}
return "UNKNOWN";
}
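/* Illustrative usage sketch (not part of the original file): print the mode
of every interface using the standard cvmx-helper query functions. */
#if 0
static void example_show_interface_modes(void)
{
    int interface;
    for (interface = 0; interface < cvmx_helper_get_number_of_interfaces(); interface++)
        cvmx_dprintf("Interface %d: %s\n", interface,
            cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(interface)));
}
#endif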
/**
* Debug routine to dump the packet structure to the console
*
* @param work Work queue entry containing the packet to dump
* @return Zero on success
*/
int cvmx_helper_dump_packet(cvmx_wqe_t *work)
{
uint64_t count;
uint64_t remaining_bytes;
cvmx_buf_ptr_t buffer_ptr;
uint64_t start_of_buffer;
uint8_t * data_address;
uint8_t * end_of_data;
cvmx_dprintf("Packet Length: %u\n", work->len);
cvmx_dprintf(" Input Port: %u\n", work->ipprt);
cvmx_dprintf(" QoS: %u\n", work->qos);
cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
if (work->word2.s.bufs == 0)
{
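/* The entire packet fits in the work queue entry itself, so build a
buffer pointer describing the WQE's packet_data field. The WQE buffer
comes from the FPA pool named in IPD_WQE_FPA_QUEUE */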
cvmx_ipd_wqe_fpa_queue_t wqe_pool;
wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
buffer_ptr.u64 = 0;
buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
buffer_ptr.s.size = 128;
buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
if (cvmx_likely(!work->word2.s.not_IP))
{
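/* For IP packets the hardware places the IP header at a fixed location:
PIP_IP_OFFSET (in 8 byte units) into packet_data, plus 4 bytes of pad
for IPv4. Back up by the packet's IP offset to find the first byte of
packet data */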
cvmx_pip_ip_offset_t pip_ip_offset;
pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
buffer_ptr.s.addr += (pip_ip_offset.s.offset<<3) - work->word2.s.ip_offset;
buffer_ptr.s.addr += (work->word2.s.is_v6^1)<<2;
}
else
{
/* WARNING: This code assumes that the packet is not RAW. If it were,
we would use PIP_GBL_CFG[RAW_SHF] instead of
PIP_GBL_CFG[NIP_SHF] */
cvmx_pip_gbl_cfg_t pip_gbl_cfg;
pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
}
}
else
buffer_ptr = work->packet_ptr;
remaining_bytes = work->len;
while (remaining_bytes)
{
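/* The start of the buffer is 'back' 128-byte cache lines before the
cache line containing the data address */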
start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
cvmx_dprintf(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
cvmx_dprintf(" Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
cvmx_dprintf("\t\t");
data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
end_of_data = data_address + buffer_ptr.s.size;
count = 0;
while (data_address < end_of_data)
{
if (remaining_bytes == 0)
break;
else
remaining_bytes--;
cvmx_dprintf("%02x", (unsigned int)*data_address);
data_address++;
if (remaining_bytes && (count == 7))
{
cvmx_dprintf("\n\t\t");
count = 0;
}
else
count++;
}
cvmx_dprintf("\n");
if (remaining_bytes)
buffer_ptr = *(cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
}
return 0;
}
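/* Illustrative usage sketch (not part of the original file): receive one
work queue entry from the POW and dump its packet. Assumes cvmx-pow.h is
included for cvmx_pow_work_request_sync(). */
#if 0
static void example_dump_one_packet(void)
{
    cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
    if (work)
        cvmx_helper_dump_packet(work);
}
#endif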
/**
* Setup Random Early Drop on a specific input queue
*
* @param queue Input queue to setup RED on (0-7)
* @param pass_thresh
* Packets will begin slowly dropping when there are fewer than
* this many packet buffers free in FPA 0.
* @param drop_thresh
* All incoming packets will be dropped when there are fewer
* than this many free packet buffers in FPA 0.
* @return Zero on success. Negative on failure
*/
int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
{
cvmx_ipd_qos_red_marks_t red_marks;
cvmx_ipd_red_quex_param_t red_param;
/* Set RED to begin dropping packets when there are pass_thresh buffers
left. It will linearly drop more packets until reaching drop_thresh
buffers */
red_marks.u64 = 0;
red_marks.s.drop = drop_thresh;
red_marks.s.pass = pass_thresh;
cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
/* Use the actual queue 0 counter, not the average */
red_param.u64 = 0;
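/* The drop probability ramps linearly from 0% when pass_thresh buffers
are free to 100% at drop_thresh. prb_con holds that slope in fixed
point: full probability (255) scaled by 2^24 fraction bits, divided by
the width of the threshold window */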
red_param.s.prb_con = (255ul<<24) / (red_marks.s.pass - red_marks.s.drop);
red_param.s.avg_con = 1;
red_param.s.new_con = 255;
red_param.s.use_pcnt = 1;
cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
return 0;
}
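/* Illustrative example (values are arbitrary): begin randomly dropping on
queue 0 when fewer than 256 pool 0 buffers are free, and drop everything
once fewer than 128 remain:
    cvmx_helper_setup_red_queue(0, 256, 128);
*/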
/**
* Setup Random Early Drop to automatically begin dropping packets.
*
* @param pass_thresh
* Packets will begin slowly dropping when there are fewer than
* this many packet buffers free in FPA 0.
* @param drop_thresh
* All incoming packets will be dropped when there are fewer
* than this many free packet buffers in FPA 0.
* @return Zero on success. Negative on failure
*/
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
cvmx_ipd_portx_bp_page_cnt_t page_cnt;
cvmx_ipd_bp_prt_red_end_t ipd_bp_prt_red_end;
cvmx_ipd_red_port_enable_t red_port_enable;
int queue;
int interface;
int port;
/* Disable backpressure based on queued buffers. It needs SW support */
page_cnt.u64 = 0;
page_cnt.s.bp_enb = 0;
page_cnt.s.page_cnt = 100;
for (interface=0; interface<2; interface++)
{
for (port=cvmx_helper_get_first_ipd_port(interface); port<cvmx_helper_get_last_ipd_port(interface); port++)
cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port), page_cnt.u64);
}
/* Setup RED on each of the eight input queues */
for (queue=0; queue<8; queue++)
cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
/* Shutoff the dropping based on the per port page count. SW isn't
decrementing it right now */
ipd_bp_prt_red_end.u64 = 0;
ipd_bp_prt_red_end.s.prt_enb = 0;
cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);
red_port_enable.u64 = 0;
red_port_enable.s.prt_enb = 0xfffffffffull;
red_port_enable.s.avg_dly = 10000;
red_port_enable.s.prb_dly = 10000;
cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
return 0;
}
/**
* Setup the common GMX settings that determine the number of
* ports. These settings apply to almost all configurations of all
* chips.
*
* @param interface Interface to configure
* @param num_ports Number of ports on the interface
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_setup_gmx(int interface, int num_ports)
{
cvmx_gmxx_tx_prts_t gmx_tx_prts;
cvmx_gmxx_rx_prts_t gmx_rx_prts;
cvmx_pko_reg_gmx_port_mode_t pko_mode;
cvmx_gmxx_txx_thresh_t gmx_tx_thresh;
int index;
/* Tell GMX the number of TX ports on this interface */
gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
gmx_tx_prts.s.prts = num_ports;
cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
/* Tell GMX the number of RX ports on this interface. This only applies
to *GMII and XAUI ports */
if (cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_RGMII
|| cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_SGMII
|| cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_GMII
|| cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_XAUI)
{
if (num_ports > 4)
{
cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal num_ports\n");
return(-1);
}
gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
gmx_rx_prts.s.prts = num_ports;
cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
}
/* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX) && !OCTEON_IS_MODEL(OCTEON_CN50XX))
{
/* Tell PKO the number of ports on this interface */
pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
if (interface == 0)
{
if (num_ports == 1)
pko_mode.s.mode0 = 4;
else if (num_ports == 2)
pko_mode.s.mode0 = 3;
else if (num_ports <= 4)
pko_mode.s.mode0 = 2;
else if (num_ports <= 8)
pko_mode.s.mode0 = 1;
else
pko_mode.s.mode0 = 0;
}
else
{
if (num_ports == 1)
pko_mode.s.mode1 = 4;
else if (num_ports == 2)
pko_mode.s.mode1 = 3;
else if (num_ports <= 4)
pko_mode.s.mode1 = 2;
else if (num_ports <= 8)
pko_mode.s.mode1 = 1;
else
pko_mode.s.mode1 = 0;
}
cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
}
/* Set GMX to buffer as much data as possible before starting transmit.
This reduces the chance of a TX underrun due to memory contention. Any
packet that fits entirely in the GMX FIFO can never underrun regardless
of memory load */
gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
{
/* These chips have a fixed max threshold of 0x40 */
gmx_tx_thresh.s.cnt = 0x40;
}
else
{
/* Choose the max value for the number of ports */
if (num_ports <= 1)
gmx_tx_thresh.s.cnt = 0x100 / 1;
else if (num_ports == 2)
gmx_tx_thresh.s.cnt = 0x100 / 2;
else
gmx_tx_thresh.s.cnt = 0x100 / 4;
}
/* SPI and XAUI can have lots of ports but the GMX hardware only ever has
a max of 4 */
if (num_ports > 4)
num_ports = 4;
for (index=0; index<num_ports; index++)
cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface), gmx_tx_thresh.u64);
return 0;
}
/**
* Initialize the internal QLM JTAG logic to allow programming
* of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
* These functions should only be used at the direction of Cavium
* Networks. Programming incorrect values into the JTAG chain
* can cause chip damage.
*/
void cvmx_helper_qlm_jtag_init(void)
{
cvmx_ciu_qlm_jtgc_t jtgc;
int clock_div = 0;
int divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000);
divisor = (divisor-1)>>2;
/* Convert the divisor into a power of 2 shift */
CVMX_CLZ(clock_div, divisor);
clock_div = 32 - clock_div;
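/* Worked example (illustrative): with a 600 MHz core clock the divisor is
600000000 / 25000000 = 24, (24-1)>>2 = 5, CLZ(5) = 29, so clock_div =
32 - 29 = 3 and the JTAG clock runs at 600 MHz / 2^(3+2) = 18.75 MHz,
at or below the 25 MHz target */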
/* Clock divider for QLM JTAG operations. eclk is divided by 2^(CLK_DIV + 2) */
jtgc.u64 = 0;
jtgc.s.clk_div = clock_div;
jtgc.s.mux_sel = 0;
if (OCTEON_IS_MODEL(OCTEON_CN52XX))
jtgc.s.bypass = 0x3;
else
jtgc.s.bypass = 0xf;
cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
cvmx_read_csr(CVMX_CIU_QLM_JTGC);
}
/**
* Write up to 32 bits into the QLM JTAG chain. Bits are shifted
* into the MSB and out the LSB, so you should shift in the low
* order bits followed by the high order bits. The JTAG chain is
* 4 * 268 bits long, or 1072.
*
* @param qlm QLM to shift value into
* @param bits Number of bits to shift in (1-32).
* @param data Data to shift in. Bit 0 enters the chain first, followed by
* bit 1, etc.
*
* @return The low order bits of the JTAG chain that shifted out of the
* circle.
*/
uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
{
cvmx_ciu_qlm_jtgd_t jtgd;
jtgd.u64 = 0;
jtgd.s.shift = 1;
jtgd.s.shft_cnt = bits-1;
jtgd.s.shft_reg = data;
if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
jtgd.s.select = 1 << qlm;
cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
do
{
jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
} while (jtgd.s.shift);
return jtgd.s.shft_reg >> (32-bits);
}
/**
* Shift long sequences of zeros into the QLM JTAG chain. It is
* common to need to shift more than 32 bits of zeros into the
* chain. This function is a convenience wrapper around
* cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
* zeros at a time.
*
* @param qlm QLM to shift zeros into
* @param bits Number of bits of zeros to shift in
*/
void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
{
while (bits > 0)
{
int n = bits;
if (n > 32)
n = 32;
cvmx_helper_qlm_jtag_shift(qlm, n, 0);
bits -= n;
}
}
/**
* Program the QLM JTAG chain into all lanes of the QLM. You must
* have already shifted in 268*4, or 1072 bits into the JTAG
* chain. Updating invalid values can possibly cause chip damage.
*
* @param qlm QLM to program
*/
void cvmx_helper_qlm_jtag_update(int qlm)
{
cvmx_ciu_qlm_jtgd_t jtgd;
/* Update the new data */
jtgd.u64 = 0;
jtgd.s.update = 1;
if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
jtgd.s.select = 1 << qlm;
cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
do
{
jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
} while (jtgd.s.update);
}
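/* Illustrative usage sketch (not part of the original file): clear the
entire 1072 bit JTAG chain of QLM 0. A real caller would shift in values
supplied by Cavium Networks instead of zeros. */
#if 0
static void example_clear_qlm0_jtag_chain(void)
{
    cvmx_helper_qlm_jtag_init();
    /* The chain is 4 lanes * 268 bits = 1072 bits long */
    cvmx_helper_qlm_jtag_shift_zeros(0, 268 * 4);
    cvmx_helper_qlm_jtag_update(0);
}
#endif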