The diff is the initial submission of Cavium Liquidio 2350/2360 10/25G

Intelligent NIC driver.

The submission consists of a firmware binary file and driver sources.

Submitted by:	pkanneganti@cavium.com (Prasad V Kanneganti)
Relnotes:	Yes
Sponsored by:	Cavium Networks
Differential Revision:	https://reviews.freebsd.org/D11927
This commit is contained in:
sbruno 2017-09-12 23:36:58 +00:00
parent f3c5ae6e6d
commit 5543e587c7
41 changed files with 44552 additions and 2 deletions

View File

@ -253,6 +253,7 @@ MAN= aac.4 \
led.4 \
lge.4 \
${_linux.4} \
liquidio.4 \
lm75.4 \
lmc.4 \
lo.4 \

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd April 12, 2017
.Dd August 28, 2017
.Dt ALTQ 4
.Os
.Sh NAME
@ -152,6 +152,7 @@ They have been applied to the following hardware drivers:
.Xr ixgbe 4 ,
.Xr jme 4 ,
.Xr le 4 ,
.Xr liquidio 4 ,
.Xr msk 4 ,
.Xr mxge 4 ,
.Xr my 4 ,

133
share/man/man4/liquidio.4 Normal file
View File

@ -0,0 +1,133 @@
.\" BSD LICENSE
.\"
.\" Copyright(c) 2017 Cavium, Inc.. All rights reserved.
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\"
.\" * Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" * Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in
.\" the documentation and/or other materials provided with the
.\" distribution.
.\" * Neither the name of Cavium, Inc. nor the names of its
.\" contributors may be used to endorse or promote products derived
.\" from this software without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
.\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
.\" OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
.\" SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
.\" LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
.\" OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\" $FreeBSD$
.\"
.Dd August 17, 2017
.Dt LIQUIDIO 4
.Os
.Sh NAME
.Nm liquidio
.Nd "Cavium 10Gb/25Gb Ethernet driver for the FreeBSD operating system"
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following line in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device lio"
.Ed
.Pp
Alternatively, to load the driver as a
module at boot time, place the following line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
if_lio_load="YES"
.Ed
.Sh DESCRIPTION
The
.Nm
driver provides support for 23XX 10Gb/25Gb Ethernet adapters.
The driver supports Jumbo Frames, Transmit/Receive checksum
offload, TCP segmentation offload (TSO), Large Receive Offload (LRO),
VLAN tag insertion/extraction, VLAN checksum offload,
VLAN TSO, and Receive Side Scaling (RSS).
.Pp
Support for Jumbo Frames is provided via the interface MTU setting.
Selecting an MTU larger than 1500 bytes with the
.Xr ifconfig 8
utility configures the adapter to receive and transmit Jumbo Frames.
The maximum MTU size for Jumbo Frames is 16000.
.Pp
For more information on configuring this device, see
.Xr ifconfig 8 .
.Sh HARDWARE
The
.Nm
driver supports the following cards:
.Pp
.Bl -bullet -compact
.It
LiquidIO II CN2350 210SV/225SV
.It
LiquidIO II CN2360 210SV/225SV
.El
.Sh LOADER TUNABLES
Tunables can be set at the
.Xr loader 8
prompt before booting the kernel or stored in
.Xr loader.conf 5 .
.Pp
.Bl -tag -width indent
.It Va hw.lio.fw_type
.Pp
String that specifies type of firmware to be loaded.
Default is "nic".
Use "none" to load firmware from flash.
.It Va hw.lio.num_queues_per_pf0
.Pp
Unsigned integer that specifies the number of queues per PF0.
Valid range is 0 to 64.
Use 0 to autoconfigure based on the number of CPUs,
with a maximum of 8.
.It Va hw.lio.num_queues_per_pf1
.Pp
Unsigned integer that specifies the number of queues per PF1.
Valid range is 0 to 64.
Use 0 to autoconfigure based on the number of CPUs,
with a maximum of 8.
.It Va hw.lio.console_bitmask
.Pp
Bitmask indicating which consoles have debug output
redirected to syslog.
.It Va hw.lio.rss
.Pp
To enable/disable driver RSS support.
.It Va hw.lio.hwlro
.Pp
To enable/disable hardware LRO.
.El
.Sh SUPPORT
For general information and support,
go to the Cavium support website at:
.Pa http://support.cavium.com .
.Sh SEE ALSO
.Xr altq 4 ,
.Xr arp 4 ,
.Xr netintro 4 ,
.Xr ng_ether 4 ,
.Xr vlan 4 ,
.Xr ifconfig 8
.Sh HISTORY
The
.Nm
device driver first appeared in
.Fx 12.0 .
.Sh AUTHORS
The
.Nm
driver was written by
.An Derek Chickles Aq Mt derek.chickles@cavium.com .

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 8, 2016
.Dd August 28, 2017
.Dt VLAN 4
.Os
.Sh NAME
@ -135,6 +135,7 @@ in hardware:
.Xr ixgb 4 ,
.Xr ixgbe 4 ,
.Xr jme 4 ,
.Xr liquidio 4 ,
.Xr msk 4 ,
.Xr mxge 4 ,
.Xr nxge 4 ,

View File

@ -1984,6 +1984,7 @@ device xmphy # XaQti XMAC II
# lge: Support for PCI gigabit ethernet adapters based on the Level 1
# LXT1001 NetCellerator chipset. This includes the D-Link DGE-500SX,
# SMC TigerCard 1000 (SMC9462SX), and some Addtron cards.
# lio: Support for Cavium 23XX Ethernet adapters
# malo: Marvell Libertas wireless NICs.
# mwl: Marvell 88W8363 802.11n wireless NICs.
# Requires the mwl firmware module
@ -2143,6 +2144,7 @@ device ixgb # Intel Pro/10Gbe PCI-X Ethernet
device ix # Intel Pro/10Gbe PCIE Ethernet
device ixv # Intel Pro/10Gbe PCIE Ethernet VF
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device lio # Cavium 23XX Ethernet
device mxge # Myricom Myri-10G 10GbE NIC
device nxge # Neterion Xframe 10GbE Server/Storage Adapter
device oce # Emulex 10 GbE (OneConnect Ethernet)

View File

@ -2216,6 +2216,62 @@ dev/le/if_le_pci.c optional le pci
dev/le/lance.c optional le
dev/led/led.c standard
dev/lge/if_lge.c optional lge
dev/liquidio/base/cn23xx_pf_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_console.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_ctrl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_droq.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_mem_ops.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_request_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_response_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_core.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_ioctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_main.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rss.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rxtx.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_sysctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
lio.c optional lio \
compile-with "${AWK} -f $S/tools/fw_stub.awk lio_23xx_nic.bin.fw:lio_23xx_nic.bin -mlio_23xx_nic.bin -c${.TARGET}" \
no-implicit-rule before-depend local \
clean "lio.c"
lio_23xx_nic.bin.fw.fwo optional lio \
dependency "lio_23xx_nic.bin.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "lio_23xx_nic.bin.fw.fwo"
lio_23xx_nic.bin.fw optional lio \
dependency "$S/contrib/dev/liquidio/lio_23xx_nic.bin.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "lio_23xx_nic.bin.fw"
dev/lmc/if_lmc.c optional lmc
dev/malo/if_malo.c optional malo
dev/malo/if_malohal.c optional malo

View File

@ -328,6 +328,62 @@ dev/hyperv/vmbus/vmbus_res.c optional hyperv
dev/hyperv/vmbus/vmbus_xact.c optional hyperv
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
dev/liquidio/base/cn23xx_pf_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_console.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_ctrl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_droq.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_mem_ops.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_request_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_response_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_core.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_ioctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_main.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rss.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rxtx.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_sysctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
lio.c optional lio \
compile-with "${AWK} -f $S/tools/fw_stub.awk lio_23xx_nic.bin.fw:lio_23xx_nic.bin -mlio_23xx_nic.bin -c${.TARGET}" \
no-implicit-rule before-depend local \
clean "lio.c"
lio_23xx_nic.bin.fw.fwo optional lio \
dependency "lio_23xx_nic.bin.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "lio_23xx_nic.bin.fw.fwo"
lio_23xx_nic.bin.fw optional lio \
dependency "$S/contrib/dev/liquidio/lio_23xx_nic.bin.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "lio_23xx_nic.bin.fw"
dev/nctgpio/nctgpio.c optional nctgpio
dev/nfe/if_nfe.c optional nfe pci
dev/ntb/if_ntb/if_ntb.c optional if_ntb

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,71 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file cn23xx_device.h
* \brief Host Driver: Routines that perform CN23XX specific operations.
*/
#ifndef __CN23XX_PF_DEVICE_H__
#define __CN23XX_PF_DEVICE_H__
#include "cn23xx_pf_regs.h"
/*
* Register address and configuration for CN23XX devices.
* If device specific changes need to be made then add a struct to include
* device specific fields as shown in the commented section
*/
struct lio_cn23xx_pf {
	/* PCI interrupt summary register */
	uint32_t intr_sum_reg64;
	/* PCI interrupt enable register */
	uint32_t intr_enb_reg64;
	/* The PCI interrupt mask used by interrupt handler */
	uint64_t intr_mask64;
	/* Device configuration; struct lio_config is declared elsewhere */
	struct lio_config *conf;
};
#define BUSY_READING_REG_PF_LOOP_COUNT 10000
int lio_cn23xx_pf_setup_device(struct octeon_device *oct);
uint32_t lio_cn23xx_pf_get_oq_ticks(struct octeon_device *oct,
uint32_t time_intr_in_us);
int lio_cn23xx_pf_fw_loaded(struct octeon_device *oct);
#endif /* __CN23XX_PF_DEVICE_H__ */

View File

@ -0,0 +1,411 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file cn23xx_pf_regs.h
* \brief Host Driver: Register Address and Register Mask values for
* CN23XX devices.
*/
#ifndef __CN23XX_PF_REGS_H__
#define __CN23XX_PF_REGS_H__
#define LIO_CN23XX_CFG_PCIE_DEVCTL 0x78
#define LIO_CN23XX_CFG_PCIE_UNCORRECT_ERR_MASK 0x108
#define LIO_CN23XX_CFG_PCIE_CORRECT_ERR_STATUS 0x110
#define LIO_CN23XX_CFG_PCIE_DEVCTL_MASK 0x00040000
#define LIO_CN23XX_PCIE_SRIOV_FDL 0x188
#define LIO_CN23XX_PCIE_SRIOV_FDL_BIT_POS 0x10
#define LIO_CN23XX_PCIE_SRIOV_FDL_MASK 0xFF
/* ############## BAR0 Registers ################ */
#define LIO_CN23XX_SLI_CTL_PORT_START 0x286E0
#define LIO_CN23XX_PORT_OFFSET 0x10
#define LIO_CN23XX_SLI_CTL_PORT(p) \
(LIO_CN23XX_SLI_CTL_PORT_START + \
((p) * LIO_CN23XX_PORT_OFFSET))
/* 2 scratch registers (64-bit) */
#define LIO_CN23XX_SLI_WINDOW_CTL 0x282E0
#define LIO_CN23XX_SLI_SCRATCH1 0x283C0
#define LIO_CN23XX_SLI_SCRATCH2 0x283D0
#define LIO_CN23XX_SLI_WINDOW_CTL_DEFAULT 0x200000ULL
/* 1 register (64-bit) - SLI_CTL_STATUS */
#define LIO_CN23XX_SLI_CTL_STATUS 0x28570
/*
* SLI Packet Input Jabber Register (64 bit register)
* <31:0> for Byte count for limiting sizes of packet sizes
* that are allowed for sli packet inbound packets.
* the default value is 0xFA00(=64000).
*/
#define LIO_CN23XX_SLI_PKT_IN_JABBER 0x29170
#define LIO_CN23XX_SLI_WIN_WR_ADDR_LO 0x20000
#define LIO_CN23XX_SLI_WIN_WR_ADDR64 LIO_CN23XX_SLI_WIN_WR_ADDR_LO
#define LIO_CN23XX_SLI_WIN_RD_ADDR_LO 0x20010
#define LIO_CN23XX_SLI_WIN_RD_ADDR_HI 0x20014
#define LIO_CN23XX_SLI_WIN_RD_ADDR64 LIO_CN23XX_SLI_WIN_RD_ADDR_LO
#define LIO_CN23XX_SLI_WIN_WR_DATA_LO 0x20020
#define LIO_CN23XX_SLI_WIN_WR_DATA_HI 0x20024
#define LIO_CN23XX_SLI_WIN_WR_DATA64 LIO_CN23XX_SLI_WIN_WR_DATA_LO
#define LIO_CN23XX_SLI_WIN_RD_DATA_LO 0x20040
#define LIO_CN23XX_SLI_WIN_RD_DATA_HI 0x20044
#define LIO_CN23XX_SLI_WIN_RD_DATA64 LIO_CN23XX_SLI_WIN_RD_DATA_LO
#define LIO_CN23XX_SLI_WIN_WR_MASK_REG 0x20030
#define LIO_CN23XX_SLI_MAC_CREDIT_CNT 0x23D70
/*
* 4 registers (64-bit) for mapping IOQs to MACs(PEMs)-
* SLI_PKT_MAC(0..3)_PF(0..1)_RINFO
*/
#define LIO_CN23XX_SLI_PKT_MAC_RINFO_START64 0x29030
/*1 register (64-bit) to determine whether IOQs are in reset. */
#define LIO_CN23XX_SLI_PKT_IOQ_RING_RST 0x291E0
/* Each Input Queue register is at a 16-byte Offset in BAR0 */
#define LIO_CN23XX_IQ_OFFSET 0x20000
#define LIO_CN23XX_MAC_RINFO_OFFSET 0x20
#define LIO_CN23XX_PF_RINFO_OFFSET 0x10
#define LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac, pf) \
(LIO_CN23XX_SLI_PKT_MAC_RINFO_START64 + \
((mac) * LIO_CN23XX_MAC_RINFO_OFFSET) + \
((pf) * LIO_CN23XX_PF_RINFO_OFFSET))
/* mask for total rings, setting TRS to base */
#define LIO_CN23XX_PKT_MAC_CTL_RINFO_TRS BIT_ULL(16)
/* Starting bit of the TRS field in LIO_CN23XX_SLI_PKT_MAC_RINFO64 register */
#define LIO_CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS 16
/*###################### REQUEST QUEUE #########################*/
/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
#define LIO_CN23XX_SLI_PKT_IN_DONE_CNTS_START64 0x10040
/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
#define LIO_CN23XX_SLI_PKT_INSTR_BADDR_START64 0x10010
/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
#define LIO_CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START 0x10020
/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
#define LIO_CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START 0x10030
/*
* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
* gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
*/
#define LIO_CN23XX_SLI_PKT_INPUT_CONTROL_START64 0x10000
/*------- Request Queue Macros ---------*/
#define LIO_CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
(LIO_CN23XX_SLI_PKT_INPUT_CONTROL_START64 + \
((iq) * LIO_CN23XX_IQ_OFFSET))
#define LIO_CN23XX_SLI_IQ_BASE_ADDR64(iq) \
(LIO_CN23XX_SLI_PKT_INSTR_BADDR_START64 + \
((iq) * LIO_CN23XX_IQ_OFFSET))
#define LIO_CN23XX_SLI_IQ_SIZE(iq) \
(LIO_CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START + \
((iq) * LIO_CN23XX_IQ_OFFSET))
#define LIO_CN23XX_SLI_IQ_DOORBELL(iq) \
(LIO_CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + \
((iq) * LIO_CN23XX_IQ_OFFSET))
#define LIO_CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
(LIO_CN23XX_SLI_PKT_IN_DONE_CNTS_START64 + \
((iq) * LIO_CN23XX_IQ_OFFSET))
/*------------------ Masks ----------------*/
#define LIO_CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32)
#define LIO_CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29)
/*
* Number of instructions to be read in one MAC read request.
* setting to Max value(4)
*/
#define LIO_CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
#define LIO_CN23XX_PKT_INPUT_CTL_IS_64B BIT(24)
#define LIO_CN23XX_PKT_INPUT_CTL_RST BIT(23)
#define LIO_CN23XX_PKT_INPUT_CTL_QUIET BIT(28)
#define LIO_CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22)
#define LIO_CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
#define LIO_CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4)
#define LIO_CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
#define LIO_CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45)
/* These bits[43:32] select the function number within the PF */
#define LIO_CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29)
#define LIO_CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL)
#define LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32)
#define LIO_CN23XX_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
#if BYTE_ORDER == LITTLE_ENDIAN
#define LIO_CN23XX_PKT_INPUT_CTL_MASK \
(LIO_CN23XX_PKT_INPUT_CTL_RDSIZE | \
LIO_CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
LIO_CN23XX_PKT_INPUT_CTL_USE_CSR)
#else /* BYTE_ORDER != LITTLE_ENDIAN */
#define LIO_CN23XX_PKT_INPUT_CTL_MASK \
(LIO_CN23XX_PKT_INPUT_CTL_RDSIZE | \
LIO_CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
LIO_CN23XX_PKT_INPUT_CTL_USE_CSR | \
LIO_CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
/*############################ OUTPUT QUEUE #########################*/
/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
#define LIO_CN23XX_SLI_PKT_OUTPUT_CONTROL_START 0x10050
/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
#define LIO_CN23XX_SLI_PKT_OUT_SIZE 0x10060
/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
#define LIO_CN23XX_SLI_SLIST_BADDR_START64 0x10070
/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
#define LIO_CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START 0x10080
/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
#define LIO_CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START 0x10090
/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
#define LIO_CN23XX_SLI_PKT_CNTS_START 0x100B0
/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
#define LIO_CN23XX_SLI_PKT_INT_LEVELS_START64 0x100A0
/* Each Output Queue register is at a 16-byte Offset in BAR0 */
#define LIO_CN23XX_OQ_OFFSET 0x20000
/* 1 (64-bit register) for Output Queue backpressure across all rings. */
#define LIO_CN23XX_SLI_OQ_WMARK 0x29180
/* Global pkt control register */
#define LIO_CN23XX_SLI_GBL_CONTROL 0x29210
/* Backpressure enable register for PF0 */
#define LIO_CN23XX_SLI_OUT_BP_EN_W1S 0x29260
/* Backpressure enable register for PF1 */
#define LIO_CN23XX_SLI_OUT_BP_EN2_W1S 0x29270
/*------- Output Queue Macros ---------*/
#define LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq) \
(LIO_CN23XX_SLI_PKT_OUTPUT_CONTROL_START + \
((oq) * LIO_CN23XX_OQ_OFFSET))
#define LIO_CN23XX_SLI_OQ_BASE_ADDR64(oq) \
(LIO_CN23XX_SLI_SLIST_BADDR_START64 + \
((oq) * LIO_CN23XX_OQ_OFFSET))
#define LIO_CN23XX_SLI_OQ_SIZE(oq) \
(LIO_CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START + \
((oq) * LIO_CN23XX_OQ_OFFSET))
#define LIO_CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
(LIO_CN23XX_SLI_PKT_OUT_SIZE + \
((oq) * LIO_CN23XX_OQ_OFFSET))
#define LIO_CN23XX_SLI_OQ_PKTS_SENT(oq) \
(LIO_CN23XX_SLI_PKT_CNTS_START + \
((oq) * LIO_CN23XX_OQ_OFFSET))
#define LIO_CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
(LIO_CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START + \
((oq) * LIO_CN23XX_OQ_OFFSET))
#define LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(oq) \
(LIO_CN23XX_SLI_PKT_INT_LEVELS_START64 + \
((oq) * LIO_CN23XX_OQ_OFFSET))
/*------------------ Masks ----------------*/
#define LIO_CN23XX_PKT_OUTPUT_CTL_TENB BIT(13)
#define LIO_CN23XX_PKT_OUTPUT_CTL_CENB BIT(12)
#define LIO_CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11)
#define LIO_CN23XX_PKT_OUTPUT_CTL_ES BIT(9)
#define LIO_CN23XX_PKT_OUTPUT_CTL_NSR BIT(8)
#define LIO_CN23XX_PKT_OUTPUT_CTL_ROR BIT(7)
#define LIO_CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6)
#define LIO_CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5)
#define LIO_CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3)
#define LIO_CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2)
#define LIO_CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1)
#define LIO_CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0)
/*######################## MSIX TABLE #########################*/
/*
 * MSI-X table layout in BAR4: each entry is 16 bytes, holding a 64-bit
 * message address at offset 0x0 and message data at offset 0x8.
 *
 * Fixed: the _DATA_START and _SIZE constants were defined without the
 * LIO_ prefix while the accessor macros below expanded to the (undefined)
 * LIO_-prefixed names, so any use failed to compile.  All names now follow
 * the file-wide LIO_ convention.
 */
#define LIO_CN23XX_MSIX_TABLE_ADDR_START	0x0
#define LIO_CN23XX_MSIX_TABLE_DATA_START	0x8
#define LIO_CN23XX_MSIX_TABLE_SIZE		0x10
/* Byte offset of the message-address field for MSI-X vector 'idx' */
#define LIO_CN23XX_MSIX_TABLE_ADDR(idx)			\
	(LIO_CN23XX_MSIX_TABLE_ADDR_START +		\
	 ((idx) * LIO_CN23XX_MSIX_TABLE_SIZE))
/* Byte offset of the message-data field for MSI-X vector 'idx' */
#define LIO_CN23XX_MSIX_TABLE_DATA(idx)			\
	(LIO_CN23XX_MSIX_TABLE_DATA_START +		\
	 ((idx) * LIO_CN23XX_MSIX_TABLE_SIZE))
/*######################## INTERRUPTS #########################*/
#define LIO_CN23XX_MAC_INT_OFFSET 0x20
#define LIO_CN23XX_PF_INT_OFFSET 0x10
/* 1 register (64-bit) for Interrupt Summary */
#define LIO_CN23XX_SLI_INT_SUM64 0x27000
/* 4 registers (64-bit) for Interrupt Enable for each Port */
#define LIO_CN23XX_SLI_INT_ENB64 0x27080
#define LIO_CN23XX_SLI_MAC_PF_INT_SUM64(mac, pf) \
(LIO_CN23XX_SLI_INT_SUM64 + \
((mac) * LIO_CN23XX_MAC_INT_OFFSET) + \
((pf) * LIO_CN23XX_PF_INT_OFFSET))
#define LIO_CN23XX_SLI_MAC_PF_INT_ENB64(mac, pf) \
(LIO_CN23XX_SLI_INT_ENB64 + \
((mac) * LIO_CN23XX_MAC_INT_OFFSET) + \
((pf) * LIO_CN23XX_PF_INT_OFFSET))
/* 1 register (64-bit) to indicate which Output Queue reached pkt threshold */
#define LIO_CN23XX_SLI_PKT_CNT_INT 0x29130
/* 1 register (64-bit) to indicate which Output Queue reached time threshold */
#define LIO_CN23XX_SLI_PKT_TIME_INT 0x29140
/*------------------ Interrupt Masks ----------------*/
#define LIO_CN23XX_INTR_PO_INT BIT_ULL(63)
#define LIO_CN23XX_INTR_PI_INT BIT_ULL(62)
#define LIO_CN23XX_INTR_RESEND BIT_ULL(60)
#define LIO_CN23XX_INTR_CINT_ENB BIT_ULL(48)
#define LIO_CN23XX_INTR_MIO_INT BIT(1)
#define LIO_CN23XX_INTR_PKT_TIME BIT(5)
#define LIO_CN23XX_INTR_M0UPB0_ERR BIT(8)
#define LIO_CN23XX_INTR_M0UPWI_ERR BIT(9)
#define LIO_CN23XX_INTR_M0UNB0_ERR BIT(10)
#define LIO_CN23XX_INTR_M0UNWI_ERR BIT(11)
#define LIO_CN23XX_INTR_DMA0_FORCE BIT_ULL(32)
#define LIO_CN23XX_INTR_DMA1_FORCE BIT_ULL(33)
#define LIO_CN23XX_INTR_DMA0_TIME BIT_ULL(36)
#define LIO_CN23XX_INTR_DMA1_TIME BIT_ULL(37)
#define LIO_CN23XX_INTR_DMAPF_ERR BIT_ULL(59)
#define LIO_CN23XX_INTR_PKTPF_ERR BIT_ULL(61)
#define LIO_CN23XX_INTR_PPPF_ERR BIT_ULL(63)
#define LIO_CN23XX_INTR_DMA0_DATA (LIO_CN23XX_INTR_DMA0_TIME)
#define LIO_CN23XX_INTR_DMA1_DATA (LIO_CN23XX_INTR_DMA1_TIME)
#define LIO_CN23XX_INTR_DMA_DATA \
(LIO_CN23XX_INTR_DMA0_DATA | LIO_CN23XX_INTR_DMA1_DATA)
/* By default only TIME based */
#define LIO_CN23XX_INTR_PKT_DATA (LIO_CN23XX_INTR_PKT_TIME)
/* Sum of interrupts for error events */
#define LIO_CN23XX_INTR_ERR \
(LIO_CN23XX_INTR_M0UPB0_ERR | \
LIO_CN23XX_INTR_M0UPWI_ERR | \
LIO_CN23XX_INTR_M0UNB0_ERR | \
LIO_CN23XX_INTR_M0UNWI_ERR | \
LIO_CN23XX_INTR_DMAPF_ERR | \
LIO_CN23XX_INTR_PKTPF_ERR | \
LIO_CN23XX_INTR_PPPF_ERR)
/* Programmed Mask for Interrupt Sum */
#define LIO_CN23XX_INTR_MASK \
(LIO_CN23XX_INTR_DMA_DATA | \
LIO_CN23XX_INTR_DMA0_FORCE | \
LIO_CN23XX_INTR_DMA1_FORCE | \
LIO_CN23XX_INTR_MIO_INT | \
LIO_CN23XX_INTR_ERR)
/* 4 Registers (64 - bit) */
#define LIO_CN23XX_SLI_S2M_PORT_CTL_START 0x23D80
#define LIO_CN23XX_SLI_S2M_PORTX_CTL(port) \
(LIO_CN23XX_SLI_S2M_PORT_CTL_START + \
((port) * 0x10))
#define LIO_CN23XX_SLI_MAC_NUMBER 0x20050
/*
* PEM(0..3)_BAR1_INDEX(0..15)address is defined as
* addr = (0x00011800C0000100 |port <<24 |idx <<3 )
* Here, port is PEM(0..3) & idx is INDEX(0..15)
*/
#define LIO_CN23XX_PEM_BAR1_INDEX_START 0x00011800C0000100ULL
#define LIO_CN23XX_PEM_OFFSET 24
#define LIO_CN23XX_BAR1_INDEX_OFFSET 3
#define LIO_CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
(LIO_CN23XX_PEM_BAR1_INDEX_START + \
((port) << LIO_CN23XX_PEM_OFFSET) + \
((idx) << LIO_CN23XX_BAR1_INDEX_OFFSET))
/*############################ DPI #########################*/
/* 4 Registers (64-bit) */
#define LIO_CN23XX_DPI_SLI_PRT_CFG_START	0x0001df0000000900ULL
/*
 * Per-port DPI/SLI config register; registers are 8 bytes apart.
 * Fixed: the expansion referenced the misspelled (undefined) name
 * IO_CN23XX_DPI_SLI_PRT_CFG_START and had unbalanced parentheses,
 * so the macro could not compile.
 */
#define LIO_CN23XX_DPI_SLI_PRTX_CFG(port)		\
	(LIO_CN23XX_DPI_SLI_PRT_CFG_START +		\
	 ((port) * 0x8))
/*############################ RST #########################*/
#define LIO_CN23XX_RST_BOOT 0x0001180006001600ULL
#define LIO_CN23XX_RST_SOFT_RST 0x0001180006001680ULL
#define LIO_CN23XX_LMC0_RESET_CTL 0x0001180088000180ULL
#define LIO_CN23XX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
#endif /* __CN23XX_PF_REGS_H__ */

View File

@ -0,0 +1,837 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file lio_common.h
* \brief Common: Structures and macros used in PCI-NIC package by core and
* host driver.
*/
#ifndef __LIO_COMMON_H__
#define __LIO_COMMON_H__
#include "lio_config.h"
#define LIO_STR_HELPER(x) #x
#define LIO_STR(x) LIO_STR_HELPER(x)
#define LIO_BASE_MAJOR_VERSION 1
#define LIO_BASE_MINOR_VERSION 6
#define LIO_BASE_MICRO_VERSION 1
#define LIO_BASE_VERSION LIO_STR(LIO_BASE_MAJOR_VERSION) "." \
LIO_STR(LIO_BASE_MINOR_VERSION)
#define LIO_VERSION LIO_STR(LIO_BASE_MAJOR_VERSION) "." \
LIO_STR(LIO_BASE_MINOR_VERSION) \
"." LIO_STR(LIO_BASE_MICRO_VERSION)
struct lio_version {
uint16_t major;
uint16_t minor;
uint16_t micro;
uint16_t reserved;
};
/*
 * Tag types used by Octeon cores in its work.
 * NOTE(review): these values look like part of the host/firmware wire
 * protocol — presumably they must not be renumbered; confirm against the
 * NIC firmware sources.
 */
enum lio_tag_type {
	LIO_ORDERED_TAG = 0,
	LIO_ATOMIC_TAG = 1,
	LIO_NULL_TAG = 2,
	LIO_NULL_NULL_TAG = 3
};
/* pre-defined host->NIC tag values */
#define LIO_CONTROL (0x11111110)
#define LIO_DATA(i) (0x11111111 + (i))
/*
* Opcodes used by host driver/apps to perform operations on the core.
* These are used to identify the major subsystem that the operation
* is for.
*/
#define LIO_OPCODE_NIC 1 /* used for NIC operations */
/*
* Subcodes are used by host driver/apps to identify the sub-operation
* for the core. They only need to by unique for a given subsystem.
*/
#define LIO_OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
/* OPCODE_CORE subcodes. For future use. */
/* OPCODE_NIC subcodes */
/* This subcode is sent by core PCI driver to indicate cores are ready. */
#define LIO_OPCODE_NIC_CORE_DRV_ACTIVE 0x01
#define LIO_OPCODE_NIC_NW_DATA 0x02 /* network packet data */
#define LIO_OPCODE_NIC_CMD 0x03
#define LIO_OPCODE_NIC_INFO 0x04
#define LIO_OPCODE_NIC_PORT_STATS 0x05
#define LIO_OPCODE_NIC_INTRMOD_CFG 0x08
#define LIO_OPCODE_NIC_IF_CFG 0x09
#define LIO_OPCODE_NIC_INTRMOD_PARAMS 0x0B
/* Application codes advertised by the core driver initialization packet. */
#define LIO_DRV_APP_START 0x0
#define LIO_DRV_APP_COUNT 0x2
#define LIO_DRV_NIC_APP (LIO_DRV_APP_START + 0x1)
#define LIO_DRV_INVALID_APP (LIO_DRV_APP_START + 0x2)
#define LIO_DRV_APP_END (LIO_DRV_INVALID_APP - 1)
#define BYTES_PER_DHLEN_UNIT 8
#define SCR2_BIT_FW_LOADED 63
#define SCR2_BIT_FW_RELOADED 62
/*
 * Advance a ring index by 'count' slots, wrapping around at 'max'.
 * A single subtraction suffices for the wrap, so callers are expected
 * to pass count <= max.
 *
 * @param index current ring index
 * @param count number of slots to advance
 * @param max   ring size (indices are in [0, max))
 * @return the wrapped index
 */
static inline uint32_t
lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
	uint32_t next = index + count;

	return ((next >= max) ? (next - max) : next);
}
#define LIO_BOARD_NAME 32
#define LIO_SERIAL_NUM_LEN 64
/*
 * Structure used by core driver to send indication that the Octeon
 * application is ready.  All fields are filled in by the firmware.
 */
struct lio_core_setup {
	uint64_t corefreq;	/* Octeon core clock frequency */
	char boardname[LIO_BOARD_NAME];
	char board_serial_number[LIO_SERIAL_NUM_LEN];
	uint64_t board_rev_major;
	uint64_t board_rev_minor;
};
/*--------------------------- SCATTER GATHER ENTRY -----------------------*/
/*
 * The Scatter-Gather List Entry. The scatter or gather component used with
 * a Octeon input instruction has this format.
 */
struct lio_sg_entry {
	/*
	 * The first 64 bit gives the size of data in each dptr.
	 * Slot order within the word follows big-endian layout; see
	 * lio_add_sg_size() for the host-endianness handling.
	 */
	union {
		uint16_t size[4];
		uint64_t size64;
	} u;
	/* The 4 dptr pointers for this entry. */
	uint64_t ptr[4];
};
#define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry))
/*
* \brief Add size to gather list
* @param sg_entry scatter/gather entry
* @param size size to add
* @param pos position to add it.
*/
/*
 * Record the size of the buffer at slot 'pos' of a scatter/gather entry.
 * The four 16-bit sizes share one 64-bit word whose slot order follows
 * big-endian layout, so on little-endian hosts the array index runs in
 * the opposite direction.
 *
 * @param sg_entry scatter/gather entry to update
 * @param size     buffer size to record
 * @param pos      dptr slot (0..3) the size belongs to
 */
static inline void
lio_add_sg_size(struct lio_sg_entry *sg_entry, uint16_t size, uint32_t pos)
{
#if BYTE_ORDER != BIG_ENDIAN
	sg_entry->u.size[3 - pos] = size;
#else	/* BYTE_ORDER == BIG_ENDIAN */
	sg_entry->u.size[pos] = size;
#endif	/* BYTE_ORDER != BIG_ENDIAN */
}
/*------------------------- End Scatter/Gather ---------------------------*/
#define LIO_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
#define LIO_MAX_FRM_SIZE (16000 + LIO_FRM_HEADER_SIZE)
#define LIO_DEFAULT_FRM_SIZE (1500 + LIO_FRM_HEADER_SIZE)
/* NIC Command types */
#define LIO_CMD_CHANGE_MTU 0x1
#define LIO_CMD_CHANGE_MACADDR 0x2
#define LIO_CMD_CHANGE_DEVFLAGS 0x3
#define LIO_CMD_RX_CTL 0x4
#define LIO_CMD_SET_MULTI_LIST 0x5
/* command for setting the speed, duplex & autoneg */
#define LIO_CMD_SET_SETTINGS 0x7
#define LIO_CMD_SET_FLOW_CTL 0x8
#define LIO_CMD_GPIO_ACCESS 0xA
#define LIO_CMD_LRO_ENABLE 0xB
#define LIO_CMD_LRO_DISABLE 0xC
#define LIO_CMD_SET_RSS 0xD
#define LIO_CMD_TNL_RX_CSUM_CTL 0x10
#define LIO_CMD_TNL_TX_CSUM_CTL 0x11
#define LIO_CMD_VERBOSE_ENABLE 0x14
#define LIO_CMD_VERBOSE_DISABLE 0x15
#define LIO_CMD_VLAN_FILTER_CTL 0x16
#define LIO_CMD_ADD_VLAN_FILTER 0x17
#define LIO_CMD_DEL_VLAN_FILTER 0x18
#define LIO_CMD_VXLAN_PORT_CONFIG 0x19
#define LIO_CMD_ID_ACTIVE 0x1a
#define LIO_CMD_SET_FNV 0x1d
#define LIO_CMD_PKT_STEERING_CTL 0x1e
#define LIO_CMD_QUEUE_COUNT_CTL 0x1f
#define LIO_CMD_VXLAN_PORT_ADD 0x0
#define LIO_CMD_VXLAN_PORT_DEL 0x1
#define LIO_CMD_RXCSUM_ENABLE 0x0
#define LIO_CMD_RXCSUM_DISABLE 0x1
#define LIO_CMD_TXCSUM_ENABLE 0x0
#define LIO_CMD_TXCSUM_DISABLE 0x1
#define LIO_CMD_FNV_ENABLE 0x1
#define LIO_CMD_FNV_DISABLE 0x0
#define LIO_CMD_PKT_STEERING_ENABLE 0x0
#define LIO_CMD_PKT_STEERING_DISABLE 0x1
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
#define LIO_L4SUM_VERIFIED 0x1
#define LIO_IPSUM_VERIFIED 0x2
/*LROIPV4 and LROIPV6 Flags*/
#define LIO_LROIPV4 0x1
#define LIO_LROIPV6 0x2
/* Interface flags communicated between host driver and core app. */
enum lio_ifflags {
	LIO_IFFLAG_PROMISC = 0x01,	/* accept all packets */
	LIO_IFFLAG_ALLMULTI = 0x02,	/* accept all multicast */
	LIO_IFFLAG_MULTICAST = 0x04,
	LIO_IFFLAG_BROADCAST = 0x08,
	LIO_IFFLAG_UNICAST = 0x10
};
/*
 * Layout of a command work-queue entry (offsets in bytes):
 *
 * wqe
 *  ---------------  0
 * |  wqe word0-3  |
 *  ---------------  32
 * |    PCI IH     |
 *  ---------------  40
 * |     RPTR      |
 *  ---------------  48
 * |    PCI IRH    |
 *  ---------------  56
 * |  OCT_NET_CMD  |
 *  ---------------  64
 * | Addtl 8-BData |
 * |               |
 *  ---------------
 */
/* A NIC control command (the OCT_NET_CMD word above). */
union octeon_cmd {
	uint64_t cmd64;
	struct {
#if BYTE_ORDER == BIG_ENDIAN
		uint64_t cmd:5;		/* LIO_CMD_* code */
		uint64_t more:6;	/* How many udd words follow the command */
		uint64_t reserved:29;
		uint64_t param1:16;	/* command-specific parameter */
		uint64_t param2:8;	/* command-specific parameter */
#else	/* BYTE_ORDER != BIG_ENDIAN */
		uint64_t param2:8;	/* command-specific parameter */
		uint64_t param1:16;	/* command-specific parameter */
		uint64_t reserved:29;
		uint64_t more:6;	/* How many udd words follow the command */
		uint64_t cmd:5;		/* LIO_CMD_* code */
#endif	/* BYTE_ORDER == BIG_ENDIAN */
	} s;
};
#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
#define LIO_SOFTCMDRESP_IH3 (40 + 8)
#define LIO_PCICMD_O3 (24 + 8)
/*
 * Instruction Header(DPI) - for OCTEON-III models.
 * The bit-fields total 64 bits; the two branches declare the same layout
 * for big- and little-endian hosts.
 */
struct octeon_instr_ih3 {
#if BYTE_ORDER == BIG_ENDIAN
	/* Reserved3 */
	uint64_t reserved3:1;
	/* Gather indicator 1=gather */
	uint64_t gather:1;
	/* Data length OR no. of entries in gather list */
	uint64_t dlengsz:14;
	/* Front Data size */
	uint64_t fsz:6;
	/* Reserved2 */
	uint64_t reserved2:4;
	/* PKI port kind - PKIND */
	uint64_t pkind:6;
	/* Reserved1 */
	uint64_t reserved1:32;
#else	/* BYTE_ORDER != BIG_ENDIAN */
	/* Reserved1 */
	uint64_t reserved1:32;
	/* PKI port kind - PKIND */
	uint64_t pkind:6;
	/* Reserved2 */
	uint64_t reserved2:4;
	/* Front Data size */
	uint64_t fsz:6;
	/* Data length OR no. of entries in gather list */
	uint64_t dlengsz:14;
	/* Gather indicator 1=gather */
	uint64_t gather:1;
	/* Reserved3 */
	uint64_t reserved3:1;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/*
 * Optional PKI Instruction Header(PKI IH) - for OCTEON-III models.
 * BIG ENDIAN format on the wire; bit-fields total 64 bits.
 */
struct octeon_instr_pki_ih3 {
#if BYTE_ORDER == BIG_ENDIAN
	/* Wider bit */
	uint64_t w:1;
	/* Raw mode indicator 1 = RAW */
	uint64_t raw:1;
	/* Use Tag */
	uint64_t utag:1;
	/* Use QPG */
	uint64_t uqpg:1;
	/* Reserved2 */
	uint64_t reserved2:1;
	/* Parse Mode */
	uint64_t pm:3;
	/* Skip Length */
	uint64_t sl:8;
	/* Use Tag Type */
	uint64_t utt:1;
	/* Tag type */
	uint64_t tagtype:2;
	/* Reserved1 */
	uint64_t reserved1:2;
	/* QPG Value */
	uint64_t qpg:11;
	/* Tag Value */
	uint64_t tag:32;
#else	/* BYTE_ORDER != BIG_ENDIAN */
	/* Tag Value */
	uint64_t tag:32;
	/* QPG Value */
	uint64_t qpg:11;
	/* Reserved1 */
	uint64_t reserved1:2;
	/* Tag type */
	uint64_t tagtype:2;
	/* Use Tag Type */
	uint64_t utt:1;
	/* Skip Length */
	uint64_t sl:8;
	/* Parse Mode */
	uint64_t pm:3;
	/* Reserved2 */
	uint64_t reserved2:1;
	/* Use QPG */
	uint64_t uqpg:1;
	/* Use Tag */
	uint64_t utag:1;
	/* Raw mode indicator 1 = RAW */
	uint64_t raw:1;
	/* Wider bit */
	uint64_t w:1;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/* Input Request Header (bit-fields total 64 bits). */
struct octeon_instr_irh {
#if BYTE_ORDER == BIG_ENDIAN
	uint64_t opcode:4;	/* major subsystem, e.g. LIO_OPCODE_NIC */
	uint64_t rflag:1;
	uint64_t subcode:7;	/* LIO_OPCODE_NIC_* sub-operation */
	uint64_t vlan:12;
	uint64_t priority:3;
	uint64_t reserved:5;
	uint64_t ossp:32;	/* opcode/subcode specific parameters */
#else	/* BYTE_ORDER != BIG_ENDIAN */
	uint64_t ossp:32;	/* opcode/subcode specific parameters */
	uint64_t reserved:5;
	uint64_t priority:3;
	uint64_t vlan:12;
	uint64_t subcode:7;	/* LIO_OPCODE_NIC_* sub-operation */
	uint64_t rflag:1;
	uint64_t opcode:4;	/* major subsystem, e.g. LIO_OPCODE_NIC */
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/* Return Data Parameters (bit-fields total 64 bits). */
struct octeon_instr_rdp {
#if BYTE_ORDER == BIG_ENDIAN
	uint64_t reserved:49;
	uint64_t pcie_port:3;	/* PCIe port to return the response on */
	uint64_t rlen:12;	/* response length */
#else	/* BYTE_ORDER != BIG_ENDIAN */
	uint64_t rlen:12;	/* response length */
	uint64_t pcie_port:3;	/* PCIe port to return the response on */
	uint64_t reserved:49;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/*
 * Receive Header: the leading 64-bit word of data the Octeon delivers to
 * the host.  The opcode/subcode fields occupy the same bit positions in
 * every view below; they select which view applies.
 */
union octeon_rh {
#if BYTE_ORDER == BIG_ENDIAN
	uint64_t rh64;
	/* generic view */
	struct {
		uint64_t opcode:4;
		uint64_t subcode:8;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t reserved:17;
		uint64_t ossp:32;	/* opcode/subcode specific parameters */
	} r;
	/* network-data view (LIO_OPCODE_NIC_NW_DATA) */
	struct {
		uint64_t opcode:4;
		uint64_t subcode:8;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t extra:28;
		uint64_t vlan:12;
		uint64_t priority:3;
		uint64_t csum_verified:3;	/* checksum verified. */
		uint64_t has_hwtstamp:1;	/* Has hardware timestamp. 1 = yes. */
		uint64_t encap_on:1;
		uint64_t has_hash:1;	/* Has hash (rth or rss). 1 = yes. */
	} r_dh;
	/* core-driver-active view (LIO_OPCODE_NIC_CORE_DRV_ACTIVE) */
	struct {
		uint64_t opcode:4;
		uint64_t subcode:8;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t reserved:11;
		uint64_t num_gmx_ports:8;
		uint64_t max_nic_ports:10;
		uint64_t app_cap_flags:4;
		uint64_t app_mode:8;
		uint64_t pkind:8;
	} r_core_drv_init;
	/* link-info view (LIO_OPCODE_NIC_INFO) */
	struct {
		uint64_t opcode:4;
		uint64_t subcode:8;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t reserved:8;
		uint64_t extra:25;
		uint64_t gmxport:16;
	} r_nic_info;
#else	/* BYTE_ORDER != BIG_ENDIAN */
	uint64_t rh64;
	/* generic view */
	struct {
		uint64_t ossp:32;	/* opcode/subcode specific parameters */
		uint64_t reserved:17;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t subcode:8;
		uint64_t opcode:4;
	} r;
	/* network-data view (LIO_OPCODE_NIC_NW_DATA) */
	struct {
		uint64_t has_hash:1;	/* Has hash (rth or rss). 1 = yes. */
		uint64_t encap_on:1;
		uint64_t has_hwtstamp:1;	/* Has hardware timestamp. 1 = yes. */
		uint64_t csum_verified:3;	/* checksum verified. */
		uint64_t priority:3;
		uint64_t vlan:12;
		uint64_t extra:28;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t subcode:8;
		uint64_t opcode:4;
	} r_dh;
	/* core-driver-active view (LIO_OPCODE_NIC_CORE_DRV_ACTIVE) */
	struct {
		uint64_t pkind:8;
		uint64_t app_mode:8;
		uint64_t app_cap_flags:4;
		uint64_t max_nic_ports:10;
		uint64_t num_gmx_ports:8;
		uint64_t reserved:11;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t subcode:8;
		uint64_t opcode:4;
	} r_core_drv_init;
	/* link-info view (LIO_OPCODE_NIC_INFO) */
	struct {
		uint64_t gmxport:16;
		uint64_t extra:25;
		uint64_t reserved:8;
		uint64_t len:3;		/* additional 64-bit words */
		uint64_t subcode:8;
		uint64_t opcode:4;
	} r_nic_info;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
#define OCTEON_RH_SIZE		(sizeof(union octeon_rh))

/* Per-packet transmit processing flags (checksum/timestamp/IPsec). */
union octeon_packet_params {
	uint32_t pkt_params32;
	struct {
#if BYTE_ORDER == BIG_ENDIAN
		uint32_t reserved:24;
		uint32_t ip_csum:1;	/* Perform IP header checksum(s) */
		/* Perform Outer transport header checksum */
		uint32_t transport_csum:1;
		/* Find tunnel, and perform transport csum. */
		uint32_t tnl_csum:1;
		uint32_t tsflag:1;	/* Timestamp this packet */
		uint32_t ipsec_ops:4;	/* IPsec operation */
#else	/* BYTE_ORDER != BIG_ENDIAN */
		uint32_t ipsec_ops:4;	/* IPsec operation */
		uint32_t tsflag:1;	/* Timestamp this packet */
		uint32_t tnl_csum:1;	/* Find tunnel, and perform transport csum. */
		uint32_t transport_csum:1;	/* Perform Outer transport header checksum */
		uint32_t ip_csum:1;	/* Perform IP header checksum(s) */
		uint32_t reserved:24;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
	} s;
};
/* Status of a RGMII Link on Octeon as seen by core driver. */
union octeon_link_status {
	uint64_t link_status64;
	struct {
#if BYTE_ORDER == BIG_ENDIAN
		uint64_t duplex:8;
		uint64_t mtu:16;
		uint64_t speed:16;	/* NOTE(review): presumably Mb/s -- confirm with firmware */
		uint64_t link_up:1;
		uint64_t autoneg:1;
		uint64_t if_mode:5;
		uint64_t pause:1;	/* pause-frame flow control */
		uint64_t flashing:1;	/* port LED identify in progress */
		uint64_t reserved:15;
#else	/* BYTE_ORDER != BIG_ENDIAN */
		uint64_t reserved:15;
		uint64_t flashing:1;	/* port LED identify in progress */
		uint64_t pause:1;	/* pause-frame flow control */
		uint64_t if_mode:5;
		uint64_t autoneg:1;
		uint64_t link_up:1;
		uint64_t speed:16;	/* NOTE(review): presumably Mb/s -- confirm with firmware */
		uint64_t mtu:16;
		uint64_t duplex:8;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
	} s;
};
/* The txpciq info passed to host from the firmware */
union octeon_txpciq {
	uint64_t txpciq64;
	struct {
#if BYTE_ORDER == BIG_ENDIAN
		uint64_t q_no:8;	/* input queue number */
		uint64_t port:8;
		uint64_t pkind:6;
		uint64_t use_qpg:1;
		uint64_t qpg:11;
		uint64_t aura_num:10;
		uint64_t reserved:20;
#else	/* BYTE_ORDER != BIG_ENDIAN */
		uint64_t reserved:20;
		uint64_t aura_num:10;
		uint64_t qpg:11;
		uint64_t use_qpg:1;
		uint64_t pkind:6;
		uint64_t port:8;
		uint64_t q_no:8;	/* input queue number */
#endif	/* BYTE_ORDER == BIG_ENDIAN */
	} s;
};

/* The rxpciq info passed to host from the firmware */
union octeon_rxpciq {
	uint64_t rxpciq64;
	struct {
#if BYTE_ORDER == BIG_ENDIAN
		uint64_t q_no:8;	/* output queue number */
		uint64_t reserved:56;
#else	/* BYTE_ORDER != BIG_ENDIAN */
		uint64_t reserved:56;
		uint64_t q_no:8;	/* output queue number */
#endif	/* BYTE_ORDER == BIG_ENDIAN */
	} s;
};
/* Information for a OCTEON ethernet interface shared between core & host. */
struct octeon_link_info {
	union octeon_link_status link;
	uint64_t hw_addr;	/* NOTE(review): looks like the port MAC address -- confirm */
#if BYTE_ORDER == BIG_ENDIAN
	uint64_t gmxport:16;
	uint64_t macaddr_is_admin_asgnd:1;
	uint64_t vlan_is_admin_assigned:1;
	uint64_t rsvd:30;
	uint64_t num_txpciq:8;	/* valid entries in txpciq[] */
	uint64_t num_rxpciq:8;	/* valid entries in rxpciq[] */
#else	/* BYTE_ORDER != BIG_ENDIAN */
	uint64_t num_rxpciq:8;	/* valid entries in rxpciq[] */
	uint64_t num_txpciq:8;	/* valid entries in txpciq[] */
	uint64_t rsvd:30;
	uint64_t vlan_is_admin_assigned:1;
	uint64_t macaddr_is_admin_asgnd:1;
	uint64_t gmxport:16;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
	union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_NICIF];
	union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_NICIF];
};
/* Interface configuration returned by the firmware (cf. LIO_OPCODE_NIC_IF_CFG). */
struct octeon_if_cfg_info {
	uint64_t iqmask;	/* mask for IQs enabled for  the port */
	uint64_t oqmask;	/* mask for OQs enabled for the port */
	struct octeon_link_info linfo;	/* initial link information */
	char lio_firmware_version[32];
};
/* Stats for each NIC port in RX direction. */
struct octeon_rx_stats {
	/* link-level stats */
	uint64_t total_rcvd;
	uint64_t bytes_rcvd;
	uint64_t total_bcst;
	uint64_t total_mcst;
	uint64_t runts;
	uint64_t ctl_rcvd;
	uint64_t fifo_err;	/* Accounts for over/under-run of buffers */
	uint64_t dmac_drop;
	uint64_t fcs_err;
	uint64_t jabber_err;
	uint64_t l2_err;
	uint64_t frame_err;
	/* firmware stats */
	uint64_t fw_total_rcvd;
	uint64_t fw_total_fwd;
	uint64_t fw_total_fwd_bytes;
	uint64_t fw_err_pko;
	uint64_t fw_err_link;
	uint64_t fw_err_drop;
	uint64_t fw_rx_vxlan;
	uint64_t fw_rx_vxlan_err;
	/* LRO */
	uint64_t fw_lro_pkts;	/* Number of packets that are LROed */
	uint64_t fw_lro_octs;	/* Number of octets that are LROed */
	uint64_t fw_total_lro;	/* Number of LRO packets formed */
	uint64_t fw_lro_aborts;	/* Number of times LRO of packet aborted */
	uint64_t fw_lro_aborts_port;
	uint64_t fw_lro_aborts_seq;
	uint64_t fw_lro_aborts_tsval;
	uint64_t fw_lro_aborts_timer;
	/* intrmod: packet forward rate */
	uint64_t fwd_rate;
};

/* Stats for each NIC port in TX direction. */
struct octeon_tx_stats {
	/* link-level stats */
	uint64_t total_pkts_sent;
	uint64_t total_bytes_sent;
	uint64_t mcast_pkts_sent;
	uint64_t bcast_pkts_sent;
	uint64_t ctl_sent;
	uint64_t one_collision_sent;	/* Packets sent after one collision */
	uint64_t multi_collision_sent;	/* Packets sent after multiple collision */
	uint64_t max_collision_fail;	/* Packets not sent due to max collisions */
	uint64_t max_deferral_fail;	/* Packets not sent due to max deferrals */
	uint64_t fifo_err;	/* Accounts for over/under-run of buffers */
	uint64_t runts;
	uint64_t total_collisions;	/* Total number of collisions detected */
	/* firmware stats */
	uint64_t fw_total_sent;
	uint64_t fw_total_fwd;
	uint64_t fw_total_fwd_bytes;
	uint64_t fw_err_pko;
	uint64_t fw_err_link;
	uint64_t fw_err_drop;
	uint64_t fw_err_tso;
	uint64_t fw_tso;	/* number of tso requests */
	uint64_t fw_tso_fwd;	/* number of packets segmented in tso */
	uint64_t fw_tx_vxlan;
	uint64_t fw_err_pki;
};

/* Combined per-port statistics: wire-side RX and host-side TX. */
struct octeon_link_stats {
	struct octeon_rx_stats fromwire;
	struct octeon_tx_stats fromhost;
};
/*
 * Decide whether a received packet must take the slow (dispatch) path.
 * Fast-path traffic is exactly the NIC network-data opcode/subcode pair;
 * anything else is a control/management message.
 *
 * @param rh receive header of the incoming packet
 * @return nonzero if the packet is NOT ordinary network data
 */
static inline int
lio_opcode_slow_path(union octeon_rh *rh)
{
	uint16_t nw_data, incoming;

	nw_data = LIO_OPCODE_SUBCODE(LIO_OPCODE_NIC, LIO_OPCODE_NIC_NW_DATA);
	incoming = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);

	return (incoming != nw_data);
}
/* MDIO access request/response exchanged with the firmware. */
struct octeon_mdio_cmd {
	uint64_t op;		/* operation code */
	uint64_t mdio_addr;
	uint64_t value1;
	uint64_t value2;
	uint64_t value3;
};
/* Interrupt moderation (intrmod) settings, cf. LIO_OPCODE_NIC_INTRMOD_CFG. */
struct octeon_intrmod_cfg {
	uint64_t rx_enable;	/* adaptive RX moderation on/off */
	uint64_t tx_enable;	/* adaptive TX moderation on/off */
	uint64_t check_intrvl;	/* rate-check interval */
	uint64_t maxpkt_ratethr;	/* max packet-rate threshold */
	uint64_t minpkt_ratethr;	/* min packet-rate threshold */
	uint64_t rx_maxcnt_trigger;
	uint64_t rx_mincnt_trigger;
	uint64_t rx_maxtmr_trigger;
	uint64_t rx_mintmr_trigger;
	uint64_t tx_mincnt_trigger;
	uint64_t tx_maxcnt_trigger;
	uint64_t rx_frames;
	uint64_t tx_frames;
	uint64_t rx_usecs;
};
#define LIO_BASE_QUEUE_NOT_REQUESTED	65535

/* Parameters of an interface-configuration request sent to the firmware. */
union octeon_if_cfg {
	uint64_t if_cfg64;
	struct {
#if BYTE_ORDER == BIG_ENDIAN
		uint64_t base_queue:16;	/* or LIO_BASE_QUEUE_NOT_REQUESTED */
		uint64_t num_iqueues:16;
		uint64_t num_oqueues:16;
		uint64_t gmx_port_id:8;
		uint64_t vf_id:8;
#else	/* BYTE_ORDER != BIG_ENDIAN */
		uint64_t vf_id:8;
		uint64_t gmx_port_id:8;
		uint64_t num_oqueues:16;
		uint64_t num_iqueues:16;
		uint64_t base_queue:16;	/* or LIO_BASE_QUEUE_NOT_REQUESTED */
#endif	/* BYTE_ORDER == BIG_ENDIAN */
	} s;
};
#endif /* __LIO_COMMON_H__ */

View File

@ -0,0 +1,439 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file lio_config.h
* \brief Host Driver: Configuration data structures for the host driver.
*/
#ifndef __LIO_CONFIG_H__
#define __LIO_CONFIG_H__
/*--------------------------CONFIG VALUES------------------------*/
/*
* The following macros affect the way the driver data structures
* are generated for Octeon devices.
* They can be modified.
*/
/*
* Maximum octeon devices defined as LIO_MAX_IF to support
* multiple(<= LIO_MAX_IF) Miniports
*/
#define LIO_MAX_IF 128
#define LIO_MAX_DEVICES LIO_MAX_IF
#define LIO_MAX_MULTICAST_ADDR 32
/* CN23xx IQ configuration macros */
#define LIO_CN23XX_PF_MAX_RINGS 64
#define LIO_BR_SIZE 4096
#define LIO_CN23XX_PF_MAX_INPUT_QUEUES LIO_CN23XX_PF_MAX_RINGS
#define LIO_CN23XX_MAX_IQ_DESCRIPTORS 2048
#define LIO_CN23XX_DEFAULT_IQ_DESCRIPTORS 512
#define LIO_CN23XX_MIN_IQ_DESCRIPTORS 128
#define LIO_CN23XX_DB_MIN 1
#define LIO_CN23XX_DB_TIMEOUT 1
#define LIO_CN23XX_PF_MAX_OUTPUT_QUEUES LIO_CN23XX_PF_MAX_RINGS
#define LIO_CN23XX_MAX_OQ_DESCRIPTORS 2048
#define LIO_CN23XX_DEFAULT_OQ_DESCRIPTORS 512
#define LIO_CN23XX_MIN_OQ_DESCRIPTORS 128
#define LIO_CN23XX_OQ_BUF_SIZE MCLBYTES
#define LIO_CN23XX_OQ_PKTS_PER_INTR 128
#define LIO_CN23XX_OQ_REFIL_THRESHOLD 16
#define LIO_CN23XX_OQ_INTR_PKT 64
#define LIO_CN23XX_OQ_INTR_TIME 100
#define LIO_CN23XX_DEFAULT_NUM_PORTS 1
#define LIO_CN23XX_CFG_IO_QUEUES LIO_CN23XX_PF_MAX_RINGS
#define LIO_CN23XX_DEF_IQ_INTR_THRESHOLD 32
#define LIO_CN23XX_PKI_MAX_FRAME_SIZE 65535
#define LIO_CN23XX_RAW_FRONT_SIZE 48
/*
 * This is the max jabber value.  Any packets greater than this size sent
 * over DPI will be truncated.
 */
#define LIO_CN23XX_MAX_INPUT_JABBER (LIO_CN23XX_PKI_MAX_FRAME_SIZE - \
LIO_CN23XX_RAW_FRONT_SIZE)
/* common OCTEON configuration macros */
#define LIO_64BYTE_INSTR 64
#define LIO_MAX_TXQS_PER_INTF 8
#define LIO_MAX_RXQS_PER_INTF 8
#define LIO_DEF_TXQS_PER_INTF 4
#define LIO_DEF_RXQS_PER_INTF 4
/* Macros to get octeon config params */
#define LIO_GET_IQ_CFG(cfg) ((cfg)->iq)
#define LIO_GET_IQ_MAX_Q_CFG(cfg) ((cfg)->iq.max_iqs)
#define LIO_GET_IQ_INSTR_TYPE_CFG(cfg) ((cfg)->iq.instr_type)
#define LIO_GET_IQ_INTR_PKT_CFG(cfg) ((cfg)->iq.iq_intr_pkt)
#define LIO_GET_OQ_MAX_Q_CFG(cfg) ((cfg)->oq.max_oqs)
#define LIO_GET_OQ_PKTS_PER_INTR_CFG(cfg) ((cfg)->oq.pkts_per_intr)
#define LIO_GET_OQ_REFILL_THRESHOLD_CFG(cfg) ((cfg)->oq.refill_threshold)
#define LIO_GET_OQ_INTR_PKT_CFG(cfg) ((cfg)->oq.oq_intr_pkt)
#define LIO_GET_OQ_INTR_TIME_CFG(cfg) ((cfg)->oq.oq_intr_time)
#define LIO_GET_NUM_NIC_PORTS_CFG(cfg) ((cfg)->num_nic_ports)
#define LIO_GET_NUM_DEF_TX_DESCS_CFG(cfg) ((cfg)->num_def_tx_descs)
#define LIO_GET_NUM_DEF_RX_DESCS_CFG(cfg) ((cfg)->num_def_rx_descs)
#define LIO_GET_DEF_RX_BUF_SIZE_CFG(cfg) ((cfg)->def_rx_buf_size)
#define LIO_GET_NUM_RX_DESCS_NIC_IF_CFG(cfg, idx) \
((cfg)->nic_if_cfg[idx].num_rx_descs)
#define LIO_GET_NUM_TX_DESCS_NIC_IF_CFG(cfg, idx) \
((cfg)->nic_if_cfg[idx].num_tx_descs)
#define LIO_GET_NUM_RX_BUF_SIZE_NIC_IF_CFG(cfg, idx) \
((cfg)->nic_if_cfg[idx].rx_buf_size)
#define LIO_GET_IS_SLI_BP_ON_CFG(cfg) ((cfg)->misc.enable_sli_oq_bp)
/* Max IOQs per OCTEON Link */
#define LIO_MAX_IOQS_PER_NICIF 64
#define LIO_SET_NUM_RX_DESCS_NIC_IF(cfg, idx, value) \
((cfg)->nic_if_cfg[idx].num_rx_descs = value)
#define LIO_SET_NUM_TX_DESCS_NIC_IF(cfg, idx, value) \
((cfg)->nic_if_cfg[idx].num_tx_descs = value)
/* TX/RX process pkt budget */
#define LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET 64
#define LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET 64
/* Supported card families and their printable names. */
enum lio_card_type {
	LIO_23XX		/* 23xx */
};

#define LIO_23XX_NAME  "23xx"
/*
 * Structure to define the configuration attributes for each Input queue.
 * Applicable to all Octeon processors.  The bit-fields total 128 bits
 * (two 64-bit words).
 */
struct lio_iq_config {
#if BYTE_ORDER == BIG_ENDIAN
	uint64_t reserved:16;

	/* Tx interrupt packets. Applicable to 23xx only */
	uint64_t iq_intr_pkt:16;

	/* Minimum ticks to wait before checking for pending instructions. */
	uint64_t db_timeout:16;

	/*
	 * Minimum number of commands pending to be posted to Octeon
	 * before driver hits the Input queue doorbell.
	 */
	uint64_t db_min:8;

	/* Command size - 32 or 64 bytes */
	uint64_t instr_type:32;

	/*
	 * Pending list size (usually set to the sum of the size of all Input
	 * queues)
	 */
	uint64_t pending_list_size:32;

	/* Max number of IQs available */
	uint64_t max_iqs:8;
#else	/* BYTE_ORDER != BIG_ENDIAN */
	/* Max number of IQs available */
	uint64_t max_iqs:8;

	/*
	 * Pending list size (usually set to the sum of the size of all Input
	 * queues)
	 */
	uint64_t pending_list_size:32;

	/* Command size - 32 or 64 bytes */
	uint64_t instr_type:32;

	/*
	 * Minimum number of commands pending to be posted to Octeon
	 * before driver hits the Input queue doorbell.
	 */
	uint64_t db_min:8;

	/* Minimum ticks to wait before checking for pending instructions. */
	uint64_t db_timeout:16;

	/* Tx interrupt packets. Applicable to 23xx only */
	uint64_t iq_intr_pkt:16;

	uint64_t reserved:16;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/*
 * Structure to define the configuration attributes for each Output queue.
 * Applicable to all Octeon processors.
 */
struct lio_oq_config {
#if BYTE_ORDER == BIG_ENDIAN
	uint64_t reserved:16;

	uint64_t pkts_per_intr:16;

	/*
	 * Interrupt Coalescing (Time Interval). Octeon will interrupt the
	 * host if at least one packet was sent in the time interval specified
	 * by this field. The driver uses time interval interrupt coalescing
	 * by default. The time is specified in microseconds.
	 */
	uint64_t oq_intr_time:16;

	/*
	 * Interrupt Coalescing (Packet Count). Octeon will interrupt the host
	 * only if it sent as many packets as specified by this field.
	 * The driver
	 * usually does not use packet count interrupt coalescing.
	 */
	uint64_t oq_intr_pkt:16;

	/*
	 * The number of buffers that were consumed during packet processing by
	 * the driver on this Output queue before the driver attempts to
	 * replenish
	 * the descriptor ring with new buffers.
	 */
	uint64_t refill_threshold:16;

	/* Max number of OQs available */
	uint64_t max_oqs:8;
#else	/* BYTE_ORDER != BIG_ENDIAN */
	/* Max number of OQs available */
	uint64_t max_oqs:8;

	/*
	 * The number of buffers that were consumed during packet processing by
	 * the driver on this Output queue before the driver attempts to
	 * replenish
	 * the descriptor ring with new buffers.
	 */
	uint64_t refill_threshold:16;

	/*
	 * Interrupt Coalescing (Packet Count). Octeon will interrupt the host
	 * only if it sent as many packets as specified by this field.
	 * The driver
	 * usually does not use packet count interrupt coalescing.
	 */
	uint64_t oq_intr_pkt:16;

	/*
	 * Interrupt Coalescing (Time Interval). Octeon will interrupt the
	 * host if at least one packet was sent in the time interval specified
	 * by this field. The driver uses time interval interrupt coalescing
	 * by default. The time is specified in microseconds.
	 */
	uint64_t oq_intr_time:16;

	uint64_t pkts_per_intr:16;

	uint64_t reserved:16;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/*
 * This structure contains the NIC link configuration attributes,
 * common for all the OCTEON models.
 */
struct lio_nic_if_config {
#if BYTE_ORDER == BIG_ENDIAN
	uint64_t reserved:56;

	uint64_t base_queue:16;

	uint64_t gmx_port_id:8;

	/*
	 * mbuf size, We need not change buf size even for Jumbo frames.
	 * Octeon can send jumbo frames in 4 consecutive descriptors,
	 */
	uint64_t rx_buf_size:16;

	/* Num of desc for tx rings */
	uint64_t num_tx_descs:16;

	/* Num of desc for rx rings */
	uint64_t num_rx_descs:16;

	/* Actual configured value. Range could be: 1...max_rxqs */
	uint64_t num_rxqs:16;

	/* Max Rxqs: Half for each of the two ports :max_oq/2 */
	uint64_t max_rxqs:16;

	/* Actual configured value. Range could be: 1...max_txqs */
	uint64_t num_txqs:16;

	/* Max Txqs: Half for each of the two ports :max_iq/2 */
	uint64_t max_txqs:16;
#else	/* BYTE_ORDER != BIG_ENDIAN */
	/* Max Txqs: Half for each of the two ports :max_iq/2 */
	uint64_t max_txqs:16;

	/* Actual configured value. Range could be: 1...max_txqs */
	uint64_t num_txqs:16;

	/* Max Rxqs: Half for each of the two ports :max_oq/2 */
	uint64_t max_rxqs:16;

	/* Actual configured value. Range could be: 1...max_rxqs */
	uint64_t num_rxqs:16;

	/* Num of desc for rx rings */
	uint64_t num_rx_descs:16;

	/* Num of desc for tx rings */
	uint64_t num_tx_descs:16;

	/*
	 * mbuf size, We need not change buf size even for Jumbo frames.
	 * Octeon can send jumbo frames in 4 consecutive descriptors,
	 */
	uint64_t rx_buf_size:16;

	uint64_t gmx_port_id:8;

	uint64_t base_queue:16;

	uint64_t reserved:56;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/*
 * Structure to define the configuration attributes for meta data.
 * Applicable to all Octeon processors.
 *
 * NOTE(review): the bit-fields below total 69 bits, so this structure
 * occupies two 64-bit storage units -- confirm this matches the layout
 * the firmware expects.
 */
struct lio_misc_config {
#if BYTE_ORDER == BIG_ENDIAN
	/* Host link status polling period */
	uint64_t host_link_query_interval:32;
	/* Oct link status polling period */
	uint64_t oct_link_query_interval:32;
	/* BP for SLI OQ */
	uint64_t enable_sli_oq_bp:1;
	/* Control IQ Group */
	uint64_t ctrlq_grp:4;
#else	/* BYTE_ORDER != BIG_ENDIAN */
	/* Control IQ Group */
	uint64_t ctrlq_grp:4;
	/* BP for SLI OQ */
	uint64_t enable_sli_oq_bp:1;
	/* Oct link status polling period */
	uint64_t oct_link_query_interval:32;
	/* Host link status polling period */
	uint64_t host_link_query_interval:32;
#endif	/* BYTE_ORDER == BIG_ENDIAN */
};
/* Structure to define the configuration for all OCTEON processors. */
struct lio_config {
	uint16_t card_type;	/* see enum lio_card_type */
	char *card_name;

	/* Input Queue attributes. */
	struct lio_iq_config iq;

	/* Output Queue attributes. */
	struct lio_oq_config oq;

	/* NIC Port Configuration */
	struct lio_nic_if_config nic_if_cfg[LIO_MAX_IF];

	/* Miscellaneous attributes */
	struct lio_misc_config misc;

	int num_nic_ports;

	/* Num of desc for tx rings */
	int num_def_tx_descs;

	/* Num of desc for rx rings */
	int num_def_rx_descs;

	int def_rx_buf_size;
};
/* The following config values are fixed and should not be modified. */
/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
#define LIO_MAX_BAR1_MAP_INDEX 2
/*
* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
* NoResponse Lists are now maintained with each IQ. (Dec' 2007).
*/
#define LIO_MAX_RESPONSE_LISTS 4
/*
* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the
* dispatch table.
*/
#define LIO_OPCODE_MASK_BITS 6
/* Mask for the 6-bit lookup hash */
#define LIO_OPCODE_MASK 0x3f
/* Size of the dispatch table. The 6-bit hash can index into 2^6 entries */
#define LIO_DISPATCH_LIST_SIZE BIT(LIO_OPCODE_MASK_BITS)
#define LIO_MAX_INSTR_QUEUES(oct) LIO_CN23XX_PF_MAX_INPUT_QUEUES
#define LIO_MAX_OUTPUT_QUEUES(oct) LIO_CN23XX_PF_MAX_OUTPUT_QUEUES
#define LIO_MAX_POSSIBLE_INSTR_QUEUES LIO_CN23XX_PF_MAX_INPUT_QUEUES
#define LIO_MAX_POSSIBLE_OUTPUT_QUEUES LIO_CN23XX_PF_MAX_OUTPUT_QUEUES
#endif /* __LIO_CONFIG_H__ */

View File

@ -0,0 +1,941 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/*
* @file lio_console.c
*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_image.h"
#include "lio_mem_ops.h"
#include "lio_main.h"
static void lio_get_uboot_version(struct octeon_device *oct);
static void lio_remote_lock(void);
static void lio_remote_unlock(void);
static uint64_t cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
const char *name,
uint32_t flags);
static int lio_console_read(struct octeon_device *oct,
uint32_t console_num, char *buffer,
uint32_t buf_size);
#define CAST_ULL(v) ((unsigned long long)(v))
#define LIO_BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008
#define LIO_BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004
#define LIO_BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000
#define LIO_BOOTLOADER_PCI_READ_DESC_ADDR 0x0006c100
#define LIO_BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN 248
#define LIO_PCI_IO_BUF_OWNER_OCTEON 0x00000001
#define LIO_PCI_IO_BUF_OWNER_HOST 0x00000002
#define LIO_PCI_CONSOLE_BLOCK_NAME "__pci_console"
#define LIO_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */
/*
 * First three members of cvmx_bootmem_desc are left in original positions
 * for backwards compatibility. Assumes big endian target.
 *
 * This descriptor lives in Octeon memory; the host reads its fields
 * remotely (see __cvmx_bootmem_desc_get()).
 */
struct cvmx_bootmem_desc {
	/* lock to control access to list */
	uint32_t lock;

	/* flags for indicating various conditions */
	uint32_t flags;

	uint64_t head_addr;

	/* incremented changed when incompatible changes made */
	uint32_t major_version;

	/*
	 * incremented changed when compatible changes made, reset to zero
	 * when major incremented
	 */
	uint32_t minor_version;

	uint64_t app_data_addr;
	uint64_t app_data_size;

	/* number of elements in named blocks array */
	uint32_t nb_num_blocks;

	/* length of name array in bootmem blocks */
	uint32_t named_block_name_len;

	/* address of named memory block descriptors */
	uint64_t named_block_array_addr;
};
/*
 * Structure that defines a single console.
 *
 * Note: when read_index == write_index, the buffer is empty. The actual
 * usable size of each console is therefore buf_size - 1.
 */
struct lio_pci_console {
	uint64_t input_base_addr;	/* host -> Octeon ring */
	uint32_t input_read_index;
	uint32_t input_write_index;
	uint64_t output_base_addr;	/* Octeon -> host ring */
	uint32_t output_read_index;
	uint32_t output_write_index;
	uint32_t lock;
	uint32_t buf_size;
};
/*
 * This is the main container structure that contains all the information
 * about all PCI consoles. The address of this structure is passed to
 * various routines that operate on PCI consoles.
 */
struct lio_pci_console_desc {
	uint32_t major_version;
	uint32_t minor_version;
	uint32_t lock;
	uint32_t flags;
	uint32_t num_consoles;
	uint32_t pad;
	/* must be 64 bit aligned here... */
	/*
	 * Array of addresses of octeon_pci_console structures.
	 * Declared with one element (pre-C99 flexible-array idiom) but
	 * num_consoles entries follow in practice.
	 */
	uint64_t console_addr_array[1];
	/* Implicit storage for console_addr_array */
};
/*
* This macro returns the size of a member of a structure. Logically it is
* the same as "sizeof(s::field)" in C++, but C lacks the "::" operator.
*/
#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
/*
 * Read one member of a structure that resides in Octeon memory.
 *
 * The per-member get macros generate calls to this helper with the
 * member's offset and size so only the required bytes are fetched.
 *
 * @param oct    Pointer to current octeon device
 * @param base   64-bit physical address of the complete structure
 * @param offset Offset from the start of the structure to the member
 * @param size   Size in bytes of the member (4 or 8)
 *
 * @return Member value promoted to uint64_t; 0 for unsupported sizes.
 */
static inline uint64_t
__cvmx_bootmem_desc_get(struct octeon_device *oct, uint64_t base,
			uint32_t offset, uint32_t size)
{
	uint64_t member_addr;

	/*
	 * Set bit 63 on the member's physical address, as expected by the
	 * remote-read helpers (presumably an XKPHYS-style mapping -- TODO
	 * confirm).
	 */
	member_addr = (1ull << 63) | (base + offset);

	if (size == 4)
		return (lio_read_device_mem32(oct, member_addr));
	if (size == 8)
		return (lio_read_device_mem64(oct, member_addr));

	return (0);
}
/*
 * This function retrieves the string name of a named block. It is
 * more complicated than a simple memcpy() since the named block
 * descriptor may not be directly accessible.
 *
 * @param oct  Pointer to current octeon device
 * @param addr Physical address of the named block descriptor
 * @param str  String to receive the named block string name; must have
 *             room for len + 1 bytes since a terminating NUL is written
 *             at str[len].
 * @param len  Length of the string buffer, which must match the length
 *             stored in the bootmem descriptor.
 */
static void
lio_bootmem_named_get_name(struct octeon_device *oct, uint64_t addr, char *str,
			   uint32_t len)
{
	/* The name field lives at a fixed offset inside the descriptor. */
	addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
	lio_pci_read_core_mem(oct, addr, (uint8_t *) str, len);
	str[len] = 0;
}
/* See header file for descriptions of functions */
/*
 * Check the version information on the bootmem descriptor.
 *
 * @param oct         Pointer to current octeon device
 * @param exact_match Exact major version to check against. A zero means
 *                    check that the version supports named blocks.
 *
 * @return Zero if the version is correct. Negative if the version is
 *         incorrect. Failures also cause a message to be displayed.
 */
static int
__cvmx_bootmem_check_version(struct octeon_device *oct, uint32_t exact_match)
{
	uint32_t major, minor;

	/* Lazily fetch the descriptor address from the bootloader. */
	if (!oct->bootmem_desc_addr)
		oct->bootmem_desc_addr =
		    lio_read_device_mem64(oct,
			LIO_BOOTLOADER_PCI_READ_DESC_ADDR);

	major = (uint32_t) __cvmx_bootmem_desc_get(oct,
	    oct->bootmem_desc_addr,
	    offsetof(struct cvmx_bootmem_desc, major_version),
	    SIZEOF_FIELD(struct cvmx_bootmem_desc, major_version));
	minor = (uint32_t) __cvmx_bootmem_desc_get(oct,
	    oct->bootmem_desc_addr,
	    offsetof(struct cvmx_bootmem_desc, minor_version),
	    SIZEOF_FIELD(struct cvmx_bootmem_desc, minor_version));

	lio_dev_dbg(oct, "%s: major_version=%d\n", __func__, major);

	/* Accept major versions up to 3, or exactly the requested one. */
	if (major <= 3 && (!exact_match || major == exact_match))
		return (0);

	lio_dev_err(oct, "bootmem ver mismatch %d.%d addr:0x%llx\n",
	    major, minor, CAST_ULL(oct->bootmem_desc_addr));
	return (-1);
}
/*
 * Locate a named block on the target and cache its base address and size
 * in the per-device copy of the descriptor.
 *
 * @param oct   Pointer to current octeon device
 * @param name  Name of the block to find
 * @param flags Filter flags, forwarded to the search routine
 *
 * @return Pointer to oct->bootmem_named_block_desc on success, NULL when
 *         no matching block exists.  The returned storage is shared per
 *         device and is overwritten by the next lookup.
 */
static const struct cvmx_bootmem_named_block_desc *
__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct,
				      const char *name, uint32_t flags)
{
	struct cvmx_bootmem_named_block_desc *desc =
	    &oct->bootmem_named_block_desc;
	uint64_t named_addr;

	named_addr = cvmx_bootmem_phy_named_block_find(oct, name,
						       flags);
	if (named_addr) {
		/* Pull base_addr and size out of the remote descriptor. */
		desc->base_addr = __cvmx_bootmem_desc_get(oct, named_addr,
		    offsetof(struct cvmx_bootmem_named_block_desc,
			base_addr),
		    SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc,
			base_addr));
		desc->size = __cvmx_bootmem_desc_get(oct, named_addr,
		    offsetof(struct cvmx_bootmem_named_block_desc, size),
		    SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc,
			size));
		/* strncpy() may not terminate; force a trailing NUL. */
		strncpy(desc->name, name, sizeof(desc->name));
		desc->name[sizeof(desc->name) - 1] = 0;
		return (&oct->bootmem_named_block_desc);
	} else {
		return (NULL);
	}
}
/*
 * Walk the target's named-block array and return the physical address of
 * the descriptor whose name matches 'name', or of the first free slot
 * when 'name' is NULL.
 *
 * @param oct   Pointer to current octeon device
 * @param name  Block name to match, or NULL to find an unused slot
 * @param flags Currently unused (kept for API symmetry)
 *
 * @return Physical address of the matching descriptor, or 0 when no match
 *         is found (including the version-check and allocation failures).
 */
static uint64_t
cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, const char *name,
				  uint32_t flags)
{
	uint64_t result = 0;

	/* Named blocks require bootmem descriptor major version 3. */
	if (!__cvmx_bootmem_check_version(oct, 3)) {
		uint32_t i;
		uint64_t named_block_array_addr =
		    __cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr,
			offsetof(struct cvmx_bootmem_desc,
			    named_block_array_addr),
			SIZEOF_FIELD(struct cvmx_bootmem_desc,
			    named_block_array_addr));
		uint32_t num_blocks =
		    (uint32_t) __cvmx_bootmem_desc_get(oct,
			oct->bootmem_desc_addr,
			offsetof(struct cvmx_bootmem_desc,
			    nb_num_blocks),
			SIZEOF_FIELD(struct cvmx_bootmem_desc,
			    nb_num_blocks));
		uint32_t name_length =
		    (uint32_t) __cvmx_bootmem_desc_get(oct,
			oct->bootmem_desc_addr,
			offsetof(struct cvmx_bootmem_desc,
			    named_block_name_len),
			SIZEOF_FIELD(struct cvmx_bootmem_desc,
			    named_block_name_len));
		uint64_t named_addr = named_block_array_addr;

		for (i = 0; i < num_blocks; i++) {
			/* A zero size marks an unused slot. */
			uint64_t named_size =
			    __cvmx_bootmem_desc_get(oct, named_addr,
				offsetof(struct cvmx_bootmem_named_block_desc,
				    size),
				SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc,
				    size));
			if (name && named_size) {
				/* Used slot: fetch and compare its name. */
				char *name_tmp = malloc(name_length + 1,
							M_DEVBUF, M_NOWAIT |
							M_ZERO);
				if (!name_tmp)
					/* Allocation failed: return 0. */
					break;
				lio_bootmem_named_get_name(oct, named_addr,
							   name_tmp,
							   name_length);
				if (!strncmp(name, name_tmp, name_length)) {
					result = named_addr;
					free(name_tmp, M_DEVBUF);
					break;
				}
				free(name_tmp, M_DEVBUF);
			} else if (!name && !named_size) {
				/* Caller asked for a free slot; found one. */
				result = named_addr;
				break;
			}
			named_addr +=
			    sizeof(struct cvmx_bootmem_named_block_desc);
		}
	}
	return (result);
}
/*
 * Find a named block on the remote Octeon.
 *
 * @param oct       Pointer to current octeon device
 * @param name      Name of block to find
 * @param base_addr Address the block is at (OUTPUT)
 * @param size      The size of the block (OUTPUT)
 *
 * @return Zero on success, One on failure (outputs untouched).
 */
static int
lio_named_block_find(struct octeon_device *oct, const char *name,
		     uint64_t * base_addr, uint64_t * size)
{
	const struct cvmx_bootmem_named_block_desc *blk;

	lio_remote_lock();
	blk = __cvmx_bootmem_find_named_block_flags(oct, name, 0);
	lio_remote_unlock();

	if (blk == NULL)
		return (1);

	*base_addr = blk->base_addr;
	*size = blk->size;
	return (0);
}
/* Serialize host-side access to remote bootmem state; currently a no-op. */
static void
lio_remote_lock(void)
{
	/* fill this in if any sharing is needed */
}
/* Release the (currently no-op) remote bootmem lock. */
static void
lio_remote_unlock(void)
{
	/* fill this in if any sharing is needed */
}
/*
 * Hand a command string to the target's bootloader over the shared PCI
 * buffer and wait for the bootloader to accept it.
 *
 * @param oct             Pointer to current octeon device
 * @param cmd_str         NUL-terminated command string to execute
 * @param wait_hundredths Hundredths of a second to wait for the
 *                        bootloader to become ready
 *
 * @return Zero on success; -1 when the string is too long, the bootloader
 *         never became ready, or it did not accept the command.
 */
int
lio_console_send_cmd(struct octeon_device *oct, char *cmd_str,
		     uint32_t wait_hundredths)
{
	uint32_t len = (uint32_t) strlen(cmd_str);

	lio_dev_dbg(oct, "sending \"%s\" to bootloader\n", cmd_str);

	if (len > LIO_BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) {
		lio_dev_err(oct, "Command string too long, max length is: %d\n",
			    LIO_BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1);
		return (-1);
	}

	if (lio_wait_for_bootloader(oct, wait_hundredths)) {
		lio_dev_err(oct, "Bootloader not ready for command.\n");
		return (-1);
	}

	/* Write command to bootloader */
	lio_remote_lock();
	lio_pci_write_core_mem(oct, LIO_BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR,
			       (uint8_t *) cmd_str, len);
	lio_write_device_mem32(oct, LIO_BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR,
			       len);
	/* Hand buffer ownership to the device so it consumes the command. */
	lio_write_device_mem32(oct, LIO_BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR,
			       LIO_PCI_IO_BUF_OWNER_OCTEON);
	/*
	 * Bootloader should accept command very quickly if it really was
	 * ready
	 */
	if (lio_wait_for_bootloader(oct, 200)) {
		lio_remote_unlock();
		lio_dev_err(oct, "Bootloader did not accept command.\n");
		return (-1);
	}
	lio_remote_unlock();
	return (0);
}
/*
 * Poll until the bootloader's PCI command buffer becomes host-owned,
 * i.e. the bootloader is ready for another command.
 *
 * @param oct                  Pointer to current octeon device
 * @param wait_time_hundredths Timeout in hundredths of a second; each
 *                             poll iteration sleeps for 10 ms.
 *
 * @return Zero when the buffer is host-owned; -1 on timeout or when
 *         device memory is not accessible.
 */
int
lio_wait_for_bootloader(struct octeon_device *oct,
			uint32_t wait_time_hundredths)
{
	lio_dev_dbg(oct, "waiting %d0 ms for bootloader\n",
		    wait_time_hundredths);

	/* Non-zero from lio_mem_access_ok() means access is NOT okay. */
	if (lio_mem_access_ok(oct))
		return (-1);

	while (wait_time_hundredths > 0 &&
	       lio_read_device_mem32(oct,
			LIO_BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR) !=
	       LIO_PCI_IO_BUF_OWNER_HOST) {
		/* Unsigned counter: "<= 0" here is effectively "== 0". */
		if (--wait_time_hundredths <= 0)
			return (-1);
		lio_sleep_timeout(10);
	}
	return (0);
}
/*
 * Mark console console_num's outstanding request as completed by
 * clearing its waiting flag.
 */
static void
lio_console_handle_result(struct octeon_device *oct, size_t console_num)
{
	oct->console[console_num].waiting = 0;
}
static char console_buffer[LIO_MAX_CONSOLE_READ_BYTES];
/*
 * Emit console output one '\n'-terminated line at a time through the
 * console's print callback, joining any partial line saved in
 * console->leftover by a previous call.
 *
 * @param oct            Pointer to current octeon device
 * @param console        Console state (leftover buffer, print callback)
 * @param console_num    Console index, passed through to the callback
 * @param console_buffer Raw bytes from the console; modified in place
 *                       ('\n' bytes are replaced by '\0')
 * @param bytes_read     Number of valid bytes in console_buffer
 */
static void
lio_output_console_line(struct octeon_device *oct, struct lio_console *console,
			size_t console_num, char *console_buffer,
			int32_t bytes_read)
{
	size_t len;
	int32_t i;
	char *line;

	line = console_buffer;
	for (i = 0; i < bytes_read; i++) {
		/* Output a line at a time, prefixed */
		if (console_buffer[i] == '\n') {
			/* Terminate the line in place for the callback. */
			console_buffer[i] = '\0';
			/* We need to output 'line', prefaced by 'leftover'.
			 * However, it is possible we're being called to
			 * output 'leftover' by itself (in the case of nothing
			 * having been read from the console).
			 *
			 * To avoid duplication, check for this condition.
			 */
			if (console->leftover[0] &&
			    (line != console->leftover)) {
				if (console->print)
					(*console->print)(oct,
							  (uint32_t)console_num,
							  console->leftover,line);
				console->leftover[0] = '\0';
			} else {
				if (console->print)
					(*console->print)(oct,
							  (uint32_t)console_num,
							  line, NULL);
			}
			line = &console_buffer[i + 1];
		}
	}
	/* Save off any leftovers */
	if (line != &console_buffer[bytes_read]) {
		console_buffer[bytes_read] = '\0';
		len = strlen(console->leftover);
		/*
		 * NOTE(review): strncpy() will not NUL-terminate when the
		 * tail exactly fills the remaining space -- assumes
		 * 'leftover' is sized to hold a full console read plus
		 * terminator; TODO confirm against the struct definition.
		 */
		strncpy(&console->leftover[len], line,
			sizeof(console->leftover) - len);
	}
}
/*
 * Callout handler that polls one PCI console: drains up to 16 buffers of
 * output, forwards complete lines to the console's print callback, and
 * reschedules itself every LIO_CONSOLE_POLL_INTERVAL_MS.
 *
 * @param arg Cast to struct lio_callout; ctxptr is the octeon device and
 *            ctxul the console index.
 *            NOTE(review): lio_add_console() passes the embedded
 *            struct callout pointer as this argument, so the cast below
 *            assumes the callout is the first member of struct
 *            lio_callout -- TODO confirm.
 */
static void
lio_check_console(void *arg)
{
	struct lio_console *console;
	struct lio_callout *console_callout = arg;
	struct octeon_device *oct =
	    (struct octeon_device *)console_callout->ctxptr;
	size_t len;
	uint32_t console_num = (uint32_t) console_callout->ctxul;
	int32_t bytes_read, total_read, tries;

	console = &oct->console[console_num];
	tries = 0;
	total_read = 0;

	/* Bail out if the callout was stopped or rescheduled meanwhile. */
	if (callout_pending(&console_callout->timer) ||
	    (callout_active(&console_callout->timer) == 0))
		return;
	do {
		/*
		 * Take console output regardless of whether it will be
		 * logged
		 */
		bytes_read = lio_console_read(oct, console_num, console_buffer,
					      sizeof(console_buffer) - 1);
		if (bytes_read > 0) {
			total_read += bytes_read;
			if (console->waiting)
				lio_console_handle_result(oct, console_num);
			if (console->print) {
				lio_output_console_line(oct, console,
							console_num,
							console_buffer,
							bytes_read);
			}
		} else if (bytes_read < 0) {
			lio_dev_err(oct, "Error reading console %u, ret=%d\n",
				    console_num, bytes_read);
		}
		tries++;
	} while ((bytes_read > 0) && (tries < 16));
	/*
	 * If nothing is read after polling the console, output any leftovers
	 * if any
	 */
	if (console->print && (total_read == 0) && (console->leftover[0])) {
		/* append '\n' as terminator for 'output_console_line' */
		len = strlen(console->leftover);
		console->leftover[len] = '\n';
		lio_output_console_line(oct, console, console_num,
					console->leftover, (int32_t)(len + 1));
		console->leftover[0] = '\0';
	}
	callout_schedule(&oct->console_timer[console_num].timer,
			 lio_ms_to_ticks(LIO_CONSOLE_POLL_INTERVAL_MS));
}
/*
 * Discover the PCI console named block on the target, map it through
 * BAR1 index 15, and record the number of consoles it advertises.
 *
 * @param oct Pointer to current octeon device
 *
 * @return Zero on success; non-zero when device memory is inaccessible
 *         or the console named block cannot be found.
 */
int
lio_init_consoles(struct octeon_device *oct)
{
	uint64_t addr, size;
	int ret = 0;

	ret = lio_mem_access_ok(oct);
	if (ret) {
		/* (fixed: message previously contained a stray quote) */
		lio_dev_err(oct, "Memory access not okay\n");
		return (ret);
	}
	ret = lio_named_block_find(oct, LIO_PCI_CONSOLE_BLOCK_NAME, &addr,
				   &size);
	if (ret) {
		lio_dev_err(oct, "Could not find console '%s'\n",
			    LIO_PCI_CONSOLE_BLOCK_NAME);
		return (ret);
	}
	/*
	 * Use BAR1_INDEX15 to create a static mapping to a region of
	 * Octeon's DRAM that contains the PCI console named block.
	 */
	oct->console_nb_info.bar1_index = 15;
	oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index,
				    1);
	/* BAR1 windows are 4 MB aligned; remember the region base. */
	oct->console_nb_info.dram_region_base = addr & 0xFFFFFFFFFFC00000ULL;
	/*
	 * num_consoles > 0, is an indication that the consoles are
	 * accessible
	 */
	oct->num_consoles = lio_read_device_mem32(oct,
	    addr + offsetof(struct lio_pci_console_desc, num_consoles));
	oct->console_desc_addr = addr;
	lio_dev_dbg(oct, "Initialized consoles. %d available\n",
		    oct->num_consoles);
	return (ret);
}
/*
 * Initialize console console_num from the on-device console descriptor,
 * start its poll timer and optionally enable debug console output.
 *
 * @param oct         Pointer to current octeon device
 * @param console_num Console index; must be < oct->num_consoles
 * @param dbg_enb     NULL to skip debug enablement; empty string to send
 *                    the default "setenv pci_console_active 1"; any other
 *                    string is sent to the bootloader verbatim.
 *
 * @return Zero on success (also when console_num is out of range); only
 *         the debug-enable command can produce a non-zero result.
 */
int
lio_add_console(struct octeon_device *oct, uint32_t console_num, char *dbg_enb)
{
	struct callout *timer;
	struct lio_console *console;
	uint64_t coreaddr;
	int ret = 0;

	if (console_num >= oct->num_consoles) {
		lio_dev_err(oct, "trying to read from console number %d when only 0 to %d exist\n",
			    console_num, oct->num_consoles);
	} else {
		console = &oct->console[console_num];
		console->waiting = 0;
		/* Each console_addr_array entry is an 8-byte address. */
		coreaddr = oct->console_desc_addr + console_num * 8 +
			offsetof(struct lio_pci_console_desc,
				 console_addr_array);
		console->addr = lio_read_device_mem64(oct, coreaddr);
		coreaddr = console->addr + offsetof(struct lio_pci_console,
						    buf_size);
		console->buffer_size = lio_read_device_mem32(oct, coreaddr);
		coreaddr = console->addr + offsetof(struct lio_pci_console,
						    input_base_addr);
		console->input_base_addr = lio_read_device_mem64(oct, coreaddr);
		coreaddr = console->addr + offsetof(struct lio_pci_console,
						    output_base_addr);
		console->output_base_addr =
		    lio_read_device_mem64(oct, coreaddr);
		console->leftover[0] = '\0';
		timer = &oct->console_timer[console_num].timer;
		/* Cache the U-Boot version string once per device. */
		if (oct->uboot_len == 0)
			lio_get_uboot_version(oct);
		callout_init(timer, 0);
		oct->console_timer[console_num].ctxptr = (void *)oct;
		oct->console_timer[console_num].ctxul = console_num;
		/*
		 * NOTE(review): the handler casts this 'timer' argument to
		 * struct lio_callout *, which assumes the callout is the
		 * first member of that struct -- TODO confirm.
		 */
		callout_reset(timer,
			      lio_ms_to_ticks(LIO_CONSOLE_POLL_INTERVAL_MS),
			      lio_check_console, timer);
		/* an empty string means use default debug console enablement */
		if (dbg_enb && !dbg_enb[0])
			dbg_enb = "setenv pci_console_active 1";
		if (dbg_enb)
			ret = lio_console_send_cmd(oct, dbg_enb, 2000);
		console->active = 1;
	}
	return (ret);
}
/*
 * Tear down every active console: stop its poll timer and clear the
 * cached device addresses, then mark the device as having no consoles.
 *
 * @param oct octeon device
 */
void
lio_remove_consoles(struct octeon_device *oct)
{
	struct lio_console *cons;
	uint32_t idx;

	for (idx = 0; idx < oct->num_consoles; idx++) {
		cons = &oct->console[idx];
		if (cons->active == 0)
			continue;
		callout_stop(&oct->console_timer[idx].timer);
		cons->addr = 0;
		cons->buffer_size = 0;
		cons->input_base_addr = 0;
		cons->output_base_addr = 0;
	}
	oct->num_consoles = 0;
}
/*
 * Ring-buffer index helpers for the PCI console.  A ring of
 * 'buffer_size' bytes holds at most buffer_size - 1 bytes, so that
 * read_index == write_index always means "empty".  Both helpers return
 * -1 when either index is out of range.
 */

/* Bytes that may still be written without the writer overtaking the reader. */
static inline int
lio_console_free_bytes(uint32_t buffer_size, uint32_t wr_idx, uint32_t rd_idx)
{
	uint32_t used;

	if (wr_idx >= buffer_size || rd_idx >= buffer_size)
		return (-1);

	/* Unsigned subtraction wraps to the correct modular distance. */
	used = wr_idx - rd_idx;
	return (((buffer_size - 1) - used) % buffer_size);
}

/* Bytes currently available to read: capacity minus free space. */
static inline int
lio_console_avail_bytes(uint32_t buffer_size, uint32_t wr_idx, uint32_t rd_idx)
{
	int free_bytes;

	free_bytes = lio_console_free_bytes(buffer_size, wr_idx, rd_idx);
	if (free_bytes < 0)
		return (-1);

	return (buffer_size - 1 - free_bytes);
}
/*
 * Copy up to buf_size bytes of pending output from a console's output
 * ring into 'buffer' and advance the ring's read index.  At most the
 * contiguous span up to the end of the ring is returned; callers loop to
 * drain the remainder.
 *
 * @param oct         Pointer to current octeon device
 * @param console_num Console index
 * @param buffer      Destination buffer (not NUL-terminated here)
 * @param buf_size    Capacity of 'buffer'
 *
 * @return Number of bytes copied; 0 when no data or console invalid;
 *         negative when the ring indices are corrupt.
 */
static int
lio_console_read(struct octeon_device *oct, uint32_t console_num, char *buffer,
		 uint32_t buf_size)
{
	struct lio_console *console;
	int bytes_to_read;
	uint32_t rd_idx, wr_idx;

	if (console_num >= oct->num_consoles) {
		lio_dev_err(oct, "Attempted to read from disabled console %d\n",
			    console_num);
		return (0);
	}
	console = &oct->console[console_num];
	/*
	 * Check to see if any data is available. Maybe optimize this with
	 * 64-bit read.
	 */
	rd_idx = lio_read_device_mem32(oct, console->addr +
	    offsetof(struct lio_pci_console, output_read_index));
	wr_idx = lio_read_device_mem32(oct, console->addr +
	    offsetof(struct lio_pci_console, output_write_index));

	bytes_to_read = lio_console_avail_bytes(console->buffer_size,
						wr_idx, rd_idx);
	/* <= 0: nothing available (0) or corrupt indices (-1). */
	if (bytes_to_read <= 0)
		return (bytes_to_read);

	bytes_to_read = min(bytes_to_read, buf_size);
	/*
	 * Check to see if what we want to read is not contiguous, and limit
	 * ourselves to the contiguous block
	 */
	if (rd_idx + bytes_to_read >= console->buffer_size)
		bytes_to_read = console->buffer_size - rd_idx;

	lio_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
			      (uint8_t *) buffer, bytes_to_read);
	/* Publish the new read index back to the device. */
	lio_write_device_mem32(oct, console->addr +
			       offsetof(struct lio_pci_console,
					output_read_index),
			       (rd_idx + bytes_to_read) % console->buffer_size);

	return (bytes_to_read);
}
/*
 * Query U-Boot on the target for its version string and cache it in
 * oct->uboot_version.  Console output is redirected to PCI while the
 * "version" command runs and restored to serial afterwards.  The buffer
 * is then scanned for the "U-Boot ... mips" span, whose bounds are
 * recorded in uboot_sidx / uboot_eidx / uboot_len and logged.
 *
 * @param oct Pointer to current octeon device
 */
static void
lio_get_uboot_version(struct octeon_device *oct)
{
	struct lio_console *console;
	int32_t bytes_read, total_read, tries;
	uint32_t console_num = 0;
	int i, ret = 0;

	/* Best effort: bootloader command failures are ignored. */
	ret = lio_console_send_cmd(oct, "setenv stdout pci", 50);
	console = &oct->console[console_num];
	tries = 0;
	total_read = 0;
	ret = lio_console_send_cmd(oct, "version", 1);
	do {
		/*
		 * Take console output regardless of whether it will be
		 * logged
		 */
		bytes_read = lio_console_read(oct,
					      console_num, oct->uboot_version +
					      total_read,
					      OCTEON_UBOOT_BUFFER_SIZE - 1 -
					      total_read);
		if (bytes_read > 0) {
			/*
			 * Terminate after the bytes appended this pass.
			 * (Previously the terminator was written at index
			 * bytes_read, which truncated the accumulated
			 * output on every iteration after the first.)
			 */
			oct->uboot_version[total_read + bytes_read] = '\0';
			total_read += bytes_read;
			if (console->waiting)
				lio_console_handle_result(oct, console_num);
		} else if (bytes_read < 0) {
			lio_dev_err(oct, "Error reading console %u, ret=%d\n",
				    console_num, bytes_read);
		}
		tries++;
	} while ((bytes_read > 0) && (tries < 16));
	/*
	 * If nothing is read after polling the console, output any leftovers
	 * if any
	 */
	if ((total_read == 0) && (console->leftover[0])) {
		lio_dev_dbg(oct, "%u: %s\n", console_num, console->leftover);
		console->leftover[0] = '\0';
	}
	ret = lio_console_send_cmd(oct, "setenv stdout serial", 50);

	/* Scan for "U-Boot" (the character after 'U' is skipped: '-'). */
	for (i = 0; i < (OCTEON_UBOOT_BUFFER_SIZE - 9); i++) {
		if (oct->uboot_version[i] == 'U' &&
		    oct->uboot_version[i + 2] == 'B' &&
		    oct->uboot_version[i + 3] == 'o' &&
		    oct->uboot_version[i + 4] == 'o' &&
		    oct->uboot_version[i + 5] == 't') {
			oct->uboot_sidx = i;
			i++;
			/* End the span just before the "mips" token. */
			for (; oct->uboot_version[i] != 0x0; i++) {
				if (oct->uboot_version[i] == 'm' &&
				    oct->uboot_version[i + 1] == 'i' &&
				    oct->uboot_version[i + 2] == 'p' &&
				    oct->uboot_version[i + 3] == 's') {
					oct->uboot_eidx = i - 1;
					oct->uboot_version[i - 1] = 0x0;
					oct->uboot_len = oct->uboot_eidx -
					    oct->uboot_sidx + 1;
					lio_dev_info(oct, "%s\n",
						     &oct->uboot_version
						     [oct->uboot_sidx]);
					return;
				}
			}
		}
	}
}
#define FBUF_SIZE (4 * 1024 * 1024)
int
lio_download_firmware(struct octeon_device *oct, const uint8_t * data,
size_t size)
{
struct lio_firmware_file_header *h;
uint64_t load_addr;
uint32_t crc32_result, i, image_len, rem;
int ret = 0;
if (size < sizeof(struct lio_firmware_file_header)) {
lio_dev_err(oct, "Firmware file too small (%d < %d).\n",
(uint32_t) size,
(uint32_t) sizeof(struct lio_firmware_file_header));
return (-EINVAL);
}
h = __DECONST(struct lio_firmware_file_header *, data);
if (be32toh(h->magic) != LIO_NIC_MAGIC) {
lio_dev_err(oct, "Unrecognized firmware file.\n");
return (-EINVAL);
}
crc32_result = crc32(data, sizeof(struct lio_firmware_file_header) -
sizeof(uint32_t));
if (crc32_result != be32toh(h->crc32)) {
lio_dev_err(oct, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
crc32_result, be32toh(h->crc32));
return (-EINVAL);
}
if (memcmp(LIO_BASE_VERSION, h->version,
strlen(LIO_BASE_VERSION))) {
lio_dev_err(oct, "Unmatched firmware version. Expected %s.x, got %s.\n",
LIO_BASE_VERSION, h->version);
return (-EINVAL);
}
if (be32toh(h->num_images) > LIO_MAX_IMAGES) {
lio_dev_err(oct, "Too many images in firmware file (%d).\n",
be32toh(h->num_images));
return (-EINVAL);
}
lio_dev_info(oct, "Firmware version: %s\n", h->version);
snprintf(oct->fw_info.lio_firmware_version, 32, "LIQUIDIO: %s",
h->version);
data += sizeof(struct lio_firmware_file_header);
lio_dev_info(oct, "Loading %d image(s)\n", be32toh(h->num_images));
/* load all images */
for (i = 0; i < be32toh(h->num_images); i++) {
load_addr = be64toh(h->desc[i].addr);
image_len = be32toh(h->desc[i].len);
lio_dev_info(oct, "Loading firmware %d at %llx\n", image_len,
(unsigned long long)load_addr);
/* Write in 4MB chunks */
rem = image_len;
while (rem) {
if (rem < FBUF_SIZE)
size = rem;
else
size = FBUF_SIZE;
/* download the image */
lio_pci_write_core_mem(oct, load_addr,
__DECONST(uint8_t *, data),
(uint32_t) size);
data += size;
rem -= (uint32_t) size;
load_addr += size;
}
}
lio_dev_info(oct, "Writing boot command: %s\n", h->bootcmd);
/* Invoke the bootcmd */
ret = lio_console_send_cmd(oct, h->bootcmd, 50);
return (0);
}

View File

@ -0,0 +1,153 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
/*
 * Queue a NIC data packet on its input queue, ringing the doorbell
 * immediately so the device fetches the command.
 *
 * @param oct   Pointer to current octeon device
 * @param ndata Packet description (queue, command, buffer, request type)
 *
 * @return Status from lio_send_command().
 */
int
lio_send_data_pkt(struct octeon_device *oct, struct lio_data_pkt *ndata)
{
	/* Third argument (1) = ring the doorbell right away. */
	return (lio_send_command(oct, ndata->q_no, 1, &ndata->cmd,
				 ndata->buf, ndata->datasize, ndata->reqtype));
}
/*
 * Soft-command completion callback for NIC control packets: invoke the
 * caller's callback on success and always release the soft command.
 *
 * @param oct    Pointer to current octeon device
 * @param status Completion status; zero means success
 * @param sc_ptr The completed struct lio_soft_command; its ctxptr holds
 *               the copied struct lio_ctrl_pkt.
 */
static void
lio_ctrl_callback(struct octeon_device *oct, uint32_t status, void *sc_ptr)
{
	struct lio_soft_command *sc = (struct lio_soft_command *)sc_ptr;
	struct lio_ctrl_pkt *nctrl;

	nctrl = (struct lio_ctrl_pkt *)sc->ctxptr;
	/*
	 * Call the callback function if status is OK.
	 * Status is OK only if a response was expected and core returned
	 * success.
	 * If no response was expected, status is OK if the command was posted
	 * successfully.
	 */
	if (!status && nctrl->cb_fn)
		nctrl->cb_fn(nctrl);

	lio_free_soft_command(oct, sc);
}
/*
 * Build a soft command carrying a NIC control packet: the command word
 * plus optional user-defined data (UDD) in the data buffer, a 16-byte
 * response buffer when a response is expected, and a copy of *nctrl in
 * the context area (freed together with the soft command).
 *
 * @param oct   Pointer to current octeon device
 * @param nctrl Control packet to wrap (copied; caller keeps ownership)
 *
 * @return New soft command, or NULL on allocation failure.
 */
static inline struct lio_soft_command *
lio_alloc_ctrl_pkt_sc(struct octeon_device *oct, struct lio_ctrl_pkt *nctrl)
{
	struct lio_soft_command *sc = NULL;
	uint32_t datasize = 0, rdatasize, uddsize = 0;
	uint8_t *data;

	/* ncmd.s.more counts the 8-byte UDD words appended to the command. */
	uddsize = (uint32_t)(nctrl->ncmd.s.more * 8);

	datasize = OCTEON_CMD_SIZE + uddsize;
	/* A non-zero wait_time means the caller expects a response. */
	rdatasize = (nctrl->wait_time) ? 16 : 0;

	sc = lio_alloc_soft_command(oct, datasize, rdatasize,
				    sizeof(struct lio_ctrl_pkt));
	if (sc == NULL)
		return (NULL);

	memcpy(sc->ctxptr, nctrl, sizeof(struct lio_ctrl_pkt));

	data = (uint8_t *)sc->virtdptr;

	memcpy(data, &nctrl->ncmd, OCTEON_CMD_SIZE);

	/* Swap the 8-byte command words for the device. */
	lio_swap_8B_data((uint64_t *)data, (OCTEON_CMD_SIZE >> 3));

	if (uddsize) {
		/* Endian-Swap for UDD should have been done by caller. */
		memcpy(data + OCTEON_CMD_SIZE, nctrl->udd, uddsize);
	}
	sc->iq_no = (uint32_t)nctrl->iq_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
				 0, 0);

	sc->callback = lio_ctrl_callback;
	sc->callback_arg = sc;
	sc->wait_time = nctrl->wait_time;

	return (sc);
}
/*
 * Send a NIC control packet to the device via a soft command.
 * Serialized by cmd_resp_wqlock; while the driver is offline only the
 * RX-control command is allowed through.
 *
 * @param oct   Pointer to current octeon device
 * @param nctrl Control packet description
 *
 * @return -1 on failure; otherwise the lio_send_soft_command() status.
 */
int
lio_send_ctrl_pkt(struct octeon_device *oct, struct lio_ctrl_pkt *nctrl)
{
	struct lio_soft_command *sc = NULL;
	int retval;

	mtx_lock(&oct->cmd_resp_wqlock);
	/*
	 * Allow only rx ctrl command to stop traffic on the chip
	 * during offline operations
	 */
	if ((oct->cmd_resp_state == LIO_DRV_OFFLINE) &&
	    (nctrl->ncmd.s.cmd != LIO_CMD_RX_CTL)) {
		mtx_unlock(&oct->cmd_resp_wqlock);
		lio_dev_err(oct, "%s cmd:%d not processed since driver offline\n",
			    __func__, nctrl->ncmd.s.cmd);
		return (-1);
	}
	sc = lio_alloc_ctrl_pkt_sc(oct, nctrl);
	if (sc == NULL) {
		lio_dev_err(oct, "%s soft command alloc failed\n", __func__);
		mtx_unlock(&oct->cmd_resp_wqlock);
		return (-1);
	}
	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		/* Not queued: free the soft command here (no callback). */
		lio_free_soft_command(oct, sc);
		lio_dev_err(oct, "%s pf_num:%d soft command:%d send failed status: %x\n",
			    __func__, oct->pf_num, nctrl->ncmd.s.cmd, retval);
		mtx_unlock(&oct->cmd_resp_wqlock);
		return (-1);
	}
	mtx_unlock(&oct->cmd_resp_wqlock);
	return (retval);
}

View File

@ -0,0 +1,243 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file lio_ctrl.h
* \brief Host NIC Driver: Routine to send network data &
* control packet to Octeon.
*/
#ifndef __LIO_CTRL_H__
#define __LIO_CTRL_H__
/* Maximum number of 8-byte words can be sent in a NIC control message. */
#define LIO_MAX_NCTRL_UDD 32
typedef void (*lio_ctrl_pkt_cb_fn_t)(void *);
/*
 * Structure of control information passed by the NIC module to the OSI
 * layer when sending control commands to Octeon device software.
 * Consumed by lio_send_ctrl_pkt(); a copy travels with the soft command.
 */
struct lio_ctrl_pkt {
	/* Command to be passed to the Octeon device software. */
	union octeon_cmd ncmd;
	/* Send buffer */
	void *data;
	/* DMA address of 'data' -- presumably; not referenced here. */
	uint64_t dmadata;
	/* Response buffer */
	void *rdata;
	/* DMA address of 'rdata' -- presumably; not referenced here. */
	uint64_t dmardata;
	/* Additional data that may be needed by some commands. */
	uint64_t udd[LIO_MAX_NCTRL_UDD];
	/* Input queue to use to send this command. */
	uint64_t iq_no;
	/*
	 * Time to wait for Octeon software to respond to this control command.
	 * If wait_time is 0, OSI assumes no response is expected.
	 */
	size_t wait_time;
	/* The network device that issued the control command. */
	struct lio *lio;
	/* Callback function called when the command has been fetched */
	lio_ctrl_pkt_cb_fn_t cb_fn;
};
/*
 * Structure of data information passed by the NIC module to the OSI
 * layer when forwarding data to Octeon.  Consumed by lio_send_data_pkt().
 */
struct lio_data_pkt {
	/*
	 * Pointer to information maintained by NIC module for this packet. The
	 * OSI layer passes this as-is to the driver.
	 */
	void *buf;
	/* Type of buffer passed in "buf" above. */
	uint32_t reqtype;
	/* Total data bytes to be transferred in this command. */
	uint32_t datasize;
	/* Command to be passed to the Octeon device software. */
	union lio_instr_64B cmd;
	/* Input queue to use to send this command. */
	uint32_t q_no;
};
/*
 * Structure passed by NIC module to OSI layer to prepare a command to send
 * network data to Octeon.  Interpreted by lio_prepare_pci_cmd_o3().
 */
union lio_cmd_setup {
	struct {
		uint32_t iq_no:8;	/* input queue to post to */
		uint32_t gather:1;	/* u.gatherptrs is used when set */
		uint32_t timestamp:1;	/* sets the tsflag packet param */
		uint32_t ip_csum:1;	/* IP checksum packet param */
		uint32_t transport_csum:1;	/* transport csum param */
		uint32_t tnl_csum:1;	/* tunnel csum param */
		uint32_t rsvd:19;
		union {
			uint32_t datasize;	/* bytes, direct mode */
			uint32_t gatherptrs;	/* entries, gather mode */
		} u;
	} s;
	uint64_t cmd_setup64;	/* raw 64-bit view of the bitfields */
};
/*
 * Return non-zero when input queue q_no cannot accept another command,
 * i.e. its pending count has reached max_count - 2.
 */
static inline int
lio_iq_is_full(struct octeon_device *oct, uint32_t q_no)
{
	return (atomic_load_acq_int(&oct->instr_queue[q_no]->instr_pending) >=
		(oct->instr_queue[q_no]->max_count - 2));
}
/*
 * Fill in a 64-byte O3 (CN23xx-style) NIC instruction from a setup
 * descriptor: instruction header (IH3), PKI instruction header and
 * instruction request header (IRH) with the packet parameters.
 *
 * @param oct   Pointer to current octeon device
 * @param cmd   Instruction to fill in (zeroed first)
 * @param setup Prepared union lio_cmd_setup describing the send
 * @param tag   Flow tag to use; 0 selects the per-port default tag
 */
static inline void
lio_prepare_pci_cmd_o3(struct octeon_device *oct, union lio_instr_64B *cmd,
		       union lio_cmd_setup *setup, uint32_t tag)
{
	union octeon_packet_params packet_params;
	struct octeon_instr_irh *irh;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	int port;

	bzero(cmd, sizeof(union lio_instr_64B));

	ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
	pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;

	/*
	 * assume that rflag is cleared so therefore front data will only have
	 * irh and ossp[1] and ossp[2] for a total of 24 bytes
	 */
	ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
	/* PKI IH */
	ih3->fsz = LIO_PCICMD_O3;

	/* Direct data vs. gather list, per the setup flags. */
	if (!setup->s.gather) {
		ih3->dlengsz = setup->s.u.datasize;
	} else {
		ih3->gather = 1;
		ih3->dlengsz = setup->s.u.gatherptrs;
	}

	pki_ih3->w = 1;
	pki_ih3->raw = 0;
	pki_ih3->utag = 0;
	pki_ih3->utt = 1;
	pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;

	port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;

	/* Caller-supplied tag wins; otherwise use the port's data tag. */
	if (tag)
		pki_ih3->tag = tag;
	else
		pki_ih3->tag = LIO_DATA(port);

	pki_ih3->tagtype = LIO_ORDERED_TAG;
	pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
	pki_ih3->pm = 0x0;	/* parse from L2 */
	/* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1 */
	pki_ih3->sl = 32;

	irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
	irh->opcode = LIO_OPCODE_NIC;
	irh->subcode = LIO_OPCODE_NIC_NW_DATA;

	/* Pack offload/timestamp flags into the first opcode-specific word. */
	packet_params.pkt_params32 = 0;
	packet_params.s.ip_csum = setup->s.ip_csum;
	packet_params.s.transport_csum = setup->s.transport_csum;
	packet_params.s.tnl_csum = setup->s.tnl_csum;
	packet_params.s.tsflag = setup->s.timestamp;

	irh->ossp = packet_params.pkt_params32;
}
/*
* Utility function to prepare a 64B NIC instruction based on a setup command
* @param oct - Pointer to current octeon device
* @param cmd - pointer to instruction to be filled in.
* @param setup - pointer to the setup structure
* @param q_no - which queue for back pressure
*
* Assumes the cmd instruction is pre-allocated, but no fields are filled in.
*/
/* Delegates unconditionally to the O3 (CN23xx-style) command format. */
static inline void
lio_prepare_pci_cmd(struct octeon_device *oct, union lio_instr_64B *cmd,
		    union lio_cmd_setup *setup, uint32_t tag)
{
	lio_prepare_pci_cmd_o3(oct, cmd, setup, tag);
}
/*
* Send a NIC data packet to the device
* @param oct - octeon device pointer
* @param ndata - control structure with queueing, and buffer information
*
* @returns LIO_IQ_FAILED if it failed to add to the input queue.
* LIO_IQ_STOP if it the queue should be stopped,
* and LIO_IQ_SEND_OK if it sent okay.
*/
int lio_send_data_pkt(struct octeon_device *oct,
struct lio_data_pkt *ndata);
/*
* Send a NIC control packet to the device
* @param oct - octeon device pointer
* @param nctrl - control structure with command, timeout, and callback info
* @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
* queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.
*/
int lio_send_ctrl_pkt(struct octeon_device *oct,
struct lio_ctrl_pkt *nctrl);
#endif /* __LIO_CTRL_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,888 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/*
* \brief Host Driver: This file defines the octeon device structure.
*/
#ifndef _LIO_DEVICE_H_
#define _LIO_DEVICE_H_
#include <sys/endian.h> /* for BYTE_ORDER */
/* PCI VendorId Device Id */
#define LIO_CN23XX_PF_PCIID 0x9702177d
/*
* Driver identifies chips by these Ids, created by clubbing together
* DeviceId+RevisionId; Where Revision Id is not used to distinguish
* between chips, a value of 0 is used for revision id.
*/
#define LIO_CN23XX_PF_VID 0x9702
#define LIO_CN2350_10G_SUBDEVICE 0x03
#define LIO_CN2350_10G_SUBDEVICE1 0x04
#define LIO_CN2360_10G_SUBDEVICE 0x05
#define LIO_CN2350_25G_SUBDEVICE 0x07
#define LIO_CN2360_25G_SUBDEVICE 0x06
/* Endian-swap modes supported by Octeon. */
enum lio_pci_swap_mode {
LIO_PCI_PASSTHROUGH = 0,
LIO_PCI_SWAP_64BIT = 1,
LIO_PCI_SWAP_32BIT = 2,
LIO_PCI_LW_SWAP_32BIT = 3
};
enum {
LIO_CFG_TYPE_DEFAULT = 0,
LIO_NUM_CFGS,
};
#define OCTEON_OUTPUT_INTR (2)
#define OCTEON_ALL_INTR 0xff
/*--------------- PCI BAR1 index registers -------------*/
/* BAR1 Mask */
#define LIO_PCI_BAR1_ENABLE_CA 1
#define LIO_PCI_BAR1_ENDIAN_MODE LIO_PCI_SWAP_64BIT
#define LIO_PCI_BAR1_ENTRY_VALID 1
#define LIO_PCI_BAR1_MASK ((LIO_PCI_BAR1_ENABLE_CA << 3) | \
(LIO_PCI_BAR1_ENDIAN_MODE << 1) | \
LIO_PCI_BAR1_ENTRY_VALID)
/*
* Octeon Device state.
* Each octeon device goes through each of these states
* as it is initialized.
*/
#define LIO_DEV_BEGIN_STATE 0x0
#define LIO_DEV_PCI_ENABLE_DONE 0x1
#define LIO_DEV_PCI_MAP_DONE 0x2
#define LIO_DEV_DISPATCH_INIT_DONE 0x3
#define LIO_DEV_INSTR_QUEUE_INIT_DONE 0x4
#define LIO_DEV_SC_BUFF_POOL_INIT_DONE 0x5
#define LIO_DEV_MSIX_ALLOC_VECTOR_DONE 0x6
#define LIO_DEV_RESP_LIST_INIT_DONE 0x7
#define LIO_DEV_DROQ_INIT_DONE 0x8
#define LIO_DEV_INTR_SET_DONE 0xa
#define LIO_DEV_IO_QUEUES_DONE 0xb
#define LIO_DEV_CONSOLE_INIT_DONE 0xc
#define LIO_DEV_HOST_OK 0xd
#define LIO_DEV_CORE_OK 0xe
#define LIO_DEV_RUNNING 0xf
#define LIO_DEV_IN_RESET 0x10
#define LIO_DEV_STATE_INVALID 0x11
#define LIO_DEV_STATES LIO_DEV_STATE_INVALID
/*
* Octeon Device interrupts
* These interrupt bits are set in int_status filed of
* octeon_device structure
*/
#define LIO_DEV_INTR_DMA0_FORCE 0x01
#define LIO_DEV_INTR_DMA1_FORCE 0x02
#define LIO_DEV_INTR_PKT_DATA 0x04
#define LIO_RESET_MSECS (3000)
/*---------------------------DISPATCH LIST-------------------------------*/
/*
* The dispatch list entry.
* The driver keeps a record of functions registered for each
* response header opcode in this structure. Since the opcode is
* hashed to index into the driver's list, more than one opcode
* can hash to the same entry, in which case the list field points
* to a linked list with the other entries.
*/
struct lio_dispatch {
/* Singly-linked tail queue node for this entry */
struct lio_stailq_node node;
/* Singly-linked tail queue head for this entry */
struct lio_stailq_head head;
/* The opcode for which the dispatch function & arg should be used */
uint16_t opcode;
/* The function to be called for a packet received by the driver */
lio_dispatch_fn_t dispatch_fn;
/*
* The application specified argument to be passed to the above
* function along with the received packet
*/
void *arg;
};
/* The dispatch list structure. */
struct lio_dispatch_list {
/* access to dispatch list must be atomic */
struct mtx lock;
/* Count of dispatch functions currently registered */
uint32_t count;
/* The list of dispatch functions */
struct lio_dispatch *dlist;
};
/*----------------------- THE OCTEON DEVICE ---------------------------*/
#define LIO_MEM_REGIONS 3
/*
* PCI address space information.
* Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
* Octeon gets mapped to different physical address spaces in
* the kernel.
*/
/*
 * One mapped PCI memory region (BAR): the bus resource plus the
 * bus-space tag/handle pair used by the lio_read_csr*()/lio_write_csr*()
 * accessors below.
 */
struct lio_mem_bus_space {
	/* The allocated PCI memory resource backing this mapping */
	struct resource		*pci_mem;
	/* Bus-space tag for register access through this region */
	bus_space_tag_t		tag;
	/* Bus-space handle for register access through this region */
	bus_space_handle_t	handle;
};
#define LIO_MAX_MAPS 32
/* Per-device bitmasks of I/O queues (one bit per queue number). */
struct lio_io_enable {
	/* Input (instruction) queue mask — assumed; confirm against users */
	uint64_t	iq;
	/* Output (DROQ) queue mask; bit q_no is set in lio_init_droq() */
	uint64_t	oq;
	/* Input queues using 64B commands — presumably; TODO confirm */
	uint64_t	iq64B;
};
/*
 * Offsets of the BAR0 window registers used for indirect register
 * access (see lio_pci_readq()/lio_pci_writeq()).  Filled in by the
 * chip-specific setup code.
 */
struct lio_reg_list {
	uint32_t	pci_win_wr_addr;
	uint32_t	pci_win_rd_addr_hi;
	uint32_t	pci_win_rd_addr_lo;
	uint32_t	pci_win_rd_addr;
	uint32_t	pci_win_wr_data_hi;
	uint32_t	pci_win_wr_data_lo;
	uint32_t	pci_win_wr_data;
	uint32_t	pci_win_rd_data;
};
#define LIO_MAX_CONSOLE_READ_BYTES 512
typedef int (*octeon_console_print_fn)(struct octeon_device *oct,
uint32_t num, char *pre, char *suf);
/* Host-side cache of one Octeon firmware console. */
struct lio_console {
	/* Non-zero once this console has been set up (lio_add_console()) */
	uint32_t	active;
	/* NOTE(review): appears to track pending I/O state — confirm */
	uint32_t	waiting;
	/* Octeon-side address of the console descriptor */
	uint64_t	addr;
	uint32_t	buffer_size;
	/* Octeon-side base addresses of the input/output ring buffers */
	uint64_t	input_base_addr;
	uint64_t	output_base_addr;
	/* Callback used to print console output on the host */
	octeon_console_print_fn print;
	/* Partial line carried over between reads */
	char		leftover[LIO_MAX_CONSOLE_READ_BYTES];
};
struct lio_board_info {
char name[LIO_BOARD_NAME];
char serial_number[LIO_SERIAL_NUM_LEN];
uint64_t major;
uint64_t minor;
};
/*
 * Chip-specific function dispatch table.  Presumably populated by the
 * per-chip setup code (cn23xx) so that common code can program queues
 * and interrupts without knowing the chip type — confirm against
 * cn23xx_pf_device.c.
 */
struct lio_fn_list {
	/* Program the registers of input queue iq_no (oct, iq_no) */
	void	(*setup_iq_regs) (struct octeon_device *, uint32_t);
	/* Program the registers of output queue q_no; called from
	 * lio_init_droq() as setup_oq_regs(oct, q_no) */
	void	(*setup_oq_regs) (struct octeon_device *, uint32_t);
	void	(*process_interrupt_regs) (void *);
	uint64_t	(*msix_interrupt_handler) (void *);
	int	(*soft_reset) (struct octeon_device *);
	int	(*setup_device_regs) (struct octeon_device *);
	/* BAR1 index-register management (see LIO_PCI_BAR1_* above) */
	void	(*bar1_idx_setup) (struct octeon_device *, uint64_t,
				   uint32_t, int);
	void	(*bar1_idx_write) (struct octeon_device *, uint32_t,
				   uint32_t);
	uint32_t	(*bar1_idx_read) (struct octeon_device *, uint32_t);
	uint32_t	(*update_iq_read_idx) (struct lio_instr_queue *);
	/* Enable/disable a class of interrupts (e.g. OCTEON_ALL_INTR) */
	void	(*enable_interrupt) (struct octeon_device *, uint8_t);
	void	(*disable_interrupt) (struct octeon_device *, uint8_t);
	int	(*enable_io_queues) (struct octeon_device *);
	void	(*disable_io_queues) (struct octeon_device *);
};
/* Must be multiple of 8, changing breaks ABI */
#define LIO_BOOTMEM_NAME_LEN 128
/*
* Structure for named memory blocks
* Number of descriptors
* available can be changed without affecting compatibility,
* but name length changes require a bump in the bootmem
* descriptor version
* Note: This structure must be naturally 64 bit aligned, as a single
* memory image will be used by both 32 and 64 bit programs.
*/
struct cvmx_bootmem_named_block_desc {
/* Base address of named block */
uint64_t base_addr;
/* Size actually allocated for named block */
uint64_t size;
/* name of named block */
char name[LIO_BOOTMEM_NAME_LEN];
};
struct lio_fw_info {
uint32_t max_nic_ports; /* max nic ports for the device */
uint32_t num_gmx_ports; /* num gmx ports */
uint64_t app_cap_flags; /* firmware cap flags */
/*
* The core application is running in this mode.
* See octeon-drv-opcodes.h for values.
*/
uint32_t app_mode;
char lio_firmware_version[32];
};
struct lio_callout {
struct callout timer;
void *ctxptr;
uint64_t ctxul;
};
#define LIO_NIC_STARTER_TIMEOUT 30000 /* 30000ms (30s) */
struct lio_tq {
struct taskqueue *tq;
struct timeout_task work;
void *ctxptr;
uint64_t ctxul;
};
struct lio_if_props {
/*
* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
int rx_on;
int gmxport;
struct ifnet *ifp;
};
#define LIO_MSIX_PO_INT 0x1
#define LIO_MSIX_PI_INT 0x2
struct lio_pf_vf_hs_word {
#if BYTE_ORDER == LITTLE_ENDIAN
/* PKIND value assigned for the DPI interface */
uint64_t pkind:8;
/* OCTEON core clock multiplier */
uint64_t core_tics_per_us:16;
/* OCTEON coprocessor clock multiplier */
uint64_t coproc_tics_per_us:16;
/* app that currently running on OCTEON */
uint64_t app_mode:8;
/* RESERVED */
uint64_t reserved:16;
#else /* BYTE_ORDER != LITTLE_ENDIAN */
/* RESERVED */
uint64_t reserved:16;
/* app that currently running on OCTEON */
uint64_t app_mode:8;
/* OCTEON coprocessor clock multiplier */
uint64_t coproc_tics_per_us:16;
/* OCTEON core clock multiplier */
uint64_t core_tics_per_us:16;
/* PKIND value assigned for the DPI interface */
uint64_t pkind:8;
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
};
struct lio_sriov_info {
/* Actual rings left for PF device */
uint32_t num_pf_rings;
/* SRN of PF usable IO queues */
uint32_t pf_srn;
/* total pf rings */
uint32_t trs;
};
/* Per-I/O-queue MSI-X vector state. */
struct lio_ioq_vector {
	/* Back pointer to the owning device */
	struct octeon_device	*oct_dev;
	/* MSI-X interrupt resource for this queue */
	struct resource		*msix_res;
	/* Cookie returned by bus_setup_intr() — presumably; confirm */
	void			*tag;
	/* Index of the DROQ serviced by this vector */
	int			droq_index;
	int			vector;
	/* CPU set used to pin this queue's taskqueue threads
	 * (see taskqueue_start_threads_cpuset() in lio_init_droq()) */
	cpuset_t		affinity_mask;
	uint32_t		ioq_num;
};
/*
* The Octeon device.
* Each Octeon device has this structure to represent all its
* components.
*/
/*
 * The Octeon device.  One instance per PCI function; aggregates queue
 * state, chip dispatch table, console access and firmware info.
 */
struct octeon_device {
	/* Lock for PCI window configuration accesses */
	struct mtx	pci_win_lock;

	/* Lock for memory accesses */
	struct mtx	mem_access_lock;

	/* PCI device pointer */
	device_t	device;

	/* Chip specific information. */
	void		*chip;

	/* Number of interfaces detected in this octeon device. */
	uint32_t	ifcount;

	struct lio_if_props props;

	/* Octeon Chip type. */
	uint16_t	chip_id;
	uint16_t	rev_id;		/* see OCTEON_MAJOR/MINOR_REV() */
	uint16_t	subdevice_id;
	uint16_t	pf_num;

	/* This device's id - set by the driver. */
	uint32_t	octeon_id;

	/* This device's PCIe port used for traffic. */
	uint16_t	pcie_port;

	uint16_t	flags;
#define LIO_FLAG_MSIX_ENABLED	(uint32_t)(1 << 2)

	/* The state of this device */
	volatile int	status;

	/* memory mapped io range */
	struct lio_mem_bus_space mem_bus_space[LIO_MEM_REGIONS];

	struct lio_reg_list	reg_list;

	struct lio_fn_list	fn_list;

	struct lio_board_info	boardinfo;

	/* Number of input queues currently set up */
	uint32_t	num_iqs;

	/* The pool containing pre allocated buffers used for soft commands */
	struct lio_sc_buffer_pool	sc_buf_pool;

	/* The input instruction queues */
	struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];

	/* The doubly-linked list of instruction response */
	struct lio_response_list response_list[LIO_MAX_RESPONSE_LISTS];

	/* Number of output queues currently set up */
	uint32_t	num_oqs;

	/* The DROQ output queues */
	struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];

	struct lio_io_enable	io_qmask;

	/* List of dispatch functions */
	struct lio_dispatch_list	dispatch;

	uint32_t	int_status;

	/* Physical location of the cvmx_bootmem_desc_t in octeon memory */
	uint64_t	bootmem_desc_addr;

	/*
	 * Placeholder memory for named blocks.
	 * Assumes single-threaded access
	 */
	struct cvmx_bootmem_named_block_desc	bootmem_named_block_desc;

	/* Address of consoles descriptor */
	uint64_t	console_desc_addr;

	/* Number of consoles available. 0 means they are inaccessible */
	uint32_t	num_consoles;

	/* Console caches */
	struct lio_console	console[LIO_MAX_MAPS];

	/* Console named block info */
	struct {
		uint64_t	dram_region_base;
		int		bar1_index;
	} console_nb_info;

	/* Coprocessor clock rate. */
	uint64_t	coproc_clock_rate;

	/*
	 * The core application is running in this mode. See lio_common.h
	 * for values.
	 */
	uint32_t	app_mode;

	struct lio_fw_info	fw_info;

	/* The name given to this device. */
	char		device_name[32];

	struct lio_tq	dma_comp_tq;

	/* Lock for dma response list */
	struct mtx	cmd_resp_wqlock;
	uint32_t	cmd_resp_state;

	struct lio_tq	check_db_tq[LIO_MAX_POSSIBLE_INSTR_QUEUES];

	struct lio_callout	console_timer[LIO_MAX_MAPS];

	int		num_msix_irqs;

	/* For PF, there is one non-ioq interrupt handler */
	struct resource	*msix_res;
	int		aux_vector;
	void		*tag;
#define INTRNAMSIZ		(32)
#define IRQ_NAME_OFF(i)		((i) * INTRNAMSIZ)

	struct lio_sriov_info	sriov_info;

	struct lio_pf_vf_hs_word	pfvf_hsword;

	int		msix_on;

	/* IOq information of its corresponding MSI-X interrupt. */
	struct lio_ioq_vector	*ioq_vector;

	int		rx_pause;
	int		tx_pause;

	/* TX/RX process pkt budget */
	uint32_t	rx_budget;
	uint32_t	tx_budget;

	struct octeon_link_stats	link_stats;	/* statistics from firmware */

	struct proc	*watchdog_task;

	volatile bool	cores_crashed;

	/* Interrupt coalescing parameters */
	uint32_t	rx_coalesce_usecs;
	uint32_t	rx_max_coalesced_frames;
	uint32_t	tx_max_coalesced_frames;

#define OCTEON_UBOOT_BUFFER_SIZE 512
	/* Captured u-boot version string and ring-buffer bookkeeping */
	char		uboot_version[OCTEON_UBOOT_BUFFER_SIZE];
	int		uboot_len;
	int		uboot_sidx, uboot_eidx;

	/* PCI bus location, recorded by lio_register_device() */
	struct {
		int	bus;
		int	dev;
		int	func;
	} loc;

	volatile int	*adapter_refcount;	/* reference count of adapter */
};
#define LIO_DRV_ONLINE 1
#define LIO_DRV_OFFLINE 2
#define LIO_CN23XX_PF(oct) ((oct)->chip_id == LIO_CN23XX_PF_VID)
#define LIO_CHIP_CONF(oct, TYPE) \
(((struct lio_ ## TYPE *)((oct)->chip))->conf)
#define MAX_IO_PENDING_PKT_COUNT 100
/*------------------ Function Prototypes ----------------------*/
/* Initialize device list memory */
void lio_init_device_list(int conf_type);
/* Free memory for Input and Output queue structures for a octeon device */
void lio_free_device_mem(struct octeon_device *oct);
/*
* Look up a free entry in the octeon_device table and allocate resources
* for the octeon_device structure for an octeon device. Called at init
* time.
*/
struct octeon_device *lio_allocate_device(device_t device);
/*
* Register a device's bus location at initialization time.
* @param oct - pointer to the octeon device structure.
* @param bus - PCIe bus #
* @param dev - PCIe device #
* @param func - PCIe function #
* @param is_pf - TRUE for PF, FALSE for VF
* @return reference count of device's adapter
*/
int lio_register_device(struct octeon_device *oct, int bus, int dev,
int func, int is_pf);
/*
* Deregister a device at de-initialization time.
* @param oct - pointer to the octeon device structure.
* @return reference count of device's adapter
*/
int lio_deregister_device(struct octeon_device *oct);
/*
* Initialize the driver's dispatch list which is a mix of a hash table
* and a linked list. This is done at driver load time.
* @param octeon_dev - pointer to the octeon device structure.
* @return 0 on success, else -ve error value
*/
int lio_init_dispatch_list(struct octeon_device *octeon_dev);
/*
* Delete the driver's dispatch list and all registered entries.
* This is done at driver unload time.
* @param octeon_dev - pointer to the octeon device structure.
*/
void lio_delete_dispatch_list(struct octeon_device *octeon_dev);
/*
* Initialize the core device fields with the info returned by the FW.
* @param recv_info - Receive info structure
* @param buf - Receive buffer
*/
int lio_core_drv_init(struct lio_recv_info *recv_info, void *buf);
/*
* Gets the dispatch function registered to receive packets with a
* given opcode/subcode.
* @param octeon_dev - the octeon device pointer.
* @param opcode - the opcode for which the dispatch function
* is to checked.
* @param subcode - the subcode for which the dispatch function
* is to checked.
*
* @return Success: lio_dispatch_fn_t (dispatch function pointer)
* @return Failure: NULL
*
* Looks up the dispatch list to get the dispatch function for a
* given opcode.
*/
lio_dispatch_fn_t lio_get_dispatch(struct octeon_device *octeon_dev,
uint16_t opcode, uint16_t subcode);
/*
* Get the octeon device pointer.
* @param octeon_id - The id for which the octeon device pointer is required.
* @return Success: Octeon device pointer.
* @return Failure: NULL.
*/
struct octeon_device *lio_get_device(uint32_t octeon_id);
/*
* Get the octeon id assigned to the octeon device passed as argument.
* This function is exported to other modules.
* @param dev - octeon device pointer passed as a void *.
* @return octeon device id
*/
int lio_get_device_id(void *dev);
/*
 * Extract the major revision from rev_id (bits 3:2).  Revision 0
 * silicon is reported as major revision 1.
 */
static inline uint16_t
OCTEON_MAJOR_REV(struct octeon_device *oct)
{
	uint16_t major = (oct->rev_id >> 2) & 0x3;

	return (major != 0 ? major : 1);
}
/* Extract the minor revision from the low two bits of rev_id. */
static inline uint16_t
OCTEON_MINOR_REV(struct octeon_device *oct)
{
	uint16_t minor = oct->rev_id;

	return (minor & 0x3);
}
/*
* Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
*
* This routine is called to read from the indirectly accessed
* Octeon registers that are visible through a PCI BAR0 mapped window
* register.
* @return - 64 bit value read from the register.
*/
uint64_t lio_pci_readq(struct octeon_device *oct, uint64_t addr);
/*
* Write windowed register.
* @param oct - pointer to the Octeon device.
* @param val - Value to write
* @param addr - Address of the register to write
*
* This routine is called to write to the indirectly accessed
* Octeon registers that are visible through a PCI BAR0 mapped window
* register.
* @return Nothing.
*/
void lio_pci_writeq(struct octeon_device *oct, uint64_t val, uint64_t addr);
/*
* Checks if memory access is okay
*
* @param oct which octeon to send to
* @return Zero on success, negative on failure.
*/
int lio_mem_access_ok(struct octeon_device *oct);
/*
* Waits for DDR initialization.
*
* @param oct which octeon to send to
* @param timeout_in_ms pointer to how long to wait until DDR is initialized
* in ms.
* If contents are 0, it waits until contents are non-zero
* before starting to check.
* @return Zero on success, negative on failure.
*/
int lio_wait_for_ddr_init(struct octeon_device *oct,
unsigned long *timeout_in_ms);
/*
* Wait for u-boot to boot and be waiting for a command.
*
* @param wait_time_hundredths
* Maximum time to wait
*
* @return Zero on success, negative on failure.
*/
int lio_wait_for_bootloader(struct octeon_device *oct,
uint32_t wait_time_hundredths);
/*
* Initialize console access
*
* @param oct which octeon initialize
* @return Zero on success, negative on failure.
*/
int lio_init_consoles(struct octeon_device *oct);
/*
* Adds access to a console to the device.
*
* @param oct: which octeon to add to
* @param console_num: which console
* @param dbg_enb: ptr to debug enablement string, one of:
* * NULL for no debug output (i.e. disabled)
* * empty string enables debug output (via default method)
* * specific string to enable debug console output
*
* @return Zero on success, negative on failure.
*/
int lio_add_console(struct octeon_device *oct, uint32_t console_num,
char *dbg_enb);
/* write or read from a console */
int lio_console_write(struct octeon_device *oct, uint32_t console_num,
char *buffer, uint32_t write_request_size,
uint32_t flags);
/* Removes all attached consoles. */
void lio_remove_consoles(struct octeon_device *oct);
/*
* Send a string to u-boot on console 0 as a command.
*
* @param oct which octeon to send to
* @param cmd_str String to send
* @param wait_hundredths Time to wait for u-boot to accept the command.
*
* @return Zero on success, negative on failure.
*/
int lio_console_send_cmd(struct octeon_device *oct, char *cmd_str,
uint32_t wait_hundredths);
/*
* Parses, validates, and downloads firmware, then boots associated cores.
* @param oct which octeon to download firmware to
* @param data - The complete firmware file image
* @param size - The size of the data
*
* @return 0 if success.
* -EINVAL if file is incompatible or badly formatted.
* -ENODEV if no handler was found for the application type or an
* invalid octeon id was passed.
*/
int lio_download_firmware(struct octeon_device *oct, const uint8_t *data,
size_t size);
char *lio_get_state_string(volatile int *state_ptr);
/*
* Sets up instruction queues for the device
* @param oct which octeon to setup
*
* @return 0 if success. 1 if fails
*/
int lio_setup_instr_queue0(struct octeon_device *oct);
/*
* Sets up output queues for the device
* @param oct which octeon to setup
*
* @return 0 if success. 1 if fails
*/
int lio_setup_output_queue0(struct octeon_device *oct);
int lio_get_tx_qsize(struct octeon_device *oct, uint32_t q_no);
int lio_get_rx_qsize(struct octeon_device *oct, uint32_t q_no);
/*
* Retrieve the config for the device
* @param oct which octeon
* @param card_type type of card
*
* @returns pointer to configuration
*/
void *lio_get_config_info(struct octeon_device *oct, uint16_t card_type);
/*
* Gets the octeon device configuration
* @return - pointer to the octeon configuration struture
*/
struct lio_config *lio_get_conf(struct octeon_device *oct);
void lio_free_ioq_vector(struct octeon_device *oct);
int lio_allocate_ioq_vector(struct octeon_device *oct);
void lio_enable_irq(struct lio_droq *droq, struct lio_instr_queue *iq);
/* Read a 32-bit value from the device's PCI configuration space. */
static inline uint32_t
lio_read_pci_cfg(struct octeon_device *oct, uint32_t reg)
{

	return (pci_read_config(oct->device, reg, 4));
}
/* Write a 32-bit value to the device's PCI configuration space. */
static inline void
lio_write_pci_cfg(struct octeon_device *oct, uint32_t reg, uint32_t value)
{

	pci_write_config(oct->device, reg, value, 4);
}
/*
 * CSR accessors: 8/16/32/64-bit reads and writes of registers in the
 * device's first mapped memory region (BAR0, mem_bus_space[0]).
 * 'reg' is the byte offset of the register within the region.
 */
static inline uint8_t
lio_read_csr8(struct octeon_device *oct, uint32_t reg)
{

	return (bus_space_read_1(oct->mem_bus_space[0].tag,
				 oct->mem_bus_space[0].handle, reg));
}

static inline void
lio_write_csr8(struct octeon_device *oct, uint32_t reg, uint8_t val)
{

	bus_space_write_1(oct->mem_bus_space[0].tag,
			  oct->mem_bus_space[0].handle, reg, val);
}

static inline uint16_t
lio_read_csr16(struct octeon_device *oct, uint32_t reg)
{

	return (bus_space_read_2(oct->mem_bus_space[0].tag,
				 oct->mem_bus_space[0].handle, reg));
}

static inline void
lio_write_csr16(struct octeon_device *oct, uint32_t reg, uint16_t val)
{

	bus_space_write_2(oct->mem_bus_space[0].tag,
			  oct->mem_bus_space[0].handle, reg, val);
}

static inline uint32_t
lio_read_csr32(struct octeon_device *oct, uint32_t reg)
{

	return (bus_space_read_4(oct->mem_bus_space[0].tag,
				 oct->mem_bus_space[0].handle, reg));
}

static inline void
lio_write_csr32(struct octeon_device *oct, uint32_t reg, uint32_t val)
{

	bus_space_write_4(oct->mem_bus_space[0].tag,
			  oct->mem_bus_space[0].handle, reg, val);
}

static inline uint64_t
lio_read_csr64(struct octeon_device *oct, uint32_t reg)
{

	return (bus_space_read_8(oct->mem_bus_space[0].tag,
				 oct->mem_bus_space[0].handle, reg));
}

static inline void
lio_write_csr64(struct octeon_device *oct, uint32_t reg, uint64_t val)
{

	bus_space_write_8(oct->mem_bus_space[0].tag,
			  oct->mem_bus_space[0].handle, reg, val);
}
#endif /* _LIO_DEVICE_H_ */

View File

@ -0,0 +1,868 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
#include "cn23xx_pf_device.h"
#include "lio_network.h"
/*
 * Bookkeeping for a received packet handed to a dispatch function:
 * list linkage, the receive info, and the function to invoke.
 * NOTE(review): the tag "__dispatch" is a reserved identifier
 * (double leading underscore); consider renaming.
 */
struct __dispatch {
	/* List node — presumably queued on droq->dispatch_stq_head; confirm */
	struct lio_stailq_node	node;
	/* Receive information passed to the dispatch function */
	struct lio_recv_info	*rinfo;
	/* The registered dispatch function for this packet */
	lio_dispatch_fn_t	disp_fn;
};
void *lio_get_dispatch_arg(struct octeon_device *oct,
uint16_t opcode, uint16_t subcode);
/*
* Get the argument that the user set when registering dispatch
* function for a given opcode/subcode.
* @param octeon_dev - the octeon device pointer.
* @param opcode - the opcode for which the dispatch argument
* is to be checked.
* @param subcode - the subcode for which the dispatch argument
* is to be checked.
* @return Success: void * (argument to the dispatch function)
* @return Failure: NULL
*
*/
void *
lio_get_dispatch_arg(struct octeon_device *octeon_dev,
uint16_t opcode, uint16_t subcode)
{
struct lio_stailq_node *dispatch;
void *fn_arg = NULL;
int idx;
uint16_t combined_opcode;
combined_opcode = LIO_OPCODE_SUBCODE(opcode, subcode);
idx = combined_opcode & LIO_OPCODE_MASK;
mtx_lock(&octeon_dev->dispatch.lock);
if (octeon_dev->dispatch.count == 0) {
mtx_unlock(&octeon_dev->dispatch.lock);
return (NULL);
}
if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
fn_arg = octeon_dev->dispatch.dlist[idx].arg;
} else {
STAILQ_FOREACH(dispatch,
&octeon_dev->dispatch.dlist[idx].head, entries) {
if (((struct lio_dispatch *)dispatch)->opcode ==
combined_opcode) {
fn_arg = ((struct lio_dispatch *)dispatch)->arg;
break;
}
}
}
mtx_unlock(&octeon_dev->dispatch.lock);
return (fn_arg);
}
/*
* Check for packets on Droq. This function should be called with lock held.
* @param droq - Droq on which count is checked.
* @return Returns packet count.
*/
/*
 * Check for packets on Droq. This function should be called with lock held.
 * Reads the hardware sent-count CSR, accounts the packets that arrived
 * since the last check into droq->pkts_pending, and returns that delta.
 * @param droq - Droq on which count is checked.
 * @return Returns packet count.
 */
uint32_t
lio_droq_check_hw_for_pkts(struct lio_droq *droq)
{
	struct octeon_device *oct = droq->oct_dev;
	uint32_t hw_count, new_pkts;

	hw_count = lio_read_csr32(oct, droq->pkts_sent_reg);
	new_pkts = hw_count - droq->pkt_count;
	droq->pkt_count = hw_count;

	/* we shall write to cnts at the end of processing */
	if (new_pkts != 0)
		atomic_add_int(&droq->pkts_pending, new_pkts);

	return (new_pkts);
}
/*
 * Compute droq->max_empty_descs: the largest number of descriptors that
 * may be left without buffers while still guaranteeing that a maximum
 * sized (64KB) packet from Octeon can be received.
 */
static void
lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
{
	uint32_t bytes = 0, descs_for_64k = 0;

	/* Count how many buffers it takes to cover a 64KB packet. */
	while (bytes < (64 * 1024)) {
		descs_for_64k++;
		bytes += droq->buffer_size;
	}

	droq->max_empty_descs = droq->max_count - descs_for_64k;
}
/* Reset the ring indices and pending-packet count to their initial state. */
static void
lio_droq_reset_indices(struct lio_droq *droq)
{
	droq->refill_count = 0;
	droq->refill_idx = 0;
	droq->read_idx = 0;
	atomic_store_rel_int(&droq->pkts_pending, 0);
}
/* Free every receive buffer still attached to the ring, then reset indices. */
static void
lio_droq_destroy_ring_buffers(struct octeon_device *oct,
			      struct lio_droq *droq)
{
	uint32_t idx;

	for (idx = 0; idx < droq->max_count; idx++) {
		void *buf = droq->recv_buf_list[idx].buffer;

		if (buf == NULL)
			continue;
		droq->recv_buf_list[idx].buffer = NULL;
		lio_recv_buffer_free(buf);
	}

	lio_droq_reset_indices(droq);
}
/*
 * Allocate a receive buffer for every descriptor in the ring and point
 * the hardware descriptors at them.  On allocation failure, returns
 * -ENOMEM with the already-allocated buffers left in recv_buf_list
 * (the caller's error path frees them via lio_delete_droq()).
 */
static int
lio_droq_setup_ring_buffers(struct octeon_device *oct,
			    struct lio_droq *droq)
{
	struct lio_droq_desc *desc_ring = droq->desc_ring;
	void *buf;
	uint32_t i;

	for (i = 0; i < droq->max_count; i++) {
		buf = lio_recv_buffer_alloc(droq->buffer_size);
		if (buf == NULL) {
			lio_dev_err(oct, "%s buffer alloc failed\n",
				    __func__);
			droq->stats.rx_alloc_failure++;
			return (-ENOMEM);
		}

		droq->recv_buf_list[i].buffer = buf;
		/* Buffers are mbufs; cache the data pointer for the host side */
		droq->recv_buf_list[i].data = ((struct mbuf *)buf)->m_data;

		/* Hand the DMA address of the buffer to the descriptor */
		desc_ring[i].info_ptr = 0;
		desc_ring[i].buffer_ptr =
		    lio_map_ring(oct->device, droq->recv_buf_list[i].buffer,
				 droq->buffer_size);
	}

	lio_droq_reset_indices(droq);

	lio_droq_compute_max_packet_bufs(droq);

	return (0);
}
/*
 * Tear down output queue q_no: stop and free its taskqueue, free the
 * receive buffers and descriptor ring, and clear the queue's slot in
 * the device.  Also used as the error-path cleanup for lio_init_droq()
 * (the init_droq_fail label), where the queue may only be partially
 * constructed — hence the NULL guards below.
 * @return 0 always.
 */
int
lio_delete_droq(struct octeon_device *oct, uint32_t q_no)
{
	struct lio_droq *droq = oct->droq[q_no];

	lio_dev_dbg(oct, "%s[%d]\n", __func__, q_no);

	/*
	 * The taskqueue does not exist yet when we are called from the
	 * lio_init_droq() failure path before taskqueue_create_fast();
	 * taskqueue_cancel()/taskqueue_drain() must not be given NULL.
	 */
	if (droq->droq_taskqueue != NULL) {
		while (taskqueue_cancel(droq->droq_taskqueue,
					&droq->droq_task, NULL))
			taskqueue_drain(droq->droq_taskqueue,
					&droq->droq_task);
		taskqueue_free(droq->droq_taskqueue);
		droq->droq_taskqueue = NULL;
	}

	/*
	 * recv_buf_list is NULL if its allocation failed in
	 * lio_init_droq(); lio_droq_destroy_ring_buffers() would index
	 * through it.  free(9) accepts NULL, so no guard needed there.
	 */
	if (droq->recv_buf_list != NULL)
		lio_droq_destroy_ring_buffers(oct, droq);
	free(droq->recv_buf_list, M_DEVBUF);

	if (droq->desc_ring != NULL)
		lio_dma_free((droq->max_count * LIO_DROQ_DESC_SIZE),
			     droq->desc_ring);

	oct->io_qmask.oq &= ~(1ULL << q_no);

	/* NOTE(review): droq->lock is never mtx_destroy()ed here — the
	 * lock may not be initialized on the early failure paths, but a
	 * fully initialized queue leaks it; confirm intended behavior. */
	bzero(oct->droq[q_no], sizeof(struct lio_droq));

	oct->num_oqs--;

	return (0);
}
/*
 * Taskqueue bottom-half for one DROQ: process up to rx_budget received
 * packets, flush the paired input queue if it has pending instructions,
 * then either re-enqueue itself (work remains) or re-enable the queue
 * interrupt.  'pending' is the taskqueue argument and is unused.
 */
void
lio_droq_bh(void *ptr, int pending __unused)
{
	struct lio_droq *droq = ptr;
	struct octeon_device *oct = droq->oct_dev;
	/* IQ and DROQ with the same index are paired */
	struct lio_instr_queue *iq = oct->instr_queue[droq->q_no];
	int	reschedule, tx_done = 1;

	reschedule = lio_droq_process_packets(oct, droq, oct->rx_budget);

	if (atomic_load_acq_int(&iq->instr_pending))
		tx_done = lio_flush_iq(oct, iq, oct->tx_budget);

	/*
	 * Reschedule ourselves while either side still has work;
	 * only re-enable the interrupt once both are drained.
	 */
	if (reschedule || !tx_done)
		taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task);
	else
		lio_enable_irq(droq, iq);
}
/*
 * Initialize output queue q_no: read the per-chip configuration,
 * allocate the descriptor ring and receive buffers, program the queue
 * registers, and start the per-queue processing taskqueue pinned to
 * the queue's MSI-X CPU affinity.
 * @param oct       device owning the queue
 * @param q_no      queue index
 * @param num_descs number of ring descriptors
 * @param desc_size receive buffer size for this queue
 * @param app_ctx   opaque context stored in droq->app_ctx (q_no if NULL)
 * @return 0 on success, 1 on failure (partially built state is torn
 *         down via lio_delete_droq()).
 */
int
lio_init_droq(struct octeon_device *oct, uint32_t q_no,
	      uint32_t num_descs, uint32_t desc_size, void *app_ctx)
{
	struct lio_droq	*droq;
	unsigned long	size;
	uint32_t	c_buf_size = 0, c_num_descs = 0, c_pkts_per_intr = 0;
	uint32_t	c_refill_threshold = 0, desc_ring_size = 0;

	lio_dev_dbg(oct, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	bzero(droq, LIO_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx != NULL)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (LIO_CN23XX_PF(oct)) {
		struct lio_config *conf23 = LIO_CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr =
		    (uint32_t)LIO_GET_OQ_PKTS_PER_INTR_CFG(conf23);
		c_refill_threshold =
		    (uint32_t)LIO_GET_OQ_REFILL_THRESHOLD_CFG(conf23);
	} else {
		/* Only CN23XX PF devices are supported */
		return (1);
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	/* DMA-coherent descriptor ring shared with the hardware */
	desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
	droq->desc_ring = lio_dma_alloc(desc_ring_size, &droq->desc_ring_dma);
	if (droq->desc_ring == NULL) {
		lio_dev_err(oct, "Output queue %d ring alloc failed\n", q_no);
		return (1);
	}

	lio_dev_dbg(oct, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n", q_no,
		    droq->desc_ring, droq->desc_ring_dma);
	lio_dev_dbg(oct, "droq[%d]: num_desc: %d\n", q_no, droq->max_count);

	/* Host-side shadow array tracking the buffer behind each descriptor */
	size = droq->max_count * LIO_DROQ_RECVBUF_SIZE;
	droq->recv_buf_list =
	    (struct lio_recv_buffer *)malloc(size, M_DEVBUF,
					     M_NOWAIT | M_ZERO);
	if (droq->recv_buf_list == NULL) {
		lio_dev_err(oct, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (lio_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	lio_dev_dbg(oct, "DROQ INIT: max_empty_descs: %d\n",
		    droq->max_empty_descs);

	mtx_init(&droq->lock, "droq_lock", NULL, MTX_DEF);

	STAILQ_INIT(&droq->dispatch_stq_head);

	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	/*
	 * Initialize the taskqueue that handles
	 * output queue packet processing.
	 */
	lio_dev_dbg(oct, "Initializing droq%d taskqueue\n", q_no);
	/* NOTE(review): taskqueue_create_fast() return value is not
	 * checked; a NULL taskqueue would fault later — confirm. */
	TASK_INIT(&droq->droq_task, 0, lio_droq_bh, (void *)droq);

	droq->droq_taskqueue = taskqueue_create_fast("lio_droq_task", M_NOWAIT,
						     taskqueue_thread_enqueue,
						     &droq->droq_taskqueue);
	taskqueue_start_threads_cpuset(&droq->droq_taskqueue, 1, PI_NET,
				       &oct->ioq_vector[q_no].affinity_mask,
				       "lio%d_droq%d_task", oct->octeon_id,
				       q_no);

	return (0);

init_droq_fail:
	/* lio_delete_droq() frees whatever was built so far */
	lio_delete_droq(oct, q_no);
	return (1);
}
/*
* lio_create_recv_info
* Parameters:
* octeon_dev - pointer to the octeon device structure
* droq - droq in which the packet arrived.
* buf_cnt - no. of buffers used by the packet.
* idx - index in the descriptor for the first buffer in the packet.
* Description:
* Allocates a recv_info_t and copies the buffer addresses for packet data
* into the recv_pkt space which starts at an 8B offset from recv_info_t.
* Flags the descriptors for refill later. If available descriptors go
* below the threshold to receive a 64K pkt, new buffers are first allocated
* before the recv_pkt_t is created.
* This routine will be called in interrupt context.
* Returns:
* Success: Pointer to recv_info_t
* Failure: NULL.
* Locks:
* The droq->lock is held when this routine is called.
*/
/*
 * Build a lio_recv_info for a packet spanning buf_cnt ring buffers
 * starting at descriptor idx.  Ownership of the buffers moves into the
 * recv_pkt (the ring slots are NULLed so the refill path re-allocates
 * them).  Called with droq->lock held; returns NULL on allocation
 * failure.
 */
static inline struct lio_recv_info *
lio_create_recv_info(struct octeon_device *octeon_dev, struct lio_droq *droq,
		     uint32_t buf_cnt, uint32_t idx)
{
	struct lio_droq_info *info;
	struct lio_recv_info *recv_info;
	struct lio_recv_pkt *pkt;
	uint32_t remaining, slot;

	info = (struct lio_droq_info *)droq->recv_buf_list[idx].data;

	recv_info = lio_alloc_recv_info(sizeof(struct __dispatch));
	if (recv_info == NULL)
		return (NULL);

	pkt = recv_info->recv_pkt;
	pkt->rh = info->rh;
	pkt->length = (uint32_t)info->length;
	pkt->buffer_count = (uint16_t)buf_cnt;
	pkt->octeon_id = (uint16_t)octeon_dev->octeon_id;

	/* Transfer each buffer out of the ring into the recv_pkt. */
	remaining = (uint32_t)info->length;
	for (slot = 0; slot < buf_cnt; slot++) {
		pkt->buffer_size[slot] =
		    (remaining >= droq->buffer_size) ? droq->buffer_size :
		    remaining;
		pkt->buffer_ptr[slot] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;
		idx = lio_incr_index(idx, 1, droq->max_count);
		remaining -= droq->buffer_size;
	}

	return (recv_info);
}
/*
* If we were not able to refill all buffers, try to move around
* the buffers that were not dispatched.
*/
/*
 * Compact the ring when buffer allocation failed during refill: walk
 * from refill_idx to read_idx and move any still-attached buffers down
 * into the earliest unfilled slots, so the filled region stays
 * contiguous.  Called with droq->lock held.
 * @return number of descriptors made valid by the move.
 */
static inline uint32_t
lio_droq_refill_pullup_descs(struct lio_droq *droq,
			     struct lio_droq_desc *desc_ring)
{
	uint32_t desc_refilled = 0;
	uint32_t refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer != NULL) {
			/* Move this buffer into the current refill slot */
			droq->recv_buf_list[droq->refill_idx].buffer =
			    droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
			    droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
			    desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			/*
			 * Advance refill_idx past every slot that already
			 * has a buffer (including the one just filled).
			 */
			do {
				droq->refill_idx =
				    lio_incr_index(droq->refill_idx, 1,
						   droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer !=
				 NULL);
		}
		refill_index = lio_incr_index(refill_index, 1, droq->max_count);
	}	/* while */
	return (desc_refilled);
}
/*
 * lio_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
uint32_t
lio_droq_refill(struct octeon_device *octeon_dev, struct lio_droq *droq)
{
	struct lio_droq_desc *desc_ring;
	void *buf = NULL;
	uint32_t desc_refilled = 0;
	uint8_t *data;

	desc_ring = droq->desc_ring;
	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/*
		 * If a valid buffer exists (happens if there is no dispatch),
		 * reuse
		 * the buffer, else allocate.
		 */
		if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
			buf = lio_recv_buffer_alloc(droq->buffer_size);
			/*
			 * If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (buf == NULL) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = ((struct mbuf *)buf)->m_data;
		} else {
			data = ((struct mbuf *)droq->recv_buf_list
				[droq->refill_idx].buffer)->m_data;
		}
		droq->recv_buf_list[droq->refill_idx].data = data;
		/* Publish the buffer's bus address to the hardware ring. */
		desc_ring[droq->refill_idx].buffer_ptr =
		    lio_map_ring(octeon_dev->device,
				 droq->recv_buf_list[droq->refill_idx].buffer,
				 droq->buffer_size);
		droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
						  droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}
	/*
	 * If allocation failed above, compact the undispatched buffers so
	 * the empty region of the ring stays contiguous.  refill_count does
	 * not change in that second pass: buffers only move to close gaps,
	 * and the same number of slots still need fresh buffers.
	 */
	if (droq->refill_count)
		desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);
	return (desc_refilled);
}
/*
 * Number of ring buffers needed to hold total_len bytes of packet data,
 * i.e. total_len / buf_size rounded up.  Written as quotient-plus-
 * remainder so the computation cannot wrap for total_len values near
 * UINT32_MAX (the former "(len + size - 1) / size" form could).
 * buf_size must be non-zero.
 */
static inline uint32_t
lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
{
	return (total_len / buf_size + ((total_len % buf_size) != 0));
}
/*
 * Queue one slow-path packet for dispatch.  Looks up the dispatch
 * function for the receive header's opcode/subcode, wraps the packet's
 * ring buffers in a recv_info and appends it to droq->dispatch_stq_head;
 * the dispatch function itself runs later, after droq->lock is dropped.
 * Always returns the number of ring buffers the packet occupies so the
 * caller can advance read_idx, even when the packet had to be dropped.
 */
static int
lio_droq_dispatch_pkt(struct octeon_device *oct, struct lio_droq *droq,
		      union octeon_rh *rh, struct lio_droq_info *info)
{
	struct lio_recv_info *rinfo;
	struct __dispatch *rdisp;
	lio_dispatch_fn_t disp_fn;
	uint32_t cnt;

	cnt = lio_droq_get_bufcount(droq->buffer_size, (uint32_t)info->length);

	disp_fn = lio_get_dispatch(oct, (uint16_t)rh->r.opcode,
				   (uint16_t)rh->r.subcode);
	if (disp_fn == NULL) {
		lio_dev_err(oct, "DROQ: No dispatch function (opcode %u/%u)\n",
			    (unsigned int)rh->r.opcode,
			    (unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
		return (cnt);
	}

	rinfo = lio_create_recv_info(oct, droq, cnt, droq->read_idx);
	if (rinfo == NULL) {
		droq->stats.dropped_nomem++;
		return (cnt);
	}

	rdisp = rinfo->rsvd;
	rdisp->rinfo = rinfo;
	rdisp->disp_fn = disp_fn;
	rinfo->recv_pkt->rh = *rh;
	STAILQ_INSERT_TAIL(&droq->dispatch_stq_head, &rdisp->node, entries);

	return (cnt);
}
/*
 * Drop cnt packets from the ring without delivering them: advance
 * read_idx past each packet's buffers and mark those slots for refill.
 * Byte counts are still accounted in stats.bytes_received.
 */
static inline void
lio_droq_drop_packets(struct octeon_device *oct, struct lio_droq *droq,
		      uint32_t cnt)
{
	struct lio_droq_info *info;
	uint32_t dropped, nbufs;

	for (dropped = 0; dropped < cnt; dropped++) {
		info = (struct lio_droq_info *)
		    droq->recv_buf_list[droq->read_idx].data;

		/* Byte-swap the two 64-bit info words written by hardware. */
		lio_swap_8B_data((uint64_t *)info, 2);

		if (info->length) {
			/* Adjust length the same way the fast path does. */
			info->length += 8;
			droq->stats.bytes_received += info->length;
			nbufs = lio_droq_get_bufcount(droq->buffer_size,
						      (uint32_t)info->length);
		} else {
			lio_dev_err(oct, "DROQ: In drop: pkt with len 0\n");
			nbufs = 1;
		}

		droq->read_idx = lio_incr_index(droq->read_idx, nbufs,
						droq->max_count);
		droq->refill_count += nbufs;
	}
}
/*
 * Fast-path receive processing for one DROQ.
 *
 * Consumes up to pkts_to_process packets from the ring.  Slow-path
 * packets (per lio_opcode_slow_path()) are queued for deferred dispatch;
 * all others are assembled into an mbuf (chaining ring buffers with
 * m_cat() for multi-buffer packets) and handed to droq->ops.fptr, or
 * freed if no handler is registered.  Refills the ring and credits the
 * hardware whenever refill_count crosses refill_threshold.
 *
 * Returns the number of packets consumed (pkts_to_process when
 * drop_on_max caused the remainder to be dropped).
 * Called with droq->lock held.
 */
static uint32_t
lio_droq_fast_process_packets(struct octeon_device *oct, struct lio_droq *droq,
			      uint32_t pkts_to_process)
{
	struct lio_droq_info *info;
	union octeon_rh *rh;
	uint32_t pkt, pkt_count, total_len = 0;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		struct mbuf *nicbuf = NULL;
		uint32_t pkt_len = 0;

		info = (struct lio_droq_info *)
		    droq->recv_buf_list[droq->read_idx].data;
		lio_swap_8B_data((uint64_t *)info, 2);

		if (!info->length) {
			lio_dev_err(oct,
				    "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				    droq->q_no, droq->read_idx, pkt_count);
			hexdump((uint8_t *)info, LIO_DROQ_INFO_SIZE, NULL,
				HD_OMIT_CHARS);
			pkt++;
			/*
			 * Bug fix: skip past the bad descriptor.  Every
			 * other call site assigns lio_incr_index()'s return
			 * value; the bare call used here before discarded
			 * it, leaving read_idx stuck on this slot while
			 * refill_count was still incremented.
			 */
			droq->read_idx = lio_incr_index(droq->read_idx, 1,
							droq->max_count);
			droq->refill_count++;
			break;
		}
		rh = &info->rh;
		/* Include the 8-byte length word consumed by hardware. */
		info->length += 8;
		rh->r_dh.len += (LIO_DROQ_INFO_SIZE + 7) / 8;
		total_len += (uint32_t)info->length;
		if (lio_opcode_slow_path(rh)) {
			uint32_t buf_cnt;

			buf_cnt = lio_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = lio_incr_index(droq->read_idx,
							buf_cnt,
							droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				/* Packet fits in one ring buffer. */
				pkt_len = (uint32_t)info->length;
				nicbuf = droq->recv_buf_list[
						       droq->read_idx].buffer;
				nicbuf->m_len = pkt_len;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;
				droq->read_idx =
					lio_incr_index(droq->read_idx,
						       1, droq->max_count);
				droq->refill_count++;
			} else {
				/*
				 * Multi-buffer packet: chain consecutive
				 * ring buffers into one mbuf chain.
				 */
				bool secondary_frag = false;

				pkt_len = 0;

				while (pkt_len < info->length) {
					int frag_len, idx = droq->read_idx;
					struct mbuf *buffer;

					frag_len =
					    ((pkt_len + droq->buffer_size) >
					     info->length) ?
					    ((uint32_t)info->length -
					     pkt_len) : droq->buffer_size;

					buffer = ((struct mbuf *)
						  droq->recv_buf_list[idx].
						  buffer);
					buffer->m_len = frag_len;
					if (__predict_true(secondary_frag)) {
						m_cat(nicbuf, buffer);
					} else {
						nicbuf = buffer;
						secondary_frag = true;
					}

					droq->recv_buf_list[droq->read_idx].
						buffer = NULL;

					pkt_len += frag_len;
					droq->read_idx =
					    lio_incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf != NULL) {
				if (droq->ops.fptr != NULL) {
					droq->ops.fptr(nicbuf, pkt_len, rh,
						       droq, droq->ops.farg);
				} else {
					lio_recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = lio_droq_refill(oct, droq);

			/*
			 * Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			lio_write_csr32(oct, droq->pkts_credit_reg,
					desc_refilled);
			/* make sure mmio write completes */
			__compiler_membar();
		}
	}	/* for (each packet)... */

	/* Account for the packets and bytes consumed above. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	tcp_lro_flush_all(&droq->lro);

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		lio_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return (pkts_to_process);
	}

	return (pkt);
}
/*
 * Process up to budget packets on one DROQ.
 *
 * Fast-path work runs under droq->lock; slow-path packets queued on
 * dispatch_stq_head by lio_droq_dispatch_pkt() are handed to their
 * dispatch functions only after the lock has been released.
 *
 * Returns 1 if packets are still pending (caller should reschedule),
 * 0 otherwise.
 */
int
lio_droq_process_packets(struct octeon_device *oct, struct lio_droq *droq,
			 uint32_t budget)
{
	struct lio_stailq_node *tmp, *tmp2;
	uint32_t pkt_count = 0, pkts_processed = 0;

	/* Grab the droq lock */
	mtx_lock(&droq->lock);
	lio_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_load_acq_int(&droq->pkts_pending);
	if (!pkt_count) {
		mtx_unlock(&droq->lock);
		return (0);
	}
	if (pkt_count > budget)
		pkt_count = budget;
	pkts_processed = lio_droq_fast_process_packets(oct, droq, pkt_count);
	atomic_subtract_int(&droq->pkts_pending, pkts_processed);
	/* Release the lock */
	mtx_unlock(&droq->lock);
	/*
	 * Run the deferred slow-path dispatches without the lock held.
	 * Each iteration removes the current list head; the _SAFE
	 * iterator's saved next pointer (tmp2) keeps the walk valid while
	 * the dispatch function consumes (and presumably frees) rinfo.
	 */
	STAILQ_FOREACH_SAFE(tmp, &droq->dispatch_stq_head, entries, tmp2) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		STAILQ_REMOVE_HEAD(&droq->dispatch_stq_head, entries);
		rdisp->disp_fn(rdisp->rinfo, lio_get_dispatch_arg(oct,
		    (uint16_t)rdisp->rinfo->recv_pkt->rh.r.opcode,
		    (uint16_t)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}
	/* If there are packets pending. schedule tasklet again */
	if (atomic_load_acq_int(&droq->pkts_pending))
		return (1);
	return (0);
}
/*
 * Register droq handler ops for output queue q_no.  The ops structure is
 * copied under droq->lock so the DROQ handler never observes a partial
 * update.  Returns 0 on success, -EINVAL on bad arguments.
 */
int
lio_register_droq_ops(struct octeon_device *oct, uint32_t q_no,
		      struct lio_droq_ops *ops)
{
	struct lio_droq *droq;
	struct lio_config *lio_cfg = NULL;

	lio_cfg = lio_get_conf(oct);
	if (lio_cfg == NULL)
		return (-EINVAL);

	if (ops == NULL) {
		lio_dev_err(oct, "%s: droq_ops pointer is NULL\n", __func__);
		return (-EINVAL);
	}

	if (q_no >= LIO_GET_OQ_MAX_Q_CFG(lio_cfg)) {
		/*
		 * Report the bound actually checked above; the previous
		 * message printed oct->num_oqs - 1, which may differ from
		 * the configured maximum.
		 */
		lio_dev_err(oct, "%s: droq id (%d) exceeds MAX (%d)\n",
			    __func__, q_no,
			    LIO_GET_OQ_MAX_Q_CFG(lio_cfg) - 1);
		return (-EINVAL);
	}

	droq = oct->droq[q_no];

	mtx_lock(&droq->lock);
	memcpy(&droq->ops, ops, sizeof(struct lio_droq_ops));
	mtx_unlock(&droq->lock);

	return (0);
}
/*
 * Clear the droq handler ops for output queue q_no (the reverse of
 * lio_register_droq_ops()).  Subsequent packets on this queue will go
 * through per-opcode dispatch lookup instead of ops.fptr.
 * Returns 0 on success (including when the droq does not exist),
 * -EINVAL on bad arguments.
 */
int
lio_unregister_droq_ops(struct octeon_device *oct, uint32_t q_no)
{
	struct lio_droq *droq;
	struct lio_config *lio_cfg = NULL;

	lio_cfg = lio_get_conf(oct);
	if (lio_cfg == NULL)
		return (-EINVAL);

	if (q_no >= LIO_GET_OQ_MAX_Q_CFG(lio_cfg)) {
		/*
		 * Report the bound actually checked above; the previous
		 * message printed oct->num_oqs - 1, which may differ from
		 * the configured maximum.
		 */
		lio_dev_err(oct, "%s: droq id (%d) exceeds MAX (%d)\n",
			    __func__, q_no,
			    LIO_GET_OQ_MAX_Q_CFG(lio_cfg) - 1);
		return (-EINVAL);
	}

	droq = oct->droq[q_no];
	if (droq == NULL) {
		lio_dev_info(oct, "Droq id (%d) not available.\n", q_no);
		return (0);
	}

	mtx_lock(&droq->lock);
	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;
	mtx_unlock(&droq->lock);

	return (0);
}
/*
 * Create and initialize output queue q_no.  Fails if the droq slot is
 * already in use.  Returns 0 on success, 1 if the queue already exists,
 * -ENOMEM if initialization failed.
 */
int
lio_create_droq(struct octeon_device *oct, uint32_t q_no, uint32_t num_descs,
		uint32_t desc_size, void *app_ctx)
{

	if (oct->droq[q_no]->oct_dev != NULL) {
		lio_dev_dbg(oct, "Droq already in use. Cannot create droq %d again\n",
			    q_no);
		return (1);
	}

	/* Initialize the Droq */
	if (lio_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		bzero(oct->droq[q_no], sizeof(struct lio_droq));
		return (-ENOMEM);
	}

	oct->num_oqs++;

	lio_dev_dbg(oct, "%s: Total number of OQ: %d\n", __func__,
		    oct->num_oqs);

	/*
	 * Global Droq register settings: nothing to do here for now, as the
	 * settings are programmed for all 32 Droqs at the same time.
	 */
	return (0);
}

View File

@ -0,0 +1,432 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file lio_droq.h
* \brief Implementation of Octeon Output queues. "Output" is with
* respect to the Octeon device on the NIC. From this driver's point of
* view they are ingress queues.
*/
#ifndef __LIO_DROQ_H__
#define __LIO_DROQ_H__
/*
 * Octeon descriptor format.
 * The descriptor ring is made of descriptors which have 2 64-bit values:
 * -# Physical (bus) address of the data buffer.
 * -# Physical (bus) address of a lio_droq_info structure.
 * The Octeon device DMA's incoming packets and its information at the address
 * given by these descriptor fields.
 */
struct lio_droq_desc {
	/* The buffer pointer (bus address published via lio_map_ring()). */
	uint64_t	buffer_ptr;

	/* The Info pointer */
	uint64_t	info_ptr;
};

#define LIO_DROQ_DESC_SIZE	(sizeof(struct lio_droq_desc))

/*
 * Information about packet DMA'ed by Octeon.
 * The format of the information available at Info Pointer after Octeon
 * has posted a packet. Not all descriptors have valid information. Only
 * the Info field of the first descriptor for a packet has information
 * about the packet.  The two 64-bit words are byte-swapped by the driver
 * (lio_swap_8B_data()) before use.
 */
struct lio_droq_info {
	/* The Length of the packet. */
	uint64_t	length;

	/* The Output Receive Header. */
	union octeon_rh	rh;
};

#define LIO_DROQ_INFO_SIZE	(sizeof(struct lio_droq_info))
/*
 * Pointer to data buffer.
 * Driver keeps a pointer to the data buffer that it made available to
 * the Octeon device. Since the descriptor ring keeps physical (bus)
 * addresses, this field is required for the driver to keep track of
 * the virtual address pointers.
 */
struct lio_recv_buffer {
	/* Packet buffer, including metadata. */
	void	*buffer;

	/* Data in the packet buffer. */
	uint8_t	*data;
};

#define LIO_DROQ_RECVBUF_SIZE	(sizeof(struct lio_recv_buffer))

/* Output Queue statistics.  One instance per output queue. */
struct lio_droq_stats {
	/* Number of packets received in this queue. */
	uint64_t	pkts_received;

	/* Bytes received by this queue. */
	uint64_t	bytes_received;

	/* Packets dropped due to no dispatch function. */
	uint64_t	dropped_nodispatch;

	/* Packets dropped due to no memory available. */
	uint64_t	dropped_nomem;

	/* Packets dropped due to large number of pkts to process. */
	uint64_t	dropped_toomany;

	/* Number of packets sent to stack from this queue. */
	uint64_t	rx_pkts_received;

	/* Number of Bytes sent to stack from this queue. */
	uint64_t	rx_bytes_received;

	/* Num of Packets dropped due to receive path failures. */
	uint64_t	rx_dropped;

	/* Num of packets with VXLAN encapsulation seen on receive. */
	uint64_t	rx_vxlan;

	/* Num of failures of lio_recv_buffer_alloc() */
	uint64_t	rx_alloc_failure;

};
/*
 * The maximum number of buffers that can be dispatched from the
 * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that
 * max packet size from DROQ is 64K.
 */
#define LIO_MAX_RECV_BUFS	64

/*
 * Receive Packet format used when dispatching output queue packets
 * with non-raw opcodes.
 * The received packet will be sent to the upper layers using this
 * structure which is passed as a parameter to the dispatch function
 */
struct lio_recv_pkt {
	/* Number of buffers in this received packet */
	uint16_t	buffer_count;

	/* Id of the device that is sending the packet up */
	uint16_t	octeon_id;

	/* Length of data in the packet buffer */
	uint32_t	length;

	/* The receive header */
	union octeon_rh	rh;

	/* Pointer to the OS-specific packet buffer */
	struct mbuf	*buffer_ptr[LIO_MAX_RECV_BUFS];

	/* Size of the buffers pointed to by ptr's in buffer_ptr */
	uint32_t	buffer_size[LIO_MAX_RECV_BUFS];
};

#define LIO_RECV_PKT_SIZE	(sizeof(struct lio_recv_pkt))

/*
 * The first parameter of a dispatch function.
 * For a raw mode opcode, the driver dispatches with the device
 * pointer in this structure.
 * For non-raw mode opcode, the driver dispatches the recv_pkt
 * created to contain the buffers with data received from Octeon.
 * Layout of the single allocation made by lio_alloc_recv_info():
 * ---------------------
 * | lio_recv_info     |
 * |   *recv_pkt ---|--+---
 * |-------------------|  |
 * | lio_recv_pkt      |<-/
 * |-------------------|
 * | 0 or more bytes   |
 * | reserved by driver|   <- recv_info->rsvd
 * |___________________|
 */
struct lio_recv_info {
	void			*rsvd;
	struct lio_recv_pkt	*recv_pkt;
};

#define LIO_RECV_INFO_SIZE	(sizeof(struct lio_recv_info))
/*
 * Allocate a recv_info structure together with its recv_pkt and an
 * optional caller-reserved area, in one zeroed allocation laid out as
 * [lio_recv_info][lio_recv_pkt][extra_bytes].
 * @param extra_bytes - size of the reserved area placed after recv_pkt;
 *                      recv_info->rsvd points at it (NULL if zero).
 * @return - pointer to a newly allocated recv_info structure, or NULL
 *           if the allocation failed.
 */
static inline struct lio_recv_info *
lio_alloc_recv_info(int extra_bytes)
{
	struct lio_recv_info *ri;
	uint8_t *base;

	base = malloc(LIO_RECV_PKT_SIZE + LIO_RECV_INFO_SIZE + extra_bytes,
		      M_DEVBUF, M_NOWAIT | M_ZERO);
	if (base == NULL)
		return (NULL);

	ri = (struct lio_recv_info *)base;
	ri->recv_pkt = (struct lio_recv_pkt *)(base + LIO_RECV_INFO_SIZE);
	ri->rsvd = (extra_bytes != 0) ?
	    base + LIO_RECV_INFO_SIZE + LIO_RECV_PKT_SIZE : NULL;

	return (ri);
}
/*
 * Free a recv_info structure.
 * The recv_pkt and rsvd areas are part of the same allocation (see
 * lio_alloc_recv_info()), so this single free releases everything.
 * @param recv_info - Pointer to receive_info to be freed
 */
static inline void
lio_free_recv_info(struct lio_recv_info *recv_info)
{
	free(recv_info, M_DEVBUF);
}
/* Dispatch function: receives a lio_recv_info and a user argument. */
typedef int (*lio_dispatch_fn_t)(struct lio_recv_info *, void *);

/*
 * Used by NIC module to register packet handler and to get device
 * information for each octeon device.
 */
struct lio_droq_ops {
	/*
	 * This registered function will be called by the driver with
	 * the pointer to buffer from droq and length of
	 * data in the buffer. The receive header gives the port
	 * number to the caller. Function pointer is set by caller.
	 * Arguments: (mbuf, packet length, receive header, droq, farg).
	 */
	void	(*fptr) (void *, uint32_t, union octeon_rh *, void *,
			 void *);
	/* Opaque argument passed back as the last parameter of fptr. */
	void	*farg;

	/*
	 * Flag indicating if the DROQ handler should drop packets that
	 * it cannot handle in one iteration. Set by caller.
	 */
	uint32_t	drop_on_max;
};
/*
 * The Descriptor Ring Output Queue structure.
 * This structure has all the information required to implement a
 * Octeon DROQ.
 */
struct lio_droq {
	/* A lock to protect access to this ring. */
	struct mtx	lock;

	/* Queue number of this DROQ on the device. */
	uint32_t	q_no;

	uint32_t	pkt_count;

	/* Handler ops registered via lio_register_droq_ops(). */
	struct lio_droq_ops	ops;

	/* Back pointer to the owning device; NULL while slot is unused. */
	struct octeon_device	*oct_dev;

	/* The 8B aligned descriptor ring starts at this address. */
	struct lio_droq_desc	*desc_ring;

	/* Index in the ring where the driver should read the next packet */
	uint32_t	read_idx;

	/*
	 * Index in the ring where the driver will refill the descriptor's
	 * buffer
	 */
	uint32_t	refill_idx;

	/* Packets pending to be processed */
	volatile int	pkts_pending;

	/* Number of descriptors in this ring. */
	uint32_t	max_count;

	/* The number of descriptors pending refill. */
	uint32_t	refill_count;

	uint32_t	pkts_per_intr;

	/* Refill the ring once this many descriptors need new buffers. */
	uint32_t	refill_threshold;

	/*
	 * The max number of descriptors in DROQ without a buffer.
	 * This field is used to keep track of empty space threshold. If the
	 * refill_count reaches this value, the DROQ cannot accept a max-sized
	 * (64K) packet.
	 */
	uint32_t	max_empty_descs;

	/*
	 * The receive buffer list. This list has the virtual addresses of
	 * the buffers.
	 */
	struct lio_recv_buffer	*recv_buf_list;

	/* The size of each buffer pointed by the buffer pointer. */
	uint32_t	buffer_size;

	/*
	 * Offset to packet credit register.
	 * Host writes number of info/buffer ptrs available to this register
	 */
	uint32_t	pkts_credit_reg;

	/*
	 * Offset packet sent register.
	 * Octeon writes the number of packets DMA'ed to host memory
	 * in this register.
	 */
	uint32_t	pkts_sent_reg;

	/* Slow-path packets queued for dispatch after the lock is dropped. */
	struct lio_stailq_head	dispatch_stq_head;

	/* Statistics for this DROQ. */
	struct lio_droq_stats	stats;

	/* DMA mapped address of the DROQ descriptor ring. */
	vm_paddr_t	desc_ring_dma;

	/* application context */
	void	*app_ctx;

	/* CPU this queue's deferred work is bound to — TODO confirm usage. */
	uint32_t	cpu_id;

	/* Deferred processing task and its queue (see lio_droq_bh()). */
	struct task	droq_task;
	struct taskqueue	*droq_taskqueue;

	/* Software LRO state; flushed in the fast receive path. */
	struct lro_ctrl	lro;
};

#define LIO_DROQ_SIZE	(sizeof(struct lio_droq))
/*
* Allocates space for the descriptor ring for the droq and sets the
* base addr, num desc etc in Octeon registers.
*
* @param oct_dev - pointer to the octeon device structure
* @param q_no - droq no.
* @param app_ctx - pointer to application context
* @return Success: 0 Failure: 1
*/
int lio_init_droq(struct octeon_device *oct_dev,
uint32_t q_no, uint32_t num_descs, uint32_t desc_size,
void *app_ctx);
/*
* Frees the space for descriptor ring for the droq.
*
* @param oct_dev - pointer to the octeon device structure
* @param q_no - droq no.
* @return: Success: 0 Failure: 1
*/
int lio_delete_droq(struct octeon_device *oct_dev, uint32_t q_no);
/*
* Register a change in droq operations. The ops field has a pointer to a
* function which will called by the DROQ handler for all packets arriving
* on output queues given by q_no irrespective of the type of packet.
* The ops field also has a flag which if set tells the DROQ handler to
* drop packets if it receives more than what it can process in one
* invocation of the handler.
* @param oct - octeon device
* @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1
* @param ops - the droq_ops settings for this queue
* @return - 0 on success, -ENODEV or -EINVAL on error.
*/
int lio_register_droq_ops(struct octeon_device *oct, uint32_t q_no,
struct lio_droq_ops *ops);
/*
* Resets the function pointer and flag settings made by
* lio_register_droq_ops(). After this routine is called, the DROQ handler
* will lookup dispatch function for each arriving packet on the output queue
* given by q_no.
* @param oct - octeon device
* @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1
* @return - 0 on success, -ENODEV or -EINVAL on error.
*/
int lio_unregister_droq_ops(struct octeon_device *oct, uint32_t q_no);
/*
* Register a dispatch function for a opcode/subcode. The driver will call
* this dispatch function when it receives a packet with the given
* opcode/subcode in its output queues along with the user specified
* argument.
* @param oct - the octeon device to register with.
* @param opcode - the opcode for which the dispatch will be registered.
* @param subcode - the subcode for which the dispatch will be registered
* @param fn - the dispatch function.
* @param fn_arg - user specified that will be passed along with the
* dispatch function by the driver.
* @return Success: 0; Failure: 1
*/
int lio_register_dispatch_fn(struct octeon_device *oct, uint16_t opcode,
uint16_t subcode, lio_dispatch_fn_t fn,
void *fn_arg);
/*
* Remove registration for an opcode/subcode. This will delete the mapping for
* an opcode/subcode. The dispatch function will be unregistered and will no
* longer be called if a packet with the opcode/subcode arrives in the driver
* output queues.
* @param oct - the octeon device to unregister from.
* @param opcode - the opcode to be unregistered.
* @param subcode - the subcode to be unregistered.
*
* @return Success: 0; Failure: 1
*/
int lio_unregister_dispatch_fn(struct octeon_device *oct, uint16_t opcode,
uint16_t subcode);
uint32_t lio_droq_check_hw_for_pkts(struct lio_droq *droq);
int lio_create_droq(struct octeon_device *oct, uint32_t q_no,
uint32_t num_descs, uint32_t desc_size, void *app_ctx);
int lio_droq_process_packets(struct octeon_device *oct,
struct lio_droq *droq, uint32_t budget);
uint32_t lio_droq_refill(struct octeon_device *octeon_dev,
struct lio_droq *droq);
void lio_droq_bh(void *ptr, int pending __unused);
#endif /* __LIO_DROQ_H__ */

View File

@ -0,0 +1,69 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _LIO_IMAGE_H_
#define _LIO_IMAGE_H_

/* Pieces used to build the firmware file name: lio_<type>.bin */
#define LIO_MAX_FW_FILENAME_LEN		256
#define LIO_FW_BASE_NAME		"lio_"
#define LIO_FW_NAME_SUFFIX		".bin"
#define LIO_FW_NAME_TYPE_NIC		"nic"
#define LIO_FW_NAME_TYPE_NONE		"none"
#define LIO_MAX_FIRMWARE_VERSION_LEN	16

#define LIO_MAX_BOOTCMD_LEN		1024
#define LIO_MAX_IMAGES			16
#define LIO_NIC_MAGIC			0x434E4943	/* "CNIC" */

/* Describes one binary image within the firmware file. */
struct lio_firmware_desc {
	__be64	addr;		/* Load address on the device. */
	__be32	len;
	__be32	crc32;		/* crc32 of image */
};

/*
 * Following the header is a list of 64-bit aligned binary images,
 * as described by the desc field.
 * Numeric fields are in network byte order.
 */
struct lio_firmware_file_header {
	__be32	magic;		/* Must equal LIO_NIC_MAGIC. */
	char	version[LIO_MAX_FIRMWARE_VERSION_LEN];
	char	bootcmd[LIO_MAX_BOOTCMD_LEN];
	__be32	num_images;
	struct lio_firmware_desc	desc[LIO_MAX_IMAGES];
	__be32	pad;
	__be32	crc32;		/* header checksum */
};

#endif /* _LIO_IMAGE_H_ */

View File

@ -0,0 +1,342 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file lio_iq.h
* \brief Host Driver: Implementation of Octeon input queues. "Input" is
* with respect to the Octeon device on the NIC. From this driver's
* point of view they are egress queues.
*/
#ifndef __LIO_IQ_H__
#define __LIO_IQ_H__

/* Status codes returned by the IQ post/send path. */
#define LIO_IQ_SEND_OK		0
#define LIO_IQ_SEND_STOP	1
#define LIO_IQ_SEND_FAILED	-1

/*-------------------------  INSTRUCTION QUEUE --------------------------*/

/* Request types recorded with each queued buffer (see lio_request_list). */
#define LIO_REQTYPE_NONE		0
#define LIO_REQTYPE_NORESP_NET		1
#define LIO_REQTYPE_NORESP_NET_SG	2
#define LIO_REQTYPE_RESP_NET		3
#define LIO_REQTYPE_SOFT_COMMAND	4
/*
 * This structure is used by NIC driver to store information required
 * to free the mbuf when the packet has been fetched by Octeon.
 * Bytes offset below assume worst-case of a 64-bit system.
 */
struct lio_mbuf_free_info {
	/* Pointer to mbuf. */
	struct mbuf	*mb;

	/* Pointer to gather list. */
	struct lio_gather	*g;

	/* DMA map covering the mbuf; unloaded before the mbuf is freed. */
	bus_dmamap_t	map;
};
/* Per-descriptor bookkeeping for buffers posted to an instruction queue. */
struct lio_request_list {
	uint32_t	reqtype;	/* One of the LIO_REQTYPE_* values. */
	void		*buf;
	bus_dmamap_t	map;
	struct lio_mbuf_free_info	finfo;
};
/* Input Queue statistics.  One instance per input queue. */
struct lio_iq_stats {
	uint64_t	instr_posted;	/**< Instructions posted to this queue. */
	uint64_t	instr_processed;	/**< Instructions processed in this queue. */
	uint64_t	instr_dropped;	/**< Instructions that could not be processed */
	uint64_t	bytes_sent;	/**< Bytes sent through this queue. */
	uint64_t	sgentry_sent;	/**< Gather entries sent through this queue. */
	uint64_t	tx_done;	/**< Num of packets sent to network. */
	uint64_t	tx_iq_busy;	/**< Num of times this iq was found to be full. */
	uint64_t	tx_dropped;	/**< Num of pkts dropped due to xmit-path errors. */
	uint64_t	tx_tot_bytes;	/**< Total count of bytes sent to network. */
	uint64_t	tx_gso;		/* count of tso */
	uint64_t	tx_vxlan;	/* tunnel */
	uint64_t	tx_dmamap_fail;	/* DMA map failures on transmit. */
	uint64_t	tx_restart;	/* Times the tx queue was restarted. */
	uint64_t	mbuf_defrag_failed;	/* m_defrag() failures on transmit. */
};
/*
 * The instruction (input) queue.
 * The input queue is used to post raw (instruction) mode data or packet
 * data to Octeon device from the host. Each input queue for
 * a Octeon device has one such structure to represent it.
 */
struct lio_instr_queue {
	struct octeon_device	*oct_dev;

	/* A lock to protect access to the input ring. */
	struct mtx	lock;

	/* A lock to protect while enqueue to the input ring. */
	struct mtx	enq_lock;

	/* A lock to protect while posting on the ring. */
	struct mtx	post_lock;

	uint32_t	pkt_in_done;

	/* A lock held while a flush of this queue is running. */
	struct mtx	iq_flush_running_lock;

	/* Flag that indicates if the queue uses 64 byte commands. */
	uint32_t	iqcmd_64B:1;

	/* Queue info. */
	union octeon_txpciq	txpciq;

	uint32_t	rsvd:17;

	uint32_t	status:8;

	/* Maximum no. of instructions in this queue. */
	uint32_t	max_count;

	/* Index in input ring where the driver should write the next packet */
	uint32_t	host_write_index;

	/*
	 * Index in input ring where Octeon is expected to read the next
	 * packet.
	 */
	uint32_t	octeon_read_index;

	/*
	 * This index aids in finding the window in the queue where Octeon
	 * has read the commands.
	 */
	uint32_t	flush_index;

	/* This field keeps track of the instructions pending in this queue. */
	volatile int	instr_pending;

	uint32_t	reset_instr_cnt;

	/* Pointer to the Virtual Base addr of the input ring. */
	uint8_t		*base_addr;

	/* DMA tag used to map transmit buffers for this queue. */
	bus_dma_tag_t	txtag;

	/* Per-descriptor bookkeeping (buffer, type, DMA map). */
	struct lio_request_list	*request_list;

	/* Software ring buffering packets headed for this queue. */
	struct buf_ring	*br;

	/* Octeon doorbell register for the ring. */
	uint32_t	doorbell_reg;

	/* Octeon instruction count register for this ring. */
	uint32_t	inst_cnt_reg;

	/* Number of instructions pending to be posted to Octeon. */
	uint32_t	fill_cnt;

	/* The last time that the doorbell was rung. */
	uint64_t	last_db_time;

	/*
	 * The doorbell timeout. If the doorbell was not rung for this time
	 * and fill_cnt is non-zero, ring the doorbell again.
	 */
	uint32_t	db_timeout;

	/* Statistics for this input queue. */
	struct lio_iq_stats	stats;

	/* DMA mapped base address of the input descriptor ring. */
	uint64_t	base_addr_dma;

	/* Application context */
	void		*app_ctx;

	/* network stack queue index */
	int		q_index;

	/* os ifidx associated with this queue */
	int		ifidx;
};
/*----------------------  INSTRUCTION FORMAT ----------------------------*/

/* 64-byte instruction as laid out in the ring (hardware format). */
struct lio_instr3_64B {
	/* Pointer where the input data is available. */
	uint64_t	dptr;

	/* Instruction Header. */
	uint64_t	ih3;

	/* Instruction Header. */
	uint64_t	pki_ih3;

	/* Input Request Header. */
	uint64_t	irh;

	/* opcode/subcode specific parameters */
	uint64_t	ossp[2];

	/* Return Data Parameters */
	uint64_t	rdp;

	/*
	 * Pointer where the response for a RAW mode packet will be written
	 * by Octeon.
	 */
	uint64_t	rptr;

};

/* Only the 64-byte CN23xx format is supported by this driver. */
union lio_instr_64B {
	struct lio_instr3_64B	cmd3;
};
/* The size of each buffer in soft command buffer pool */
#define LIO_SOFT_COMMAND_BUFFER_SIZE	2048

/*
 * A soft (control) command: the 64-byte instruction plus the host-side
 * data/return/context buffers and completion callback associated with it.
 */
struct lio_soft_command {
	/* Soft command buffer info. */
	struct lio_stailq_node	node;
	uint64_t	dma_addr;
	uint32_t	size;

	/* Command and return status */
	union lio_instr_64B	cmd;

/* Sentinel the device overwrites when the command completes. */
#define COMPLETION_WORD_INIT	0xffffffffffffffffULL
	uint64_t	*status_word;

	/* Data buffer info */
	void		*virtdptr;
	uint64_t	dmadptr;
	uint32_t	datasize;

	/* Return buffer info */
	void		*virtrptr;
	uint64_t	dmarptr;
	uint32_t	rdatasize;

	/* Context buffer info */
	void		*ctxptr;
	uint32_t	ctxsize;

	/* Time out and callback */
	int		wait_time;
	int		timeout;
	uint32_t	iq_no;
	void		(*callback) (struct octeon_device *, uint32_t,
				     void *);
	void		*callback_arg;
};
/* Maximum number of buffers to allocate into soft command buffer pool */
#define LIO_MAX_SOFT_COMMAND_BUFFERS	256
/* Head of a soft command buffer pool. */
struct lio_sc_buffer_pool {
	/* List structure to add delete pending entries to */
	struct lio_stailq_head head;
	/* A lock for this response list */
	struct mtx lock;
	/* Number of buffers currently handed out by lio_alloc_soft_command() */
	volatile uint32_t alloc_buf_count;
};
/*
 * Add "count" to the "field" statistic of input queue "iq_no".
 * All arguments are parenthesized so expression arguments (e.g.
 * "a + b" for count) expand with the intended precedence.
 */
#define LIO_INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
	(((octeon_dev_ptr)->instr_queue[(iq_no)]->stats.field) += (count))
/* Pre-allocate the pool of soft command buffers.  Returns 0 on success. */
int	lio_setup_sc_buffer_pool(struct octeon_device *oct);
/* Free every buffer in the soft command pool.  Returns 0 on success. */
int	lio_free_sc_buffer_pool(struct octeon_device *oct);
/*
 * Take a buffer from the soft command pool and carve it into context,
 * data and return-data regions.  Returns NULL if the pool is empty.
 */
struct lio_soft_command	*lio_alloc_soft_command(struct octeon_device *oct,
						uint32_t datasize,
						uint32_t rdatasize,
						uint32_t ctxsize);
/* Return a soft command buffer to the pool. */
void	lio_free_soft_command(struct octeon_device *oct,
			      struct lio_soft_command *sc);
/*
 * lio_init_instr_queue()
 * @param octeon_dev      - pointer to the octeon device structure.
 * @param txpciq          - queue to be initialized (0 <= q_no <= 3).
 *
 * Called at driver init time for each input queue. iq_conf has the
 * configuration parameters for the queue.
 *
 * @return  Success: 0   Failure: 1
 */
int	lio_init_instr_queue(struct octeon_device *octeon_dev,
			     union octeon_txpciq txpciq, uint32_t num_descs);
/*
 * lio_delete_instr_queue()
 * @param octeon_dev      - pointer to the octeon device structure.
 * @param iq_no           - queue to be deleted
 *
 * Called at driver unload time for each input queue. Deletes all
 * allocated resources for the input queue.
 *
 * @return  Success: 0   Failure: 1
 */
int	lio_delete_instr_queue(struct octeon_device *octeon_dev,
			       uint32_t iq_no);
/* Wait (bounded retries) until all posted instructions are fetched. */
int	lio_wait_for_instr_fetch(struct octeon_device *oct);
/* Reclaim request_list entries for instructions the device has fetched. */
int	lio_process_iq_request_list(struct octeon_device *oct,
				    struct lio_instr_queue *iq,
				    uint32_t budget);
/* Post one command to queue iq_no; returns an LIO_IQ_SEND_* status. */
int	lio_send_command(struct octeon_device *oct, uint32_t iq_no,
			 uint32_t force_db, void *cmd, void *buf,
			 uint32_t datasize, uint32_t reqtype);
/* Fill in the instruction headers of a soft command before sending. */
void	lio_prepare_soft_command(struct octeon_device *oct,
				 struct lio_soft_command *sc,
				 uint8_t opcode, uint8_t subcode,
				 uint32_t irh_ossp, uint64_t ossp0,
				 uint64_t ossp1);
/* Queue a prepared soft command; returns an LIO_IQ_SEND_* status. */
int	lio_send_soft_command(struct octeon_device *oct,
			      struct lio_soft_command *sc);
/* Create (or re-bind) input queue iq_no for interface ifidx. */
int	lio_setup_iq(struct octeon_device *oct, int ifidx,
		     int q_index, union octeon_txpciq iq_no,
		     uint32_t num_descs);
/* Flush up to "budget" completed instructions; 0 means no limit. */
int	lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
		     uint32_t budget);
#endif	/* __LIO_IQ_H__ */

View File

@ -0,0 +1,248 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_mem_ops.h"
#define MEMOPS_IDX LIO_MAX_BAR1_MAP_INDEX
#if BYTE_ORDER == BIG_ENDIAN
/*
 * Flip the byte-swap bit (bit 1) of the BAR1 index register "idx".
 * Used to bracket 64-bit burst copies on big-endian hosts; on
 * little-endian builds this compiles to nothing (see #else below).
 */
static inline void
lio_toggle_bar1_swapmode(struct octeon_device *oct, uint32_t idx)
{
	uint32_t mask;

	mask = oct->fn_list.bar1_idx_read(oct, idx);
	mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
	oct->fn_list.bar1_idx_write(oct, idx, mask);
}
#else	/* BYTE_ORDER != BIG_ENDIAN */
#define lio_toggle_bar1_swapmode(oct, idx)
#endif	/* BYTE_ORDER == BIG_ENDIAN */
/* Write one byte at offset "reg" of the BAR1 window (low 8 bits of val). */
static inline void
lio_write_bar1_mem8(struct octeon_device *oct, uint32_t reg, uint64_t val)
{
	bus_space_write_1(oct->mem_bus_space[1].tag,
			  oct->mem_bus_space[1].handle, reg, val);
}
/* Read a 64-bit word at offset "reg" of the BAR1 window. */
static inline uint64_t
lio_read_bar1_mem64(struct octeon_device *oct, uint32_t reg)
{
	return (bus_space_read_8(oct->mem_bus_space[1].tag,
				 oct->mem_bus_space[1].handle, reg));
}
/* Write a 64-bit word at offset "reg" of the BAR1 window. */
static inline void
lio_write_bar1_mem64(struct octeon_device *oct, uint32_t reg, uint64_t val)
{
	bus_space_write_8(oct->mem_bus_space[1].tag,
			  oct->mem_bus_space[1].handle, reg, val);
}
/*
 * Copy "len" bytes from hostbuf into the BAR1 window starting at
 * "offset".  Leading/trailing bytes are written one at a time until/after
 * the window offset is 8-byte aligned; the aligned middle is written as
 * 64-bit words with the BAR1 swap mode toggled (big-endian hosts only).
 *
 * Note: only the device-side "offset" is aligned by the first loop;
 * "hostbuf" may still be unaligned, so the 64-bit source words are
 * staged through memcpy() instead of dereferencing hostbuf as a
 * uint64_t * (which would be undefined behavior on a misaligned
 * pointer).
 */
static void
lio_pci_fastwrite(struct octeon_device *oct, uint32_t offset,
		  uint8_t *hostbuf, uint32_t len)
{
	uint64_t word;

	while ((len) && ((unsigned long)offset) & 7) {
		lio_write_bar1_mem8(oct, offset++, *(hostbuf++));
		len--;
	}

	lio_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		/* Stage through "word" to avoid a misaligned 64-bit load. */
		memcpy(&word, hostbuf, sizeof(word));
		lio_write_bar1_mem64(oct, offset, word);
		offset += 8;
		hostbuf += 8;
		len -= 8;
	}

	lio_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		lio_write_bar1_mem8(oct, offset++, *(hostbuf++));
}
/* Read one byte at offset "reg" of the BAR1 window (zero-extended). */
static inline uint64_t
lio_read_bar1_mem8(struct octeon_device *oct, uint32_t reg)
{
	return (bus_space_read_1(oct->mem_bus_space[1].tag,
				 oct->mem_bus_space[1].handle, reg));
}
/*
 * Copy "len" bytes from the BAR1 window starting at "offset" into
 * hostbuf.  Mirror image of lio_pci_fastwrite(): byte reads until the
 * window offset is 8-byte aligned, 64-bit reads for the aligned middle
 * (bracketed by the swap-mode toggle on big-endian hosts), byte reads
 * for the tail.
 *
 * "hostbuf" may be unaligned even once "offset" is aligned, so the
 * 64-bit words are stored via memcpy() instead of writing through a
 * cast uint64_t * (undefined behavior when misaligned).
 */
static void
lio_pci_fastread(struct octeon_device *oct, uint32_t offset,
		 uint8_t *hostbuf, uint32_t len)
{
	uint64_t word;

	while ((len) && ((unsigned long)offset) & 7) {
		*(hostbuf++) = lio_read_bar1_mem8(oct, offset++);
		len--;
	}

	lio_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		word = lio_read_bar1_mem64(oct, offset);
		/* Stage through "word" to avoid a misaligned 64-bit store. */
		memcpy(hostbuf, &word, sizeof(word));
		offset += 8;
		hostbuf += 8;
		len -= 8;
	}

	lio_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		*(hostbuf++) = lio_read_bar1_mem8(oct, offset++);
}
/* Core mem read/write with temporary bar1 settings. */
/* op = 1 to read, op = 0 to write. */
static void
lio_pci_rw_core_mem(struct octeon_device *oct, uint64_t addr,
		    uint8_t *hostbuf, uint32_t len, uint32_t op)
{
	uint64_t static_mapping_base;
	uint32_t copy_len = 0, index_reg_val = 0;
	uint32_t offset;

	/*
	 * Fast path: if the target address falls inside the 4 MB region
	 * that was statically mapped for the console, use its existing
	 * BAR1 index without touching the index registers (no lock
	 * needed).
	 */
	static_mapping_base = oct->console_nb_info.dram_region_base;
	if (static_mapping_base && static_mapping_base ==
	    (addr & 0xFFFFFFFFFFC00000ULL)) {
		int bar1_index = oct->console_nb_info.bar1_index;

		offset = (bar1_index << 22) + (addr & 0x3fffff);
		if (op)
			lio_pci_fastread(oct, offset, hostbuf, len);
		else
			lio_pci_fastwrite(oct, offset, hostbuf, len);

		return;
	}

	/* Serialize users of the shared MEMOPS_IDX mapping window. */
	mtx_lock(&oct->mem_access_lock);

	/* Save the original index reg value. */
	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
	do {
		/* Re-point the 4 MB window at the region containing addr. */
		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
		offset = (MEMOPS_IDX << 22) + (addr & 0x3fffff);

		/*
		 * If operation crosses a 4MB boundary, split the transfer
		 * at the 4MB boundary.
		 */
		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
			/*
			 * NOTE(review): this arithmetic equals
			 * (MEMOPS_IDX << 22) - (addr & 0x3fffff); it only
			 * matches the distance to the 4 MB window end when
			 * MEMOPS_IDX == 1 — confirm LIO_MAX_BAR1_MAP_INDEX.
			 */
			copy_len = (uint32_t)(((addr & ~(0x3fffff)) +
					       (MEMOPS_IDX << 22)) - addr);
		} else {
			copy_len = len;
		}

		if (op) {	/* read from core */
			lio_pci_fastread(oct, offset, hostbuf,
					 copy_len);
		} else {
			lio_pci_fastwrite(oct, offset, hostbuf,
					  copy_len);
		}

		len -= copy_len;
		addr += copy_len;
		hostbuf += copy_len;
	} while (len);

	/* Restore the saved mapping before releasing the lock. */
	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);

	mtx_unlock(&oct->mem_access_lock);
}
/* Read "len" bytes of Octeon core memory at coreaddr into buf. */
void
lio_pci_read_core_mem(struct octeon_device *oct, uint64_t coreaddr,
		      uint8_t *buf, uint32_t len)
{
	lio_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
}
/* Write "len" bytes from buf into Octeon core memory at coreaddr. */
void
lio_pci_write_core_mem(struct octeon_device *oct, uint64_t coreaddr,
		       uint8_t *buf, uint32_t len)
{
	lio_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
}
/*
 * Fetch a 64-bit big-endian word from Octeon core memory and return it
 * converted to host byte order.
 */
uint64_t
lio_read_device_mem64(struct octeon_device *oct, uint64_t coreaddr)
{
	__be64 wire_val;

	lio_pci_rw_core_mem(oct, coreaddr, (uint8_t *)&wire_val,
			    sizeof(wire_val), 1);

	return (be64toh(wire_val));
}
/*
 * Fetch a 32-bit big-endian word from Octeon core memory and return it
 * converted to host byte order.
 */
uint32_t
lio_read_device_mem32(struct octeon_device *oct, uint64_t coreaddr)
{
	__be32 wire_val;

	lio_pci_rw_core_mem(oct, coreaddr, (uint8_t *)&wire_val,
			    sizeof(wire_val), 1);

	return (be32toh(wire_val));
}
/*
 * Store a 32-bit host-order value into Octeon core memory, converting
 * to the device's big-endian byte order first.
 */
void
lio_write_device_mem32(struct octeon_device *oct, uint64_t coreaddr,
		       uint32_t val)
{
	__be32 wire_val = htobe32(val);

	lio_pci_rw_core_mem(oct, coreaddr, (uint8_t *)&wire_val,
			    sizeof(wire_val), 0);
}

View File

@ -0,0 +1,85 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/*
* \file lio_mem_ops.h
* \brief Host Driver: Routines used to read/write Octeon memory.
*/
#ifndef __LIO_MEM_OPS_H__
#define __LIO_MEM_OPS_H__
/*
* Read a 64-bit value from a BAR1 mapped core memory address.
* @param oct - pointer to the octeon device.
* @param core_addr - the address to read from.
*
* The range_idx gives the BAR1 index register for the range of address
* in which core_addr is mapped.
*
* @return 64-bit value read from Core memory
*/
uint64_t lio_read_device_mem64(struct octeon_device *oct,
uint64_t core_addr);
/*
* Read a 32-bit value from a BAR1 mapped core memory address.
* @param oct - pointer to the octeon device.
* @param core_addr - the address to read from.
*
* @return 32-bit value read from Core memory
*/
uint32_t lio_read_device_mem32(struct octeon_device *oct,
uint64_t core_addr);
/*
* Write a 32-bit value to a BAR1 mapped core memory address.
* @param oct - pointer to the octeon device.
* @param core_addr - the address to write to.
* @param val - 32-bit value to write.
*/
void lio_write_device_mem32(struct octeon_device *oct,
uint64_t core_addr, uint32_t val);
/* Read multiple bytes from Octeon memory. */
void lio_pci_read_core_mem(struct octeon_device *oct,
uint64_t coreaddr, uint8_t *buf,
uint32_t len);
/* Write multiple bytes into Octeon memory. */
void lio_pci_write_core_mem(struct octeon_device *oct,
uint64_t coreaddr, uint8_t *buf,
uint32_t len);
#endif /* __LIO_MEM_OPS_H__ */

View File

@ -0,0 +1,857 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_rxtx.h"
/* Result of posting one command: an LIO_IQ_SEND_* status plus the ring
 * slot index the command was written to (-1 on failure). */
struct lio_iq_post_status {
	int status;
	int index;
};
static void lio_check_db_timeout(void *arg, int pending);
static void __lio_check_db_timeout(struct octeon_device *oct,
uint64_t iq_no);
/*
 * Allocate and initialize input queue txpciq.s.q_no: descriptor ring
 * DMA memory, per-descriptor request list and DMA maps, locks, the
 * doorbell-timeout taskqueue, and the transmit buf_ring.
 *
 * On failure intermediate allocations are NOT released here; the
 * caller (lio_setup_iq()) invokes lio_delete_instr_queue() to clean up.
 * Return 0 on success, 1 on failure.
 */
int
lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
		     uint32_t num_descs)
{
	struct lio_instr_queue	*iq;
	struct lio_iq_config	*conf = NULL;
	struct lio_tq		*db_tq;
	struct lio_request_list	*request_buf;
	bus_size_t		max_size;
	uint32_t		iq_no = (uint32_t)txpciq.s.q_no;
	uint32_t		q_size;
	int			error, i;

	/* Only the CN23xx PF has a queue configuration in this driver. */
	if (LIO_CN23XX_PF(oct))
		conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)));
	if (conf == NULL) {
		lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id);
		return (1);
	}

	q_size = (uint32_t)conf->instr_type * num_descs;
	iq = oct->instr_queue[iq_no];
	iq->oct_dev = oct;

	max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs;

	/* DMA tag for mapping transmit mbufs posted to this queue. */
	error = bus_dma_tag_create(bus_get_dma_tag(oct->device),	/* parent */
				   1, 0,				/* alignment, bounds */
				   BUS_SPACE_MAXADDR,			/* lowaddr */
				   BUS_SPACE_MAXADDR,			/* highaddr */
				   NULL, NULL,				/* filter, filterarg */
				   max_size,				/* maxsize */
				   LIO_MAX_SG,				/* nsegments */
				   PAGE_SIZE,				/* maxsegsize */
				   0,					/* flags */
				   NULL,				/* lockfunc */
				   NULL,				/* lockfuncarg */
				   &iq->txtag);
	if (error) {
		lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return (1);
	}

	iq->base_addr = lio_dma_alloc(q_size, &iq->base_addr_dma);
	if (!iq->base_addr) {
		lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return (1);
	}

	iq->max_count = num_descs;

	/*
	 * Initialize a list to holds requests that have been posted to
	 * Octeon but has yet to be fetched by octeon
	 */
	iq->request_list = malloc(sizeof(*iq->request_list) * num_descs,
				  M_DEVBUF, M_NOWAIT | M_ZERO);
	if (iq->request_list == NULL) {
		lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n",
			    iq_no);
		return (1);
	}

	lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %lx count: %d\n",
		    iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	/* Create the descriptor buffer dma maps */
	request_buf = iq->request_list;
	for (i = 0; i < num_descs; i++, request_buf++) {
		error = bus_dmamap_create(iq->txtag, 0, &request_buf->map);
		if (error) {
			lio_dev_err(oct, "Unable to create TX DMA map\n");
			return (1);
		}
	}

	iq->txpciq.txpciq64 = txpciq.txpciq64;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->db_timeout = (uint32_t)conf->db_timeout;
	atomic_store_rel_int(&iq->instr_pending, 0);

	/* Initialize the lock for this instruction queue */
	mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF);
	mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF);
	mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF);
	mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL,
		 MTX_DEF);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	/* Periodic task that rings the doorbell if commands sit too long. */
	db_tq = &oct->check_db_tq[iq_no];
	db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
				     taskqueue_thread_enqueue, &db_tq->tq);
	if (db_tq->tq == NULL) {
		lio_dev_err(oct, "check db wq create failed for iq %d\n",
			    iq_no);
		return (1);
	}

	TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
			  (void *)db_tq);
	db_tq->ctxul = iq_no;
	db_tq->ctxptr = oct;

	taskqueue_start_threads(&db_tq->tq, 1, PI_NET,
				"lio%d_check_db_timeout:%d",
				oct->octeon_id, iq_no);
	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1);

	/* Allocate a buf ring */
	oct->instr_queue[iq_no]->br =
		buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
			       &oct->instr_queue[iq_no]->enq_lock);
	if (oct->instr_queue[iq_no]->br == NULL) {
		lio_dev_err(oct, "Critical Failure setting up buf ring\n");
		return (1);
	}

	return (0);
}
/*
 * Tear down input queue iq_no: stop the doorbell-timeout task, release
 * any still-pending transmit mbufs and their DMA maps, then free the
 * buf_ring, request list, DMA tag and descriptor ring.
 *
 * Returns 0 if the descriptor ring existed and was freed, 1 otherwise.
 * Safe to call on a partially initialized queue (lio_setup_iq() does
 * exactly that on lio_init_instr_queue() failure).
 */
int
lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue		*iq = oct->instr_queue[iq_no];
	struct lio_request_list		*request_buf;
	struct lio_mbuf_free_info	*finfo;
	uint64_t			desc_size = 0, q_size;
	int				i;

	lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no);

	/* Cancel (or wait out) the periodic doorbell check first. */
	if (oct->check_db_tq[iq_no].tq != NULL) {
		while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work,
						NULL))
			taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work);
		taskqueue_free(oct->check_db_tq[iq_no].tq);
		oct->check_db_tq[iq_no].tq = NULL;
	}

	if (LIO_CN23XX_PF(oct))
		desc_size =
		    LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf));

	/* Free mbufs the device never fetched and their DMA maps. */
	request_buf = iq->request_list;
	for (i = 0; i < iq->max_count; i++, request_buf++) {
		if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) ||
		    (request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) {
			if (request_buf->buf != NULL) {
				finfo = request_buf->buf;
				bus_dmamap_sync(iq->txtag, request_buf->map,
						BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(iq->txtag,
						  request_buf->map);
				m_freem(finfo->mb);
				request_buf->buf = NULL;
				if (request_buf->map != NULL) {
					bus_dmamap_destroy(iq->txtag,
							   request_buf->map);
					request_buf->map = NULL;
				}
			} else if (request_buf->map != NULL) {
				bus_dmamap_unload(iq->txtag, request_buf->map);
				bus_dmamap_destroy(iq->txtag, request_buf->map);
				request_buf->map = NULL;
			}
		}
	}

	if (iq->br != NULL) {
		buf_ring_free(iq->br, M_DEVBUF);
		iq->br = NULL;
	}

	if (iq->request_list != NULL) {
		free(iq->request_list, M_DEVBUF);
		iq->request_list = NULL;
	}

	if (iq->txtag != NULL) {
		bus_dma_tag_destroy(iq->txtag);
		iq->txtag = NULL;
	}

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free((uint32_t)q_size, iq->base_addr);

		oct->io_qmask.iq &= ~(1ULL << iq_no);

		bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue));
		oct->num_iqs--;

		return (0);
	}

	return (1);
}
/* Return 0 on success, 1 on failure */
/*
 * Create input queue txpciq.s.q_no for interface ifidx, or — if the
 * queue already exists — just refresh its txpciq binding.  On init
 * failure the partially built queue is deleted before returning 1.
 */
int
lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index,
	     union octeon_txpciq txpciq, uint32_t num_descs)
{
	uint32_t	iq_no = (uint32_t)txpciq.s.q_no;

	/* A non-NULL oct_dev marks the queue as already initialized. */
	if (oct->instr_queue[iq_no]->oct_dev != NULL) {
		lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n",
			    iq_no);
		oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
		return (0);
	}

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (lio_init_instr_queue(oct, txpciq, num_descs)) {
		lio_delete_instr_queue(oct, iq_no);
		return (1);
	}

	oct->num_iqs++;
	if (oct->fn_list.enable_io_queues(oct))
		return (1);

	return (0);
}
/*
 * Poll (up to ~1000 x 1 ms) until every active input queue has no
 * pending instructions, kicking the doorbell-timeout logic for queues
 * that still have work so stuck doorbells get rung.
 *
 * Returns the number of instructions still pending when the wait
 * ended (0 means all were fetched).
 */
int
lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int	i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending = atomic_load_acq_int(
					&oct->instr_queue[i]->instr_pending);
			if (pending)
				__lio_check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		lio_sleep_timeout(1);

	} while (retry-- && instr_cnt);

	return (instr_cnt);
}
/*
 * Notify the device of iq->fill_cnt newly queued commands by writing
 * the queue's doorbell register, then reset the fill count and record
 * when the doorbell was last rung.  No-op unless the device is in the
 * RUNNING state.
 */
static inline void
lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq)
{
	if (atomic_load_acq_int(&oct->status) != LIO_DEV_RUNNING)
		return;

	lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt);
	/* make sure doorbell write goes through */
	__compiler_membar();

	iq->fill_cnt = 0;
	iq->last_db_time = ticks;
}
/*
 * Copy one command into its ring slot.  The command size (64 or 32
 * bytes) follows the queue's configured instruction mode.
 */
static inline void
__lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
{
	uint8_t	*slot;
	uint8_t	cmd_bytes;

	cmd_bytes = iq->iqcmd_64B ? 64 : 32;
	slot = iq->base_addr + cmd_bytes * iq->host_write_index;

	memcpy(slot, cmd, cmd_bytes);
}
/*
 * Write one command into the ring and advance host_write_index.
 * Returns the slot index used and one of:
 *   LIO_IQ_SEND_OK     - posted, room remains
 *   LIO_IQ_SEND_STOP   - posted, but the ring is now effectively full
 *   LIO_IQ_SEND_FAILED - not posted (ring full), index is -1
 * Caller must hold iq->post_lock.
 */
static inline struct lio_iq_post_status
__lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
	struct lio_iq_post_status	st;

	st.status = LIO_IQ_SEND_OK;

	/*
	 * This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 1)) {
		st.status = LIO_IQ_SEND_FAILED;
		st.index = -1;
		return (st);
	}

	/* One slot is deliberately kept free; warn the caller at max-2. */
	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 2))
		st.status = LIO_IQ_SEND_STOP;

	__lio_copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
					      iq->max_count);
	iq->fill_cnt++;

	/*
	 * Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_add_int(&iq->instr_pending, 1);

	return (st);
}
static inline void
__lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf,
int reqtype)
{
iq->request_list[idx].buf = buf;
iq->request_list[idx].reqtype = reqtype;
}
/* Can only run in process context */
/*
 * Walk request_list from flush_index up to the device's read index,
 * reclaiming each fetched entry according to its request type:
 * free mbufs, queue soft commands expecting a response onto the
 * ordered response list, or invoke their completion callback.
 *
 * Stops after "budget" entries when budget != 0.  Returns the number
 * of entries processed and leaves flush_index at the stopping point.
 * Caller must hold iq->lock (see lio_flush_iq()).
 */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct lio_instr_queue *iq, uint32_t budget)
{
	struct lio_soft_command		*sc;
	struct octeon_instr_irh		*irh = NULL;
	struct lio_mbuf_free_info	*finfo;
	void				*buf;
	uint32_t			inst_count = 0;
	uint32_t			old = iq->flush_index;
	int				reqtype;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;
		finfo = buf;

		if (reqtype == LIO_REQTYPE_NONE)
			goto skip_this;

		switch (reqtype) {
		case LIO_REQTYPE_NORESP_NET:
			lio_free_mbuf(iq, buf);
			break;
		case LIO_REQTYPE_NORESP_NET_SG:
			lio_free_sgmbuf(iq, buf);
			break;
		case LIO_REQTYPE_RESP_NET:
		case LIO_REQTYPE_SOFT_COMMAND:
			sc = buf;
			/*
			 * NOTE(review): irh stays NULL when the chip is not
			 * a CN23xx PF, and the dereference below would then
			 * fault.  Harmless today (only CN23xx PF is
			 * supported), but verify if more chips are added.
			 */
			if (LIO_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			if (irh->rflag) {
				/*
				 * We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process  sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				mtx_lock(&oct->response_list
					 [LIO_ORDERED_SC_LIST].lock);
				atomic_add_int(&oct->response_list
					       [LIO_ORDERED_SC_LIST].
					       pending_req_count, 1);
				STAILQ_INSERT_TAIL(&oct->response_list
						   [LIO_ORDERED_SC_LIST].
						   head, &sc->node, entries);
				mtx_unlock(&oct->response_list
					   [LIO_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback != NULL) {
					/* This callback must not sleep */
					sc->callback(oct, LIO_REQUEST_DONE,
						     sc->callback_arg);
				}
			}

			break;
		default:
			lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n",
				    __func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = lio_incr_index(old, 1, iq->max_count);

		if ((budget) && (inst_count >= budget))
			break;
	}

	iq->flush_index = old;

	return (inst_count);
}
/* Can only be called from process context */
/*
 * Reclaim completed instructions from the queue, up to "budget"
 * (0 = unlimited).  Returns 1 if the queue was fully drained within
 * budget ("tx done"), 0 if budget was exhausted first.  Returns 1
 * immediately if another flush is already running on this queue
 * (iq_flush_running_lock trylock fails).
 */
int
lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
	     uint32_t budget)
{
	uint32_t	inst_processed = 0;
	uint32_t	tot_inst_processed = 0;
	int		tx_done = 1;

	if (!mtx_trylock(&iq->iq_flush_running_lock))
		return (tx_done);

	mtx_lock(&iq->lock);

	/* Snapshot how far the device has fetched. */
	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (budget)
			inst_processed =
				lio_process_iq_request_list(oct, iq,
							    budget -
							    tot_inst_processed);
		else
			inst_processed =
				lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			atomic_subtract_int(&iq->instr_pending, inst_processed);
			iq->stats.instr_processed += inst_processed;
		}
		tot_inst_processed += inst_processed;
		inst_processed = 0;

	} while (tot_inst_processed < budget);

	if (budget && (tot_inst_processed >= budget))
		tx_done = 0;

	iq->last_db_time = ticks;

	mtx_unlock(&iq->lock);

	mtx_unlock(&iq->iq_flush_running_lock);

	return (tx_done);
}
/*
 * Process instruction queue after timeout.
 * This routine gets called from a taskqueue or when removing the module.
 */
/*
 * If commands have been pending longer than the queue's doorbell
 * timeout, flush the queue and re-enable its interrupt; then, if the
 * transmit buf_ring has backlog, restart transmission on it.
 */
static void
__lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no)
{
	struct lio_instr_queue	*iq;
	uint64_t		next_time;

	if (oct == NULL)
		return;

	iq = oct->instr_queue[iq_no];
	if (iq == NULL)
		return;

	if (atomic_load_acq_int(&iq->instr_pending)) {
		/* If ticks - last_db_time < db_timeout do nothing */
		next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout);
		if (!lio_check_timeout(ticks, next_time))
			return;

		iq->last_db_time = ticks;

		/* Flush the instruction queue */
		lio_flush_iq(oct, iq, 0);

		lio_enable_irq(NULL, iq);
	}

	/* Drain any backlog that accumulated in the transmit buf_ring. */
	if (oct->props.ifp != NULL && iq->br != NULL) {
		if (mtx_trylock(&iq->enq_lock)) {
			if (!drbr_empty(oct->props.ifp, iq->br))
				lio_mq_start_locked(oct->props.ifp, iq);

			mtx_unlock(&iq->enq_lock);
		}
	}
}
/*
 * Taskqueue handler: run the doorbell-timeout check for one input
 * queue, then re-arm itself to fire again in 10 ms.  The "pending"
 * argument required by the taskqueue API is unused.
 */
static void
lio_check_db_timeout(void *arg, int pending)
{
	struct lio_tq		*db_tq = arg;
	struct octeon_device	*oct = db_tq->ctxptr;
	uint64_t		iq_no = db_tq->ctxul;
	const uint32_t		reschedule_ms = 10;

	__lio_check_db_timeout(oct, iq_no);

	/* Periodic task: queue the next run ourselves. */
	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work,
				  lio_ms_to_ticks(reschedule_ms));
}
/*
 * Post one command (plus its reclaim bookkeeping buffer "buf" of type
 * "reqtype") to input queue iq_no.  The doorbell is rung when
 * force_db is set or the queue reports it is nearly full.
 *
 * Returns the LIO_IQ_SEND_* status from __lio_post_command2();
 * statistics are updated accordingly.  Safe against concurrent
 * posters via iq->post_lock.
 */
int
lio_send_command(struct octeon_device *oct, uint32_t iq_no,
		 uint32_t force_db, void *cmd, void *buf,
		 uint32_t datasize, uint32_t reqtype)
{
	struct lio_iq_post_status	st;
	struct lio_instr_queue		*iq = oct->instr_queue[iq_no];

	/*
	 * Get the lock and prevent other tasks and tx interrupt handler
	 * from running.
	 */
	mtx_lock(&iq->post_lock);

	st = __lio_post_command2(iq, cmd);

	if (st.status != LIO_IQ_SEND_FAILED) {
		__lio_add_to_request_list(iq, st.index, buf, reqtype);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db || (st.status == LIO_IQ_SEND_STOP))
			lio_ring_doorbell(oct, iq);
	} else {
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	mtx_unlock(&iq->post_lock);

	/*
	 * No explicit flush here: completed commands are reclaimed by the
	 * periodic doorbell-timeout task / lio_flush_iq() for cases where
	 * there are no IQ completion interrupts.
	 */
	return (st.status);
}
void
lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc,
uint8_t opcode, uint8_t subcode, uint32_t irh_ossp,
uint64_t ossp0, uint64_t ossp1)
{
struct lio_config *lio_cfg;
struct octeon_instr_ih3 *ih3;
struct octeon_instr_pki_ih3 *pki_ih3;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__));
KASSERT(subcode <= 127, ("%s, %d, opcode > 127", __func__, __LINE__));
lio_cfg = lio_get_conf(oct);
if (LIO_CN23XX_PF(oct)) {
ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
pki_ih3->w = 1;
pki_ih3->raw = 1;
pki_ih3->utag = 1;
pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
pki_ih3->utt = 1;
pki_ih3->tag = LIO_CONTROL;
pki_ih3->tagtype = LIO_ATOMIC_TAG;
pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
pki_ih3->pm = 0x7;
pki_ih3->sl = 8;
if (sc->datasize)
ih3->dlengsz = sc->datasize;
irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
irh->opcode = opcode;
irh->subcode = subcode;
/* opcode/subcode specific parameters (ossp) */
irh->ossp = irh_ossp;
sc->cmd.cmd3.ossp[0] = ossp0;
sc->cmd.cmd3.ossp[1] = ossp1;
if (sc->rdatasize) {
rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = sc->rdatasize;
irh->rflag = 1;
/* PKI IH3 */
/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
ih3->fsz = LIO_SOFTCMDRESP_IH3;
} else {
irh->rflag = 0;
/* PKI IH3 */
/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
ih3->fsz = LIO_PCICMD_O3;
}
}
}
/*
 * Finalize a prepared soft command (install dptr/rptr DMA addresses,
 * seed the completion word) and post it with the doorbell forced.
 * Sets sc->timeout from sc->wait_time when a timeout was requested.
 * Returns the LIO_IQ_SEND_* status from lio_send_command().
 */
int
lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc)
{
	struct octeon_instr_ih3	*ih3;
	struct octeon_instr_irh	*irh;
	uint32_t		len = 0;

	if (LIO_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL",
					      __func__, __LINE__));
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL",
					      __func__, __LINE__));
			KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL",
						  __func__, __LINE__));
			/* Device overwrites this sentinel when it responds. */
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (uint32_t)ih3->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time);

	return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				 len, LIO_REQTYPE_SOFT_COMMAND));
}
/*
 * Pre-allocate LIO_MAX_SOFT_COMMAND_BUFFERS DMA buffers of
 * LIO_SOFT_COMMAND_BUFFER_SIZE each and link them into the device's
 * soft command pool.  On any allocation failure the buffers obtained
 * so far are released and 1 is returned; returns 0 on success.
 */
int
lio_setup_sc_buffer_pool(struct octeon_device *oct)
{
	struct lio_soft_command	*sc;
	uint64_t		dma_addr;
	int			i;

	STAILQ_INIT(&oct->sc_buf_pool.head);
	mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF);
	atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct lio_soft_command *)
			lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE, &dma_addr);
		if (sc == NULL) {
			lio_free_sc_buffer_pool(oct);
			return (1);
		}

		/* The buffer itself doubles as the lio_soft_command header. */
		sc->dma_addr = dma_addr;
		sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;

		STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
	}

	return (0);
}
/*
 * Release every buffer currently sitting in the soft command pool.
 * Always returns 0.
 *
 * NOTE(review): the FOREACH_SAFE cursor "tmp" is effectively unused —
 * each iteration pops the head — and alloc_buf_count is not reset
 * here; confirm callers only invoke this at teardown/rollback.
 */
int
lio_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct lio_stailq_node	*tmp, *tmp2;
	struct lio_soft_command	*sc;

	mtx_lock(&oct->sc_buf_pool.lock);

	STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) {
		sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head,
					    struct lio_soft_command, node);

		STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries);

		lio_dma_free(sc->size, sc);
	}

	STAILQ_INIT(&oct->sc_buf_pool.head);

	mtx_unlock(&oct->sc_buf_pool.lock);

	return (0);
}
/*
 * Take one buffer from the soft command pool, zero it, and carve it
 * into (in order): the lio_soft_command header, an optional context
 * area of ctxsize bytes, a data area of datasize bytes and a
 * return-data area of rdatasize bytes — the latter two aligned to a
 * 128-byte boundary.  status_word points at the final 8 bytes of the
 * return area.  Returns NULL when the pool is empty.
 */
struct lio_soft_command *
lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize,
		       uint32_t rdatasize, uint32_t ctxsize)
{
	struct lio_soft_command	*sc = NULL;
	struct lio_stailq_node	*tmp;
	uint64_t		dma_addr;
	uint32_t		size;
	uint32_t		offset = sizeof(struct lio_soft_command);

	KASSERT((offset + datasize + rdatasize + ctxsize) <=
		LIO_SOFT_COMMAND_BUFFER_SIZE,
		("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE",
		 __func__, __LINE__));

	mtx_lock(&oct->sc_buf_pool.lock);

	if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) {
		mtx_unlock(&oct->sc_buf_pool.lock);
		return (NULL);
	}

	tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries);

	STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries);

	atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1);

	mtx_unlock(&oct->sc_buf_pool.lock);

	sc = (struct lio_soft_command *)tmp;

	/* Preserve the pool bookkeeping across the bzero() below. */
	dma_addr = sc->dma_addr;
	size = sc->size;

	bzero(sc, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (uint8_t *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (uint8_t *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__,
					  __LINE__));
		sc->virtrptr = (uint8_t *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
					       rdatasize - 8);
	}

	return (sc);
}
/* Return soft command buffer "sc" to the pool and update the count. */
void
lio_free_soft_command(struct octeon_device *oct,
		      struct lio_soft_command *sc)
{
	mtx_lock(&oct->sc_buf_pool.lock);

	STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);

	atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1);

	mtx_unlock(&oct->sc_buf_pool.lock);
}

View File

@ -0,0 +1,208 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_main.h"
static void lio_poll_req_completion(void *arg, int pending);
/*
 * Initialize the per-device response lists and start the taskqueue that
 * periodically reaps completed ordered soft commands.
 * @param oct the octeon device structure
 * @return 0 on success, -ENOMEM if the taskqueue could not be created
 */
int
lio_setup_response_list(struct octeon_device *oct)
{
	struct lio_tq *ctq;
	int i, ret = 0;

	for (i = 0; i < LIO_MAX_RESPONSE_LISTS; i++) {
		STAILQ_INIT(&oct->response_list[i].head);
		mtx_init(&oct->response_list[i].lock, "response_list_lock",
			 NULL, MTX_DEF);
		atomic_store_rel_int(&oct->response_list[i].pending_req_count,
				     0);
	}
	mtx_init(&oct->cmd_resp_wqlock, "cmd_resp_wqlock", NULL, MTX_DEF);

	ctq = &oct->dma_comp_tq;
	/*
	 * With M_WAITOK taskqueue_create() does not return NULL; the check
	 * below is defensive only.
	 */
	ctq->tq = taskqueue_create("lio_dma_comp", M_WAITOK,
				   taskqueue_thread_enqueue, &ctq->tq);
	if (ctq->tq == NULL) {
		lio_dev_err(oct, "failed to create wq thread\n");
		return (-ENOMEM);
	}

	TIMEOUT_TASK_INIT(ctq->tq, &ctq->work, 0, lio_poll_req_completion,
			  (void *)ctq);
	ctq->ctxptr = oct;

	oct->cmd_resp_state = LIO_DRV_ONLINE;

	taskqueue_start_threads(&ctq->tq, 1, PI_NET, "lio%d_dma_comp",
				oct->octeon_id);
	/* Arm the first 50 ms poll; the handler re-arms itself thereafter. */
	taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));

	return (ret);
}
/*
 * Tear down the DMA-completion taskqueue created by lio_setup_response_list().
 * The timeout task re-arms itself, so keep cancelling until the cancel
 * succeeds with nothing pending, draining any in-flight invocation.
 * @param oct the octeon device structure
 */
void
lio_delete_response_list(struct octeon_device *oct)
{
	if (oct->dma_comp_tq.tq != NULL) {
		while (taskqueue_cancel_timeout(oct->dma_comp_tq.tq,
						&oct->dma_comp_tq.work, NULL))
			taskqueue_drain_timeout(oct->dma_comp_tq.tq,
						&oct->dma_comp_tq.work);
		taskqueue_free(oct->dma_comp_tq.tq);
		oct->dma_comp_tq.tq = NULL;
	}
}
/*
 * Reap completed or timed-out soft commands from the ordered response list.
 * Repeatedly examines the head of LIO_ORDERED_SC_LIST: if the firmware has
 * DMA'd a completion word (or the entry has timed out / force_quit is set),
 * the node is unlinked and its callback invoked with the translated status.
 * @param octeon_dev the octeon device structure
 * @param force_quit non-zero forces pending requests to be timed out
 * @return 1 if the list is empty when checked, 0 otherwise (more entries may
 *         remain after LIO_MAX_ORD_REQS_TO_PROCESS completions, or the head
 *         entry is still pending).
 */
int
lio_process_ordered_list(struct octeon_device *octeon_dev,
			 uint32_t force_quit)
{
	struct lio_response_list *ordered_sc_list;
	struct lio_soft_command *sc;
	uint64_t status64;
	uint32_t status;
	int request_complete = 0;
	int resp_to_process;

	resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;

	ordered_sc_list = &octeon_dev->response_list[LIO_ORDERED_SC_LIST];

	do {
		mtx_lock(&ordered_sc_list->lock);

		if (STAILQ_EMPTY(&ordered_sc_list->head)) {
			/*
			 * ordered_sc_list is empty; there is nothing to
			 * process
			 */
			mtx_unlock(&ordered_sc_list->lock);
			return (1);
		}

		sc = LIO_STAILQ_FIRST_ENTRY(&ordered_sc_list->head,
					    struct lio_soft_command, node);

		status = LIO_REQUEST_PENDING;

		/*
		 * check if octeon has finished DMA'ing a response to where
		 * rptr is pointing to
		 */
		status64 = *sc->status_word;
		if (status64 != COMPLETION_WORD_INIT) {
			/*
			 * This logic ensures that all 64b have been written.
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, then swap result from BE to host order
			 * 3. check byte 7 (swapped to 0) for non-FF
			 * 4. if non-FF, use the low 32-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use status
			 */
			if ((status64 & 0xff) != 0xff) {
				lio_swap_8B_data(&status64, 1);
				if (((status64 & 0xff) != 0xff)) {
					/* retrieve 16-bit firmware status */
					status = (uint32_t)(status64 &
							    0xffffULL);
					if (status) {
						/* tag with FW major code */
						status = LIO_FW_STATUS_CODE(
								status);
					} else {
						/* i.e. no error */
						status = LIO_REQUEST_DONE;
					}
				}
			}
		} else if (force_quit || (sc->timeout &&
			   lio_check_timeout(ticks, sc->timeout))) {
			lio_dev_err(octeon_dev, "%s: cmd failed, timeout (%u, %u)\n",
				    __func__, ticks, sc->timeout);
			status = LIO_REQUEST_TIMEOUT;
		}

		if (status != LIO_REQUEST_PENDING) {
			/* we have received a response or we have timed out */
			/* remove node from linked list */
			STAILQ_REMOVE(&octeon_dev->response_list
				      [LIO_ORDERED_SC_LIST].head,
				      &sc->node, lio_stailq_node, entries);
			atomic_subtract_int(&octeon_dev->response_list
					    [LIO_ORDERED_SC_LIST].
					    pending_req_count, 1);
			/* drop the lock before running the user callback */
			mtx_unlock(&ordered_sc_list->lock);

			if (sc->callback != NULL)
				sc->callback(octeon_dev, status,
					     sc->callback_arg);

			request_complete++;
		} else {
			/* no response yet */
			request_complete = 0;
			mtx_unlock(&ordered_sc_list->lock);
		}

		/*
		 * If we hit the Max Ordered requests to process every loop,
		 * we quit and let this function be invoked the next time
		 * the poll thread runs to process the remaining requests.
		 * This function can take up the entire CPU if there is no
		 * upper limit to the requests processed.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return (0);
}
/*
 * Timeout-task handler: reap completed ordered soft commands and re-arm
 * the 50 ms poll.
 * @param arg     the struct lio_tq this task was initialized with
 * @param pending taskqueue pending count (unused)
 */
static void
lio_poll_req_completion(void *arg, int pending)
{
	struct lio_tq *ctq = (struct lio_tq *)arg;
	struct octeon_device *oct = (struct octeon_device *)ctq->ctxptr;

	lio_process_ordered_list(oct, 0);
	taskqueue_enqueue_timeout(ctq->tq, &ctq->work, lio_ms_to_ticks(50));
}

View File

@ -0,0 +1,151 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/*
* ! \file lio_response_manager.h
* \brief Host Driver: Response queues for host instructions.
*/
#ifndef __LIO_RESPONSE_MANAGER_H__
#define __LIO_RESPONSE_MANAGER_H__
/*
* Maximum ordered requests to process in every invocation of
* lio_process_ordered_list(). The function will continue to process requests
* as long as it can find one that has finished processing. If it keeps
* finding requests that have completed, the function can run for ever. The
* value defined here sets an upper limit on the number of requests it can
* process before it returns control to the poll thread.
*/
#define LIO_MAX_ORD_REQS_TO_PROCESS 4096

/*
 * Head of a response list. There are several response lists in the
 * system. One for each response order- Unordered, ordered
 * and 1 for noresponse entries on each instruction queue.
 */
struct lio_response_list {
	/* List structure to add delete pending entries to */
	struct lio_stailq_head head;

	/* A lock for this response list */
	struct mtx lock;

	/* Count of requests still awaiting a response on this list */
	volatile int pending_req_count;
};

/* The type of response list (index into octeon_device.response_list[]). */
enum {
	LIO_ORDERED_LIST = 0,
	LIO_UNORDERED_NONBLOCKING_LIST = 1,
	LIO_UNORDERED_BLOCKING_LIST = 2,
	LIO_ORDERED_SC_LIST = 3
};

/*
 * Error codes used in Octeon Host-Core communication.
 *
 *         31            16 15             0
 *         ---------------------------------
 *         |  major number | minor number  |
 *         ---------------------------------
 * Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
 * are reserved to identify the group to which the error code belongs. The
 * lower 16-bits, called Minor Error Number, carry the actual code.
 *
 * So error codes are (MAJOR NUMBER << 16) | MINOR_NUMBER.
 */

/* Error codes used by firmware (bits 15..0 set by firmware) */
#define LIO_FW_MAJOR_ERROR_CODE 0x0001

/* A value of 0x00000000 indicates no error i.e. success */
#define LIO_DRIVER_ERROR_NONE 0x00000000

#define LIO_DRIVER_ERROR_REQ_PENDING 0x00000001
#define LIO_DRIVER_ERROR_REQ_TIMEOUT 0x00000003
#define LIO_DRIVER_ERROR_REQ_EINTR 0x00000004

/*
 * Status for a request.
 * If a request is not queued to Octeon by the driver, the driver returns
 * an error condition that's described by one of the OCTEON_REQ_ERR_* values
 * below. If the request is successfully queued, the driver will return
 * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT and
 * LIO_REQUEST_INTERRUPTED are only returned by the driver if the
 * response for the request failed to arrive before a time-out period or if
 * the request processing got interrupted due to a signal, respectively.
 */
enum {
	LIO_REQUEST_DONE = (LIO_DRIVER_ERROR_NONE),
	LIO_REQUEST_PENDING = (LIO_DRIVER_ERROR_REQ_PENDING),
	LIO_REQUEST_TIMEOUT = (LIO_DRIVER_ERROR_REQ_TIMEOUT),
	LIO_REQUEST_INTERRUPTED = (LIO_DRIVER_ERROR_REQ_EINTR),
	LIO_REQUEST_NO_DEVICE = (0x00000021),
	LIO_REQUEST_NOT_RUNNING,
	LIO_REQUEST_INVALID_IQ,
	LIO_REQUEST_INVALID_BUFCNT,
	LIO_REQUEST_INVALID_RESP_ORDER,
	LIO_REQUEST_NO_MEMORY,
	LIO_REQUEST_INVALID_BUFSIZE,
	LIO_REQUEST_NO_PENDING_ENTRY,
	LIO_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
};

/* Convert a queue-node pointer back to its containing structure. */
#define LIO_STAILQ_FIRST_ENTRY(ptr, type, elem)	\
	(type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))

/* Tag a 16-bit firmware minor code with the firmware major error code. */
#define LIO_FW_STATUS_CODE(status)		\
	((LIO_FW_MAJOR_ERROR_CODE << 16) | (status))
/*
* Initialize the response lists. The number of response lists to create is
* given by count.
* @param octeon_dev - the octeon device structure.
*/
int lio_setup_response_list(struct octeon_device *octeon_dev);
void lio_delete_response_list(struct octeon_device *octeon_dev);
/*
* Check the status of first entry in the ordered list. If the instruction at
* that entry finished processing or has timed-out, the entry is cleaned.
* @param octeon_dev - the octeon device structure.
* @param force_quit - the request is forced to timeout if this is 1
* @return 1 if the ordered list is empty, 0 otherwise.
*/
int lio_process_ordered_list(struct octeon_device *octeon_dev,
uint32_t force_quit);
#endif /* __LIO_RESPONSE_MANAGER_H__ */

131
sys/dev/liquidio/lio_bsd.h Normal file
View File

@ -0,0 +1,131 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef __LIO_BSD_H__
#define __LIO_BSD_H__
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/if_gif.h>
#include <netinet/tcp_lro.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/firmware.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#ifndef PCI_VENDOR_ID_CAVIUM
#define PCI_VENDOR_ID_CAVIUM 0x177D
#endif

#define BIT(nr) (1UL << (nr))

/* True once tick count 'a' has passed deadline 'b' (wrap-safe). */
#define lio_check_timeout(a, b) ((int)((b) - (a)) < 0)

#define lio_ms_to_ticks(x)				\
	((hz > 1000) ? ((x) * (hz/1000)) : ((x) / (1000/hz)))

/* Delay x ms: busy-wait before the scheduler is up (cold), sleep after. */
#define lio_mdelay(x) do {				\
		if (cold)				\
			DELAY(1000 * (x));		\
		else					\
			pause("Wait", lio_ms_to_ticks(x));	\
} while(0)

#define lio_sleep_timeout(timeout) lio_mdelay((timeout))

/* Big-endian value markers (storage only; no byte-order conversion implied). */
typedef uint32_t __be32;
typedef uint64_t __be64;

#define lio_dev_info(oct, format, args...)	\
	device_printf((oct)->device, "Info: " format, ##args)
#define lio_dev_warn(oct, format, args...)	\
	device_printf((oct)->device, "Warn: " format, ##args)
#define lio_dev_err(oct, format, args...)	\
	device_printf((oct)->device, "Error: " format, ##args)

#ifdef LIO_DEBUG
#define lio_dev_dbg(oct, format, args...)	\
	device_printf((oct)->device, "Debug: " format, ##args)
#else
/*
 * Plain do/while(0) so the no-op expands safely inside if/else; the former
 * "{do { } while (0); }" form left a stray ';' that broke dangling-else.
 */
#define lio_dev_dbg(oct, format, args...) do { } while (0)
#endif
struct lio_stailq_node {
	STAILQ_ENTRY (lio_stailq_node) entries;
};

STAILQ_HEAD (lio_stailq_head, lio_stailq_node);

/*
 * Pop and return the first node of a singly-linked tail queue, or NULL
 * when the queue is empty.  The node is unlinked before being returned.
 */
static inline struct lio_stailq_node *
lio_delete_first_node(struct lio_stailq_head *root)
{
	struct lio_stailq_node *first;

	first = STAILQ_FIRST(root);
	if (first == NULL)
		return (NULL);

	STAILQ_REMOVE_HEAD(root, entries);
	return (first);
}
#endif /* __LIO_BSD_H__ */

687
sys/dev/liquidio/lio_core.c Normal file
View File

@ -0,0 +1,687 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_rxtx.h"
#include "lio_network.h"
/*
 * Send a feature-control command (cmd/param1) to the NIC firmware.
 * The command is posted on the interface's first tx queue; completion is
 * reported asynchronously through lio_ctrl_cmd_completion().
 * @param ifp    interface whose softc carries the lio state
 * @param cmd    LIO_CMD_* control command code
 * @param param1 command-specific parameter
 * @return value of lio_send_ctrl_pkt(); negative on failure to queue
 */
int
lio_set_feature(struct ifnet *ifp, int cmd, uint16_t param1)
{
	struct lio_ctrl_pkt nctrl;
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	int ret = 0;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;	/* post on first IQ */
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "Feature change failed in core (ret: 0x%x)\n",
			    ret);
	}

	return (ret);
}
/*
 * Completion callback for control commands sent through lio_send_ctrl_pkt().
 * Logs the outcome of the command identified by nctrl->ncmd.s.cmd.
 * @param nctrl_ptr the struct lio_ctrl_pkt that completed
 */
void
lio_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct lio_ctrl_pkt *nctrl = (struct lio_ctrl_pkt *)nctrl_ptr;
	struct lio *lio;
	struct octeon_device *oct;
	uint8_t *mac;

	lio = nctrl->lio;

	if (lio->oct_dev == NULL)
		return;

	oct = lio->oct_dev;

	switch (nctrl->ncmd.s.cmd) {
	case LIO_CMD_CHANGE_DEVFLAGS:
	case LIO_CMD_SET_MULTI_LIST:
		break;

	case LIO_CMD_CHANGE_MACADDR:
		/* MAC bytes start 2 bytes into the 8-byte udd word. */
		mac = ((uint8_t *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned) {
				/*
				 * Print each byte explicitly; the Linux-only
				 * "%pM" printf extension is not supported by
				 * the FreeBSD kernel printf.
				 */
				lio_dev_info(oct,
					     "MAC Address %02x:%02x:%02x:%02x:%02x:%02x is configured for VF %d\n",
					     mac[0], mac[1], mac[2], mac[3],
					     mac[4], mac[5], vfidx);
			}
		} else {
			lio_dev_info(oct, "MAC Address changed to %02x:%02x:%02x:%02x:%02x:%02x\n",
				     mac[0], mac[1], mac[2], mac[3], mac[4],
				     mac[5]);
		}
		break;

	case LIO_CMD_GPIO_ACCESS:
		lio_dev_info(oct, "LED Flashing visual identification\n");
		break;

	case LIO_CMD_ID_ACTIVE:
		lio_dev_info(oct, "LED Flashing visual identification\n");
		break;

	case LIO_CMD_LRO_ENABLE:
		lio_dev_info(oct, "HW LRO Enabled\n");
		break;

	case LIO_CMD_LRO_DISABLE:
		lio_dev_info(oct, "HW LRO Disabled\n");
		break;

	case LIO_CMD_VERBOSE_ENABLE:
		lio_dev_info(oct, "Firmware debug enabled\n");
		break;

	case LIO_CMD_VERBOSE_DISABLE:
		lio_dev_info(oct, "Firmware debug disabled\n");
		break;

	case LIO_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			lio_dev_info(oct, "VLAN filter enabled\n");
		else
			lio_dev_info(oct, "VLAN filter disabled\n");
		break;

	case LIO_CMD_ADD_VLAN_FILTER:
		lio_dev_info(oct, "VLAN filter %d added\n",
			     nctrl->ncmd.s.param1);
		break;

	case LIO_CMD_DEL_VLAN_FILTER:
		lio_dev_info(oct, "VLAN filter %d removed\n",
			     nctrl->ncmd.s.param1);
		break;

	case LIO_CMD_SET_SETTINGS:
		lio_dev_info(oct, "Settings changed\n");
		break;

		/*
		 * Case to handle "LIO_CMD_TNL_RX_CSUM_CTL"
		 * Command passed by NIC driver
		 */
	case LIO_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_ENABLE) {
			lio_dev_info(oct, "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_DISABLE) {
			lio_dev_info(oct, "RX Checksum Offload Disabled\n");
		}
		break;

		/*
		 * Case to handle "LIO_CMD_TNL_TX_CSUM_CTL"
		 * Command passed by NIC driver
		 */
	case LIO_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_ENABLE) {
			lio_dev_info(oct, "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_DISABLE) {
			lio_dev_info(oct, "TX Checksum Offload Disabled\n");
		}
		break;

		/*
		 * Case to handle "LIO_CMD_VXLAN_PORT_CONFIG"
		 * Command passed by NIC driver
		 */
	case LIO_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_ADD) {
			lio_dev_info(oct, "VxLAN Destination UDP PORT:%d ADDED\n",
				     nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_DEL) {
			lio_dev_info(oct, "VxLAN Destination UDP PORT:%d DELETED\n",
				     nctrl->ncmd.s.param1);
		}
		break;

	case LIO_CMD_SET_FLOW_CTL:
		lio_dev_info(oct, "Set RX/TX flow control parameters\n");
		break;

	case LIO_CMD_SET_FNV:
		if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_ENABLE)
			lio_dev_info(oct, "FNV Enabled\n");
		else if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_DISABLE)
			lio_dev_info(oct, "FNV Disabled\n");
		break;

	case LIO_CMD_PKT_STEERING_CTL:
		if (nctrl->ncmd.s.param1 == LIO_CMD_PKT_STEERING_ENABLE) {
			lio_dev_info(oct, "Packet Steering Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   LIO_CMD_PKT_STEERING_DISABLE) {
			lio_dev_info(oct, "Packet Steering Disabled\n");
		}
		break;

	case LIO_CMD_QUEUE_COUNT_CTL:
		lio_dev_info(oct, "Queue count updated to %d\n",
			     nctrl->ncmd.s.param1);
		break;

	default:
		lio_dev_err(oct, "%s Unknown cmd %d\n", __func__,
			    nctrl->ncmd.s.cmd);
	}
}
/*
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 * @return 0 on success (including reuse of a default droq), negative on error
 */
static int
lio_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
	       int desc_size, void *app_ctx)
{
	int	ret_val = 0;

	lio_dev_dbg(oct, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = lio_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return (ret_val);

	/* A return of 1 means an already-created default droq was reused. */
	if (ret_val == 1) {
		lio_dev_dbg(oct, "Using default droq %d\n", q_no);
		return (0);
	}

	/*
	 * Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	lio_write_csr32(oct, oct->droq[q_no]->pkts_credit_reg,
			oct->droq[q_no]->max_count);

	return (ret_val);
}
/*
 * \brief DROQ receive callback: hand one received buffer to the stack.
 * @param m_buff mbuf carrying the packet, response header still in front
 * @param len    total length including the response-header words
 * @param rh     Octeon response header describing the packet
 * @param rxq    the struct lio_droq the packet arrived on
 * @param arg    the receiving ifnet (registered as droq_ops.farg)
 *
 * Extracts the RSS hash, strips the response header, applies the VLAN tag
 * and hardware checksum results, optionally feeds software LRO, and passes
 * the mbuf up via if_input().  The mbuf is freed and rx_dropped bumped if
 * the interface is absent or not RUNNING.
 */
static void
lio_push_packet(void *m_buff, uint32_t len, union octeon_rh *rh, void *rxq,
		void *arg)
{
	struct mbuf	*mbuf = m_buff;
	struct ifnet	*ifp = arg;
	struct lio_droq	*droq = rxq;

	if (ifp != NULL) {
		struct lio *lio = if_getsoftc(ifp);

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			lio_recv_buffer_free(mbuf);
			droq->stats.rx_dropped++;
			return;
		}

		if (rh->r_dh.has_hash) {
			uint32_t hashtype, hashval;

			/*
			 * Hash value and type are the last two 32-bit words
			 * of the r_dh area; with a hardware timestamp present
			 * they sit one 8-byte unit earlier.
			 */
			if (rh->r_dh.has_hwtstamp) {
				hashval = htobe32(*(uint32_t *)
						  (((uint8_t *)mbuf->m_data) +
						   ((rh->r_dh.len - 2) *
						    BYTES_PER_DHLEN_UNIT)));
				hashtype =
				    htobe32(*(((uint32_t *)
					       (((uint8_t *)mbuf->m_data) +
						((rh->r_dh.len - 2) *
						 BYTES_PER_DHLEN_UNIT))) + 1));
			} else {
				hashval = htobe32(*(uint32_t *)
						  (((uint8_t *)mbuf->m_data) +
						   ((rh->r_dh.len - 1) *
						    BYTES_PER_DHLEN_UNIT)));
				hashtype =
				    htobe32(*(((uint32_t *)
					       (((uint8_t *)mbuf->m_data) +
						((rh->r_dh.len - 1) *
						 BYTES_PER_DHLEN_UNIT))) + 1));
			}

			mbuf->m_pkthdr.flowid = hashval;

			/* Map the firmware hash type to the mbuf RSS type. */
			switch (hashtype) {
			case LIO_RSS_HASH_IPV4:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
				break;
			case LIO_RSS_HASH_TCP_IPV4:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
				break;
			case LIO_RSS_HASH_IPV6:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
				break;
			case LIO_RSS_HASH_TCP_IPV6:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
				break;
			case LIO_RSS_HASH_IPV6_EX:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6_EX);
				break;
			case LIO_RSS_HASH_TCP_IPV6_EX:
				M_HASHTYPE_SET(mbuf,
					       M_HASHTYPE_RSS_TCP_IPV6_EX);
				break;
			default:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
			}

		} else {
			/*
			 * This case won't hit as FW will always set has_hash
			 * in rh.
			 */
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
			mbuf->m_pkthdr.flowid = droq->q_no;
		}

		/* Strip the response-header words from the packet data. */
		m_adj(mbuf, rh->r_dh.len * 8);
		len -= rh->r_dh.len * 8;
		mbuf->m_flags |= M_PKTHDR;

		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) &&
		    (rh->r_dh.priority || rh->r_dh.vlan)) {
			uint16_t priority = rh->r_dh.priority;
			uint16_t vid = rh->r_dh.vlan;
			uint16_t vtag;

			/* 802.1Q tag: PCP in bits 15..13, VID in bits 11..0 */
			vtag = priority << 13 | vid;
			mbuf->m_pkthdr.ether_vtag = vtag;
			mbuf->m_flags |= M_VLANTAG;
		}

		if (rh->r_dh.csum_verified & LIO_IPSUM_VERIFIED)
			mbuf->m_pkthdr.csum_flags |= (CSUM_L3_CALC |
						      CSUM_L3_VALID);

		if (rh->r_dh.csum_verified & LIO_L4SUM_VERIFIED) {
			mbuf->m_pkthdr.csum_flags |= (CSUM_L4_CALC |
						      CSUM_L4_VALID);
			mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
						      CSUM_PSEUDO_HDR);
			/* 0xffff is byte-order invariant (all bits set) */
			mbuf->m_pkthdr.csum_data = htons(0xffff);
		}

		mbuf->m_pkthdr.rcvif = ifp;
		mbuf->m_pkthdr.len = len;

		/*
		 * Only attempt software LRO when hardware LRO is off, LRO is
		 * enabled on the interface, and both L3 and L4 checksums
		 * were verified.
		 */
		if ((lio_hwlro == 0) &&
		    (if_getcapenable(ifp) & IFCAP_LRO) &&
		    (mbuf->m_pkthdr.csum_flags &
		     (CSUM_L3_VALID | CSUM_L4_VALID | CSUM_DATA_VALID |
		      CSUM_PSEUDO_HDR)) == (CSUM_L3_VALID | CSUM_L4_VALID |
					    CSUM_DATA_VALID |
					    CSUM_PSEUDO_HDR)) {
			if (droq->lro.lro_cnt) {
				if (tcp_lro_rx(&droq->lro, mbuf, 0) == 0) {
					droq->stats.rx_bytes_received += len;
					droq->stats.rx_pkts_received++;
					return;
				}
			}
		}

		if_input(ifp, mbuf);

		droq->stats.rx_bytes_received += len;
		droq->stats.rx_pkts_received++;

	} else {
		lio_recv_buffer_free(mbuf);
		droq->stats.rx_dropped++;
	}
}
/*
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface Index
 * @param num_iqs number of input (host->device, i.e. tx) queues
 * @param num_oqs number of output (device->host, i.e. rx) queues
 * @return 0 on success, 1 if any queue creation fails
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int
lio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
		    uint32_t num_iqs, uint32_t num_oqs)
{
	struct lio_droq_ops droq_ops;
	struct ifnet *ifp;
	struct lio_droq *droq;
	struct lio *lio;
	/*
	 * NOTE(review): static storage looks unnecessary here — both are
	 * re-initialized on every call before use.
	 */
	static int cpu_id, cpu_id_modulus;
	int num_tx_descs, q, q_no, retval = 0;

	ifp = octeon_dev->props.ifp;

	lio = if_getsoftc(ifp);

	bzero(&droq_ops, sizeof(struct lio_droq_ops));

	/* Every droq delivers its packets through lio_push_packet(). */
	droq_ops.fptr = lio_push_packet;
	droq_ops.farg = (void *)ifp;

	cpu_id = 0;
	cpu_id_modulus = mp_ncpus;
	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;

		lio_dev_dbg(octeon_dev, "lio_setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
			    q, q_no);

		retval = lio_setup_droq(octeon_dev, q_no,
					LIO_GET_NUM_RX_DESCS_NIC_IF_CFG(
						     lio_get_conf(octeon_dev),
							lio->ifidx),
					LIO_GET_NUM_RX_BUF_SIZE_NIC_IF_CFG(
						     lio_get_conf(octeon_dev),
							lio->ifidx), NULL);
		if (retval) {
			lio_dev_err(octeon_dev, "%s : Runtime DROQ(RxQ) creation failed.\n",
				    __func__);
			return (1);
		}

		droq = octeon_dev->droq[q_no];

		/* designate a CPU for this droq, round-robin over all CPUs */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		lio_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = LIO_GET_NUM_TX_DESCS_NIC_IF_CFG(
						     lio_get_conf(octeon_dev),
							       lio->ifidx);
		retval = lio_setup_iq(octeon_dev, ifidx, q,
				      lio->linfo.txpciq[q], num_tx_descs);
		if (retval) {
			lio_dev_err(octeon_dev, " %s : Runtime IQ(TxQ) creation failed.\n",
				    __func__);
			return (1);
		}
	}

	return (0);
}
/*
 * \brief Schedule the DROQ packet-processing tasks.
 * @param oct octeon device
 *
 * When the device reported packet-data interrupt work, enqueue the droq
 * task of every active output queue so its taskqueue thread drains it.
 */
static void
lio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct lio_droq *droq;
	uint64_t q;

	if (!(oct->int_status & LIO_DEV_INTR_PKT_DATA))
		return;

	for (q = 0; q < LIO_MAX_OUTPUT_QUEUES(oct); q++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q)))
			continue;

		droq = oct->droq[q];
		taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task);
	}
}
/*
 * \brief MSI-X interrupt handler for one IOQ vector.
 * Runs the chip-specific handler to latch/ack the cause, then drains the
 * paired DROQ (rx) and IQ (tx) within their budgets.  If work remains the
 * droq task is rescheduled; otherwise the queue interrupt is re-enabled.
 * @param vector the struct lio_ioq_vector registered with bus_setup_intr()
 */
static void
lio_msix_intr_handler(void *vector)
{
	struct lio_ioq_vector *ioq_vector = (struct lio_ioq_vector *)vector;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct lio_droq *droq = oct->droq[ioq_vector->droq_index];
	uint64_t ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if ((ret & LIO_MSIX_PO_INT) || (ret & LIO_MSIX_PI_INT)) {
		struct lio_instr_queue *iq = oct->instr_queue[droq->q_no];
		int reschedule, tx_done = 1;

		reschedule = lio_droq_process_packets(oct, droq, oct->rx_budget);
		if (atomic_load_acq_int(&iq->instr_pending))
			tx_done = lio_flush_iq(oct, iq, oct->tx_budget);

		/* Restart a stalled transmit path if packets are queued. */
		if ((oct->props.ifp != NULL) && (iq->br != NULL)) {
			if (mtx_trylock(&iq->enq_lock)) {
				if (!drbr_empty(oct->props.ifp, iq->br))
					lio_mq_start_locked(oct->props.ifp,
							    iq);
				mtx_unlock(&iq->enq_lock);
			}
		}

		if (reschedule || !tx_done)
			taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task);
		else
			lio_enable_irq(droq, iq);
	}
}
/*
 * \brief Device-wide (non-MSI-X) interrupt service routine.
 * Masks all device interrupts, lets the chip-specific code read the cause
 * registers, schedules DROQ processing, then unmasks again — unless the
 * device is in reset, in which case interrupts stay disabled.
 * @param dev the octeon device passed to bus_setup_intr()
 */
static void
lio_intr_handler(void *dev)
{
	struct octeon_device *oct = dev;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	oct->fn_list.process_interrupt_regs(oct);

	lio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (atomic_load_acq_int(&oct->status) != LIO_DEV_IN_RESET)
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
}
/*
 * \brief Allocate and wire up MSI-X interrupts: one vector per IOQ plus a
 * final "aux" vector for non-queue (sli_mac_pf_int_sum) events.
 * @param oct      the octeon device
 * @param num_ioqs number of IOQ vectors to set up
 * @return 0 on success; 1 on failure, with all partially acquired interrupt
 *         resources released and MSI-X disabled.
 */
int
lio_setup_interrupt(struct octeon_device *oct, uint32_t num_ioqs)
{
	device_t device;
	struct lio_ioq_vector *ioq_vector;
	int cpu_id, err, i;
	int num_alloc_ioq_vectors;
	int num_ioq_vectors;
	int res_id;

	if (!oct->msix_on)
		return (1);

	ioq_vector = oct->ioq_vector;

#ifdef RSS
	if (oct->sriov_info.num_pf_rings != rss_getnumbuckets()) {
		lio_dev_info(oct, "IOQ vectors (%d) are not equal number of RSS buckets (%d)\n",
			     oct->sriov_info.num_pf_rings, rss_getnumbuckets());
	}
#endif

	device = oct->device;

	oct->num_msix_irqs = num_ioqs;
	/* one non ioq interrupt for handling sli_mac_pf_int_sum */
	oct->num_msix_irqs += 1;
	num_alloc_ioq_vectors = oct->num_msix_irqs;

	/* All-or-nothing: fail if the full vector count was not granted. */
	if (pci_alloc_msix(device, &num_alloc_ioq_vectors) ||
	    (num_alloc_ioq_vectors != oct->num_msix_irqs))
		goto err;

	num_ioq_vectors = oct->num_msix_irqs;

	/* For PF, there is one non-ioq interrupt handler */
	for (i = 0; i < num_ioq_vectors - 1; i++, ioq_vector++) {
		/* MSI-X SYS_RES_IRQ resource IDs are 1-based */
		res_id = i + 1;

		ioq_vector->msix_res =
		    bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
					   RF_SHAREABLE | RF_ACTIVE);
		if (ioq_vector->msix_res == NULL) {
			lio_dev_err(oct,
				    "Unable to allocate bus res msix[%d]\n", i);
			goto err_1;
		}

		err = bus_setup_intr(device, ioq_vector->msix_res,
				     INTR_TYPE_NET | INTR_MPSAFE, NULL,
				     lio_msix_intr_handler, ioq_vector,
				     &ioq_vector->tag);
		if (err) {
			bus_release_resource(device, SYS_RES_IRQ, res_id,
					     ioq_vector->msix_res);
			ioq_vector->msix_res = NULL;
			lio_dev_err(oct, "Failed to register intr handler");
			goto err_1;
		}

		bus_describe_intr(device, ioq_vector->msix_res, ioq_vector->tag,
				  "rxtx%u", i);
		ioq_vector->vector = res_id;

#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#else
		cpu_id = i % mp_ncpus;
#endif
		CPU_SETOF(cpu_id, &ioq_vector->affinity_mask);

		/* Setting the IRQ affinity. */
		err = bus_bind_intr(device, ioq_vector->msix_res, cpu_id);
		if (err)
			lio_dev_err(oct, "bus bind interrupt fail");
#ifdef RSS
		lio_dev_dbg(oct, "Bound RSS bucket %d to CPU %d\n", i, cpu_id);
#else
		lio_dev_dbg(oct, "Bound Queue %d to CPU %d\n", i, cpu_id);
#endif
	}

	lio_dev_dbg(oct, "MSI-X enabled\n");

	/* The last resource ID is used for the non-IOQ ("aux") interrupt. */
	res_id = num_ioq_vectors;

	oct->msix_res = bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
					       RF_SHAREABLE | RF_ACTIVE);
	if (oct->msix_res == NULL) {
		lio_dev_err(oct, "Unable to allocate bus res msix for non-ioq interrupt\n");
		goto err_1;
	}

	err = bus_setup_intr(device, oct->msix_res, INTR_TYPE_NET | INTR_MPSAFE,
			     NULL, lio_intr_handler, oct, &oct->tag);
	if (err) {
		bus_release_resource(device, SYS_RES_IRQ, res_id,
				     oct->msix_res);
		oct->msix_res = NULL;
		lio_dev_err(oct, "Failed to register intr handler");
		goto err_1;
	}

	bus_describe_intr(device, oct->msix_res, oct->tag, "aux");
	oct->aux_vector = res_id;

	return (0);

	/* Unwind, in reverse order, everything acquired so far. */
err_1:
	if (oct->tag != NULL) {
		bus_teardown_intr(device, oct->msix_res, oct->tag);
		oct->tag = NULL;
	}

	while (i) {
		i--;
		ioq_vector--;

		if (ioq_vector->tag != NULL) {
			bus_teardown_intr(device, ioq_vector->msix_res,
					  ioq_vector->tag);
			ioq_vector->tag = NULL;
		}

		if (ioq_vector->msix_res != NULL) {
			bus_release_resource(device, SYS_RES_IRQ,
					     ioq_vector->vector,
					     ioq_vector->msix_res);
			ioq_vector->msix_res = NULL;
		}
	}

	if (oct->msix_res != NULL) {
		bus_release_resource(device, SYS_RES_IRQ, oct->aux_vector,
				     oct->msix_res);
		oct->msix_res = NULL;
	}
err:
	pci_release_msi(device);
	lio_dev_err(oct, "MSI-X disabled\n");
	return (1);
}

View File

@ -0,0 +1,553 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_network.h"
#include "lio_ctrl.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_ioctl.h"
#include "lio_main.h"
#include "lio_rxtx.h"
static int lio_set_rx_csum(struct ifnet *ifp, uint32_t data);
static int lio_set_tso4(struct ifnet *ifp);
static int lio_set_tso6(struct ifnet *ifp);
static int lio_set_lro(struct ifnet *ifp);
static int lio_change_mtu(struct ifnet *ifp, int new_mtu);
static int lio_set_mcast_list(struct ifnet *ifp);
static inline enum lio_ifflags lio_get_new_flags(struct ifnet *ifp);
/*
 * Reject multicast/broadcast addresses (low bit of the first octet set)
 * and the all-zeroes address; everything else is a usable unicast MAC.
 */
static inline bool
lio_is_valid_ether_addr(const uint8_t *addr)
{
	unsigned int sum;
	int i;

	if (addr[0] & 0x01)
		return (false);		/* multicast/broadcast */

	sum = 0;
	for (i = 0; i < 6; i++)
		sum += addr[i];

	return (sum != 0);		/* all-zero address is invalid */
}
/*
 * Push the current interface flag set (promiscuous/allmulti/...) to the
 * NIC core application via a control packet.  Returns 0 on success.
 */
static int
lio_change_dev_flags(struct ifnet *ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_ctrl_pkt nctrl;
	int status;

	/* Build the LIO_CMD_CHANGE_DEVFLAGS control command. */
	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	nctrl.ncmd.s.param1 = lio_get_new_flags(ifp);
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	status = lio_send_ctrl_pkt(oct, &nctrl);
	if (status)
		lio_dev_err(oct, "Failed to change flags ret %d\n", status);

	return (status);
}
/*
 * lio_ioctl : User calls this routine for configuring
 * the interface.
 *
 * @param ifp   network device
 * @param cmd   ioctl command code (SIOCSIFMTU, SIOCSIFCAP, ...)
 * @param data  command argument, typically a struct ifreq
 *
 * return 0 on success, positive on failure
 */
int
lio_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lio *lio = if_getsoftc(ifp);
	struct ifreq *ifrequest = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFADDR\n");
		if_setflagbits(ifp, IFF_UP, 0);
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMTU\n");
		error = lio_change_mtu(ifp, ifrequest->ifr_mtu);
		break;
	case SIOCSIFFLAGS:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFFLAGS\n");
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/*
				 * Already running: only PROMISC/ALLMULTI
				 * transitions require a firmware update.
				 */
				if ((if_getflags(ifp) ^ lio->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					error = lio_change_dev_flags(ifp);
			} else {
				if (!(atomic_load_acq_int(&lio->ifstate) &
				      LIO_IFSTATE_DETACH))
					lio_open(lio);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				lio_stop(ifp);
		}
		/* Remember the flags for the next transition check. */
		lio->if_flags = if_getflags(ifp);
		break;
	case SIOCADDMULTI:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCADDMULTI\n");
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			error = lio_set_mcast_list(ifp);
		break;
	case SIOCDELMULTI:
		/* Fixed copy-paste: previously logged "SIOCSIFMULTI". */
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCDELMULTI\n");
		break;
	case SIOCSIFMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMEDIA\n");
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFMEDIA\n");
		/* FALLTHROUGH */
	case SIOCGIFXMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFXMEDIA\n");
		error = ifmedia_ioctl(ifp, ifrequest, &lio->ifmedia, cmd);
		break;
	case SIOCSIFCAP:
		{
			/* Bits that changed relative to the enabled set. */
			int features = ifrequest->ifr_reqcap ^
				if_getcapenable(ifp);

			lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFCAP (Set Capabilities)\n");

			if (!features)
				break;

			if (features & IFCAP_TXCSUM) {
				if_togglecapenable(ifp, IFCAP_TXCSUM);
				if (if_getcapenable(ifp) & IFCAP_TXCSUM)
					if_sethwassistbits(ifp, (CSUM_TCP |
								 CSUM_UDP |
								 CSUM_IP), 0);
				else
					if_sethwassistbits(ifp, 0,
							   (CSUM_TCP | CSUM_UDP |
							    CSUM_IP));
			}
			if (features & IFCAP_TXCSUM_IPV6) {
				if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
				if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
					if_sethwassistbits(ifp, (CSUM_UDP_IPV6 |
								 CSUM_TCP_IPV6), 0);
				else
					if_sethwassistbits(ifp, 0,
							   (CSUM_UDP_IPV6 |
							    CSUM_TCP_IPV6));
			}
			if (features & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
				error |= lio_set_rx_csum(ifp, (features &
							       (IFCAP_RXCSUM |
								IFCAP_RXCSUM_IPV6)));

			if (features & IFCAP_TSO4)
				error |= lio_set_tso4(ifp);

			if (features & IFCAP_TSO6)
				error |= lio_set_tso6(ifp);

			if (features & IFCAP_LRO)
				error |= lio_set_lro(ifp);

			if (features & IFCAP_VLAN_HWTAGGING)
				if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);

			if (features & IFCAP_VLAN_HWFILTER)
				if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);

			if (features & IFCAP_VLAN_HWTSO)
				if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);

			VLAN_CAPABILITIES(ifp);
			break;
		}
	default:
		lio_dev_dbg(lio->oct_dev, "ioctl: UNKNOWN (0x%X)\n", (int)cmd);
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Toggle TSO over IPv4 and keep the if_hwassist TSO bit in sync.
 * Returns 0, or EINVAL when the hardware lacks TSO4 support.
 */
static int
lio_set_tso4(struct ifnet *ifp)
{
	struct lio *lio = if_getsoftc(ifp);

	if (!(if_getcapabilities(ifp) & IFCAP_TSO4)) {
		lio_dev_info(lio->oct_dev, "TSO4 capability not supported\n");
		return (EINVAL);
	}

	if_togglecapenable(ifp, IFCAP_TSO4);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	else
		if_sethwassistbits(ifp, 0, CSUM_IP_TSO);

	return (0);
}
/*
 * Toggle TSO over IPv6 and keep the if_hwassist TSO bit in sync.
 * Returns 0, or EINVAL when the hardware lacks TSO6 support.
 */
static int
lio_set_tso6(struct ifnet *ifp)
{
	struct lio *lio = if_getsoftc(ifp);

	if (!(if_getcapabilities(ifp) & IFCAP_TSO6)) {
		lio_dev_info(lio->oct_dev, "TSO6 capability not supported\n");
		return (EINVAL);
	}

	if_togglecapenable(ifp, IFCAP_TSO6);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
	else
		if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);

	return (0);
}
/*
 * Toggle RXCSUM/RXCSUM_IPV6.  When the Rx checksum setting is toggled
 * ('data' non-zero) while hardware LRO is enabled, LRO is switched off as
 * well, since LRO depends on receive checksum offload.  Returns 0 on
 * success, EINVAL if the firmware command fails, ENODEV if the capability
 * is absent.
 */
static int
lio_set_rx_csum(struct ifnet *ifp, uint32_t data)
{
	struct lio *lio = if_getsoftc(ifp);
	int status = 0;

	if (!(if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))) {
		lio_dev_info(lio->oct_dev, "Rx checksum offload capability not supported\n");
		return (ENODEV);
	}

	if_togglecapenable(ifp, (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6));

	if (data != 0 &&
	    (if_getcapabilities(ifp) & IFCAP_LRO) &&
	    (if_getcapenable(ifp) & IFCAP_LRO)) {
		/* LRO requires RXCSUM */
		status = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE,
					 LIO_LROIPV4 | LIO_LROIPV6);
		if_togglecapenable(ifp, IFCAP_LRO);
	}

	return ((status) ? EINVAL : 0);
}
/*
 * Toggle LRO.  Enabling requires both IPv4 and IPv6 Rx checksum offload
 * to be active; the firmware command is only issued when hardware LRO
 * (lio_hwlro) is selected.  Returns 0, EINVAL on command failure, or
 * ENODEV when the capability is absent.
 */
static int
lio_set_lro(struct ifnet *ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	int status = 0;

	if (!(if_getcapabilities(ifp) & IFCAP_LRO)) {
		lio_dev_info(lio->oct_dev, "LRO capability not supported\n");
		return (ENODEV);
	}

	if (!(if_getcapenable(ifp) & IFCAP_LRO) &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6)) {
		/* Currently off and both Rx checksums on: enable. */
		if_togglecapenable(ifp, IFCAP_LRO);
		if (lio_hwlro)
			status = lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
						 LIO_LROIPV4 | LIO_LROIPV6);
	} else if (if_getcapenable(ifp) & IFCAP_LRO) {
		/* Currently on: disable. */
		if_togglecapenable(ifp, IFCAP_LRO);
		if (lio_hwlro)
			status = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE,
						 LIO_LROIPV4 | LIO_LROIPV6);
	} else {
		lio_dev_info(lio->oct_dev, "LRO requires RXCSUM");
	}

	return ((status) ? EINVAL : 0);
}
/*
 * Completion callback for the MTU-change soft command.  Writes 1 (success)
 * or -1 (firmware failure) into the command's context word so that
 * lio_change_mtu() can stop polling; the wmb() makes the flag update
 * visible before the callback returns.
 */
static void
lio_mtu_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command *sc = buf;
	volatile int *done_flag = sc->ctxptr;

	if (status) {
		lio_dev_err(oct, "MTU updation ctl instruction failed. Status: %llx\n",
			    LIO_CAST64(status));
		*done_flag = -1;
	} else {
		*done_flag = 1;
	}

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully.
	 */
	wmb();
}
/*
 * \brief Change the interface MTU.
 * @param ifp is network device
 * @param new_mtu requested MTU in bytes
 *
 * Sends a LIO_CMD_CHANGE_MTU soft command to the firmware and busy-waits
 * (lio_sleep_cond) until the completion callback flips the context flag.
 * Returns 0 on success; EINVAL, ENOMEM or EBUSY on failure.
 */
static int
lio_change_mtu(struct ifnet *ifp, int new_mtu)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_soft_command *sc;
	union octeon_cmd *ncmd;
	/*
	 * Completion flag shared with lio_mtu_ctl_callback():
	 * 0 = pending, 1 = success, -1 = firmware reported failure.
	 */
	volatile int *mtu_sc_ctx;
	int retval = 0;

	/* No-op if the MTU is unchanged. */
	if (lio->mtu == new_mtu)
		return (0);

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * LIO_MIN_MTU_SIZE bytes and LIO_MAX_MTU_SIZE bytes
	 */
	if ((new_mtu < LIO_MIN_MTU_SIZE) || (new_mtu > LIO_MAX_MTU_SIZE)) {
		lio_dev_err(oct, "Invalid MTU: %d\n", new_mtu);
		lio_dev_err(oct, "Valid range %d and %d\n",
			    LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
		return (EINVAL);
	}

	/* Command payload plus a context word used as the wait flag. */
	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16,
				    sizeof(*mtu_sc_ctx));
	if (sc == NULL)
		return (ENOMEM);

	ncmd = (union octeon_cmd *)sc->virtdptr;
	mtu_sc_ctx = sc->ctxptr;
	*mtu_sc_ctx = 0;

	ncmd->cmd64 = 0;
	ncmd->s.cmd = LIO_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	/* Firmware expects the command words in big-endian order. */
	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC,
				 LIO_OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = lio_mtu_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_info(oct,
			     "Failed to send MTU update Control message\n");
		retval = EBUSY;
		goto mtu_updation_failed;
	}

	/*
	 * Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	lio_sleep_cond(oct, mtu_sc_ctx);

	/* -1 means the firmware rejected the command. */
	if (*mtu_sc_ctx < 0) {
		retval = EBUSY;
		goto mtu_updation_failed;
	}

	lio_dev_info(oct, "MTU Changed from %d to %d\n", if_getmtu(ifp),
		     new_mtu);
	if_setmtu(ifp, new_mtu);
	lio->mtu = new_mtu;
	retval = 0;	/*
			 * this updation is make sure that LIO_IQ_SEND_STOP case
			 * also success
			 */

mtu_updation_failed:
	lio_free_soft_command(oct, sc);
	return (retval);
}
/* @param ifp network device */
int
lio_set_mac(struct ifnet *ifp, uint8_t *p)
{
struct lio_ctrl_pkt nctrl;
struct lio *lio = if_getsoftc(ifp);
struct octeon_device *oct = lio->oct_dev;
int ret = 0;
if (!lio_is_valid_ether_addr(p))
return (EADDRNOTAVAIL);
bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
nctrl.ncmd.cmd64 = 0;
nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_MACADDR;
nctrl.ncmd.s.param1 = 0;
nctrl.ncmd.s.more = 1;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.lio = lio;
nctrl.cb_fn = lio_ctrl_cmd_completion;
nctrl.wait_time = 100;
nctrl.udd[0] = 0;
/* The MAC Address is presented in network byte order. */
memcpy((uint8_t *)&nctrl.udd[0] + 2, p, ETHER_HDR_LEN);
ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
lio_dev_err(oct, "MAC Address change failed\n");
return (ENOMEM);
}
memcpy(((uint8_t *)&lio->linfo.hw_addr) + 2, p, ETHER_HDR_LEN);
return (0);
}
/*
* \brief Converts a mask based on ifp flags
* @param ifp network device
*
* This routine generates a lio_ifflags mask from the ifp flags
* received from the OS.
*/
static inline enum lio_ifflags
lio_get_new_flags(struct ifnet *ifp)
{
enum lio_ifflags f = LIO_IFFLAG_UNICAST;
if (if_getflags(ifp) & IFF_PROMISC)
f |= LIO_IFFLAG_PROMISC;
if (if_getflags(ifp) & IFF_ALLMULTI)
f |= LIO_IFFLAG_ALLMULTI;
if (if_getflags(ifp) & IFF_MULTICAST) {
f |= LIO_IFFLAG_MULTICAST;
/*
* Accept all multicast addresses if there are more than we
* can handle
*/
if (if_getamcount(ifp) > LIO_MAX_MULTICAST_ADDR)
f |= LIO_IFFLAG_ALLMULTI;
}
if (if_getflags(ifp) & IFF_BROADCAST)
f |= LIO_IFFLAG_BROADCAST;
return (f);
}
/*
 * \brief Push the interface multicast filter list to the firmware.
 * @param ifp network device
 *
 * Collects up to LIO_MAX_MULTICAST_ADDR link-layer multicast addresses and
 * sends them, together with the current flag mask, in a
 * LIO_CMD_SET_MULTI_LIST control packet.  Returns 0 on success, EINVAL on
 * send failure.
 */
static int
lio_set_mcast_list(struct ifnet *ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_ctrl_pkt nctrl;
	struct ifmultiaddr *ifma;
	uint64_t *mc;
	int mc_count = 0;
	int ret;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = lio_get_new_flags(ifp);
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];

	/* to protect access to if_multiaddrs */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* 6-byte LL address goes into the upper bytes of *mc. */
		*mc = 0;
		memcpy(((uint8_t *)mc) + 2,
		       LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		       ETHER_ADDR_LEN);
		/* no need to swap bytes */
		mc_count++;
		/*
		 * NOTE(review): this admits mc ==
		 * &nctrl.udd[LIO_MAX_MULTICAST_ADDR] for one more pass;
		 * confirm udd[] has LIO_MAX_MULTICAST_ADDR + 1 slots, or
		 * the next copy writes one entry past the array.
		 */
		if (++mc > &nctrl.udd[LIO_MAX_MULTICAST_ADDR])
			break;
	}
	if_maddr_runlock(ifp);

	/*
	 * Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			    ret);
	}

	return ((ret) ? EINVAL : 0);
}

View File

@ -0,0 +1,44 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _LIO_IOCTL_H_
#define _LIO_IOCTL_H_

/* ifnet ioctl handler (SIOCSIFMTU, SIOCSIFFLAGS, SIOCSIFCAP, ...). */
int lio_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
/* Statistics hookup — implemented elsewhere in the driver (not visible here). */
void lio_add_hw_stats(struct lio *lio);
/* Stop packet I/O on the interface. */
void lio_stop(struct ifnet *ifp);
/* Bring the interface up; 'arg' is the struct lio pointer. */
void lio_open(void *arg);
/* Program a new unicast MAC address ('p' is 6 bytes, network byte order). */
int lio_set_mac(struct ifnet *ifp, uint8_t *p);

#endif /* _LIO_IOCTL_H_ */

2309
sys/dev/liquidio/lio_main.c Normal file

File diff suppressed because it is too large Load Diff

145
sys/dev/liquidio/lio_main.h Normal file
View File

@ -0,0 +1,145 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/*
* \file lio_main.h
* \brief Host Driver: This file is included by all host driver source files
* to include common definitions.
*/
#ifndef _LIO_MAIN_H_
#define _LIO_MAIN_H_
extern unsigned int lio_hwlro;
#ifdef __LP64__
#define LIO_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif
#define LIO_DRV_NAME "lio"
/** Swap 8B blocks */
static inline void
lio_swap_8B_data(uint64_t *data, uint32_t blocks)
{
while (blocks) {
*data = htobe64(*data);
blocks--;
data++;
}
}
/*
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void
lio_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	lio_dev_dbg(oct, "Freeing PCI mapped regions for Bar%d\n", baridx);

	if (oct->mem_bus_space[baridx].pci_mem == NULL)
		return;

	bus_release_resource(oct->device, SYS_RES_MEMORY,
			     PCIR_BAR(baridx * 2),
			     oct->mem_bus_space[baridx].pci_mem);
	oct->mem_bus_space[baridx].pci_mem = NULL;
}
/*
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 *
 * Allocates the memory BAR resource (64-bit BARs consume two registers,
 * hence baridx * 2) and caches the bus-space tag/handle for the register
 * access macros.  Returns 0 on success, ENXIO on allocation failure.
 */
static inline int
lio_map_pci_barx(struct octeon_device *oct, int baridx)
{
	int rid = PCIR_BAR(baridx * 2);

	oct->mem_bus_space[baridx].pci_mem =
	    bus_alloc_resource_any(oct->device, SYS_RES_MEMORY, &rid,
				   RF_ACTIVE);
	if (oct->mem_bus_space[baridx].pci_mem == NULL) {
		lio_dev_err(oct, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Save bus_space values for READ/WRITE_REG macros */
	oct->mem_bus_space[baridx].tag =
	    rman_get_bustag(oct->mem_bus_space[baridx].pci_mem);
	oct->mem_bus_space[baridx].handle =
	    rman_get_bushandle(oct->mem_bus_space[baridx].pci_mem);

	/*
	 * NOTE(review): %lx assumes the tag/handle types are long-sized;
	 * verify on all supported architectures.
	 */
	lio_dev_dbg(oct, "BAR%d Tag 0x%lx Handle 0x%lx\n",
		    baridx, oct->mem_bus_space[baridx].tag,
		    oct->mem_bus_space[baridx].handle);

	return (0);
}
/*
 * Spin, 1 ms at a time, until *condition becomes non-zero; each pass
 * services the first instruction queue and the ordered response list so
 * the awaited completion can make progress.
 */
static inline void
lio_sleep_cond(struct octeon_device *oct, volatile int *condition)
{
	for (;;) {
		if (*condition)
			break;
		lio_mdelay(1);
		lio_flush_iq(oct, oct->instr_queue[0], 0);
		lio_process_ordered_list(oct, 0);
	}
}
/* presumably: whether debug output is enabled for firmware console 'console'
 * — implemented elsewhere in the driver. */
int lio_console_debug_enabled(uint32_t console);

/* Round 'val' up to the next multiple of 4 / 8 (32-bit mask arithmetic). */
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

/* Mask with only bit 'nr' set, as an unsigned long long. */
#define BIT_ULL(nr) (1ULL << (nr))

void lio_free_mbuf(struct lio_instr_queue *iq,
		   struct lio_mbuf_free_info *finfo);
void lio_free_sgmbuf(struct lio_instr_queue *iq,
		     struct lio_mbuf_free_info *finfo);

View File

@ -0,0 +1,293 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/* \file lio_network.h
* \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
*/
#ifndef __LIO_NETWORK_H__
#define __LIO_NETWORK_H__
#include "lio_rss.h"
/* MTU limits enforced by lio_change_mtu(). */
#define LIO_MIN_MTU_SIZE	72
#define LIO_MAX_MTU_SIZE	(LIO_MAX_FRM_SIZE - LIO_FRM_HEADER_SIZE)

#define LIO_MAX_SG		64
#define LIO_MAX_FRAME_SIZE	60000

/* Firmware response layout for a link-statistics request. */
struct lio_fw_stats_resp {
	uint64_t rh;			/* response header — TODO confirm against firmware ABI */
	struct octeon_link_stats stats;	/* statistics payload */
	uint64_t status;		/* completion status word */
};
/* LiquidIO per-interface network private data */
struct lio {
	/* State of the interface. Rx/Tx happens only in the RUNNING state. */
	int ifstate;

	/*
	 * Octeon Interface index number. This device will be represented as
	 * oct<ifidx> in the system.
	 */
	int ifidx;

	/* Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/*
	 * Octeon Output queue from which pkts arrive
	 * for this network interface.
	 */
	int rxq;

	/* Guards each glist */
	struct mtx *glist_lock;

#define LIO_DEFAULT_STATS_INTERVAL 10000
	/* callout timer for stats */
	struct callout stats_timer;

	/* Stats Update Interval in milli Seconds */
	uint16_t stats_interval;

	/* IRQ coalescing driver stats */
	struct octeon_intrmod_cfg intrmod_cfg;

	/* Array of gather component linked lists */
	struct lio_stailq_head *ghead;
	/* Virtual and DMA base addresses of the gather component lists. */
	void **glists_virt_base;
	vm_paddr_t *glists_dma_base;
	uint32_t glist_entry_size;

	/* Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct ifnet *ifp;
	struct ifmedia ifmedia;
	/* Snapshot of if_getflags() taken on the last SIOCSIFFLAGS ioctl. */
	int if_flags;

	/* Link information sent by the core application for this interface. */
	struct octeon_link_info linfo;

	/* counter of link changes */
	uint64_t link_changes;

	/* Size of Tx queue for this octeon device. */
	uint32_t tx_qsize;

	/* Size of Rx queue for this octeon device. */
	uint32_t rx_qsize;

	/* Size of MTU this octeon device. */
	uint32_t mtu;

	/* msg level flag per interface. */
	uint32_t msg_enable;

	/* Interface info */
	uint32_t intf_open;

	/* task queue for rx oom status */
	struct lio_tq rx_status_tq;

	/* VLAN Filtering related */
	eventhandler_tag vlan_attach;
	eventhandler_tag vlan_detach;

#ifdef RSS
	/* Mirror of the RSS key/indirection table pushed to firmware. */
	struct lio_rss_params_set rss_set;
#endif /* RSS */
};
#define LIO_MAX_CORES 12
/*
* \brief Enable or disable feature
* @param ifp pointer to network device
* @param cmd Command that just requires acknowledgment
* @param param1 Parameter to command
*/
int lio_set_feature(struct ifnet *ifp, int cmd, uint16_t param1);
/*
* \brief Link control command completion callback
* @param nctrl_ptr pointer to control packet structure
*
* This routine is called by the callback function when a ctrl pkt sent to
* core app completes. The nctrl_ptr contains a copy of the command type
* and data sent to the core app. This routine is only called if the ctrl
* pkt was sent successfully to the core app.
*/
void lio_ctrl_cmd_completion(void *nctrl_ptr);
int lio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
uint32_t num_iqs, uint32_t num_oqs);
int lio_setup_interrupt(struct octeon_device *oct, uint32_t num_ioqs);
/*
 * Allocate a 'size'-byte jumbo-cluster mbuf for receive, with both the
 * mbuf and packet-header lengths preset.  Returns NULL when no buffer
 * is available (non-blocking allocation).
 */
static inline void *
lio_recv_buffer_alloc(uint32_t size)
{
	struct mbuf *m;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
	if (m == NULL)
		return (NULL);

	m->m_len = size;
	m->m_pkthdr.len = size;

	return ((void *)m);
}
/* Release a receive buffer obtained from lio_recv_buffer_alloc(). */
static inline void
lio_recv_buffer_free(void *buffer)
{
	m_freem((struct mbuf *)buffer);
}
/*
 * Smallest n such that (PAGE_SIZE << n) >= size — the power-of-two
 * page-count "order" covering the request.
 * NOTE(review): size == 0 underflows (size - 1); callers appear to pass
 * non-zero sizes only — confirm.
 */
static inline int
lio_get_order(unsigned long size)
{
	unsigned long npages = (size - 1) >> PAGE_SHIFT;
	int order;

	for (order = 0; npages != 0; npages >>= 1)
		order++;

	return (order);
}
/*
 * Allocate 'size' bytes of physically contiguous wired memory, aligned to
 * the power-of-two page span covering the request, and return its physical
 * address through 'dma_handle' (set to 0 on failure).
 */
static inline void *
lio_dma_alloc(size_t size, vm_paddr_t *dma_handle)
{
	size_t align = PAGE_SIZE << lio_get_order(size);
	void *vaddr;

	vaddr = (void *)kmem_alloc_contig(kmem_arena, size, M_WAITOK, 0, ~0ul,
					  align, 0, VM_MEMATTR_DEFAULT);
	*dma_handle = (vaddr != NULL) ? vtophys(vaddr) : 0;

	return (vaddr);
}
/*
 * Free memory obtained from lio_dma_alloc(); 'size' must match the
 * original allocation request.
 */
static inline void
lio_dma_free(size_t size, void *cpu_addr)
{
	kmem_free(kmem_arena, (vm_offset_t)cpu_addr, size);
}
/*
 * Return the physical address of the mbuf's data area for posting to a
 * descriptor ring.  'dev' and 'size' are unused but kept for interface
 * compatibility with callers.
 */
static inline uint64_t
lio_map_ring(device_t dev, void *buf, uint32_t size)
{
	struct mbuf *m = buf;

	return ((uint64_t)vtophys(m->m_data));
}
/*
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int
lio_ifstate_check(struct lio *lio, int state_flag)
{
	int state = atomic_load_acq_int(&lio->ifstate);

	return (state & state_flag);
}
/*
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void
lio_ifstate_set(struct lio *lio, int state_flag)
{
	int state = atomic_load_acq_int(&lio->ifstate);

	atomic_store_rel_int(&lio->ifstate, state | state_flag);
}
/*
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void
lio_ifstate_reset(struct lio *lio, int state_flag)
{
	int state = atomic_load_acq_int(&lio->ifstate);

	atomic_store_rel_int(&lio->ifstate, state & ~(state_flag));
}
/*
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Polls the ordered soft-command list up to 100 times, 100 ms apart.
 * Returns 0 once the pending count reaches zero, 1 on timeout.
 * Called during shutdown sequence.
 */
static inline int
lio_wait_for_pending_requests(struct octeon_device *oct)
{
	int attempt, pending = 0;

	for (attempt = 0; attempt < 100; attempt++) {
		pending = atomic_load_acq_int(
		    &oct->response_list[LIO_ORDERED_SC_LIST].
		    pending_req_count);
		if (pending == 0)
			return (0);
		lio_sleep_timeout(100);
	}

	return ((pending) ? 1 : 0);
}
#endif /* __LIO_NETWORK_H__ */

173
sys/dev/liquidio/lio_rss.c Normal file
View File

@ -0,0 +1,173 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifdef RSS
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "lio_rss.h"
int lio_send_rss_param(struct lio *lio);
/*
 * Soft-command completion handler for LIO_CMD_SET_RSS: log the outcome
 * and release the command buffer.
 */
static void
lio_set_rss_callback(struct octeon_device *oct, uint32_t status, void *arg)
{
	struct lio_soft_command *sc = arg;

	if (status != 0)
		lio_dev_err(oct, "Failed to SET RSS params\n");
	else
		lio_dev_info(oct, "SET RSS params\n");

	lio_free_soft_command(oct, sc);
}
/*
 * Fill lio->rss_set from the kernel RSS state: mirror the indirection
 * table (bucket numbers folded into the configured output-queue range)
 * and translate the enabled hash types into the firmware's bit flags.
 */
static void
lio_set_rss_info(struct lio *lio)
{
	static const struct {
		uint32_t os_bit;	/* kernel RSS_HASHTYPE_* flag */
		uint32_t fw_bit;	/* firmware LIO_RSS_HASH_* flag */
	} hash_map[] = {
		{ RSS_HASHTYPE_RSS_IPV4,	LIO_RSS_HASH_IPV4 },
		{ RSS_HASHTYPE_RSS_TCP_IPV4,	LIO_RSS_HASH_TCP_IPV4 },
		{ RSS_HASHTYPE_RSS_IPV6,	LIO_RSS_HASH_IPV6 },
		{ RSS_HASHTYPE_RSS_TCP_IPV6,	LIO_RSS_HASH_TCP_IPV6 },
		{ RSS_HASHTYPE_RSS_IPV6_EX,	LIO_RSS_HASH_IPV6_EX },
		{ RSS_HASHTYPE_RSS_TCP_IPV6_EX,	LIO_RSS_HASH_TCP_IPV6_EX },
	};
	struct octeon_device *oct = lio->oct_dev;
	struct lio_rss_params_set *rss_set = &lio->rss_set;
	uint32_t hash_config;
	unsigned int i;

	for (i = 0; i < LIO_RSS_TABLE_SZ; i++)
		rss_set->fw_itable[i] =
		    rss_get_indirection_to_bucket(i) % oct->num_oqs;

	hash_config = rss_gethashconfig();
	for (i = 0; i < sizeof(hash_map) / sizeof(hash_map[0]); i++) {
		if (hash_config & hash_map[i].os_bit)
			rss_set->hashinfo |= hash_map[i].fw_bit;
	}
}
/*
 * Program the firmware RSS indirection table, hash key and hash types from
 * the kernel RSS configuration.  Returns 0 on success, ENOMEM if the soft
 * command cannot be allocated.
 * NOTE(review): the send-failure path returns -1 rather than an errno,
 * unlike the other error paths — callers must treat any non-zero value as
 * failure.
 */
int
lio_send_rss_param(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct lio_soft_command *sc = NULL;
	union octeon_cmd *cmd = NULL;
	struct lio_rss_params *rss_param;
	int i, retval;

	/* Command word followed immediately by the RSS parameter block. */
	sc = lio_alloc_soft_command(oct,
			OCTEON_CMD_SIZE + sizeof(struct lio_rss_params), 0, 0);
	if (sc == NULL) {
		lio_dev_err(oct, "%s: Soft command allocation failed\n",
			    __func__);
		return (ENOMEM);
	}

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
				 0, 0);

	/* The callback frees sc on completion. */
	sc->callback = lio_set_rss_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	cmd = (union octeon_cmd *)sc->virtdptr;
	cmd->cmd64 = 0;
	cmd->s.cmd = LIO_CMD_SET_RSS;

	rss_param = (struct lio_rss_params *)(cmd + 1);

	rss_param->param.flags = 0;
	rss_param->param.itablesize = LIO_RSS_TABLE_SZ;
	rss_param->param.hashkeysize = LIO_RSS_KEY_SZ;

	/* Refresh lio->rss_set from the kernel RSS state, then copy it in. */
	lio_set_rss_info(lio);
	rss_param->param.hashinfo = lio->rss_set.hashinfo;
	memcpy(rss_param->itable, (void *)lio->rss_set.fw_itable,
	       (size_t)rss_param->param.itablesize);

	lio_dev_info(oct, "RSS itable: Size %u\n", rss_param->param.itablesize);
	for (i = 0; i < rss_param->param.itablesize; i += 8) {
		lio_dev_dbg(oct, " %03u:%2u, %03u:%2u, %03u:%2u, %03u:%2u, %03u:%2u, %03u:%2u, %03u:%2u, %03u:%2u\n",
			    i + 0, rss_param->itable[i + 0],
			    i + 1, rss_param->itable[i + 1],
			    i + 2, rss_param->itable[i + 2],
			    i + 3, rss_param->itable[i + 3],
			    i + 4, rss_param->itable[i + 4],
			    i + 5, rss_param->itable[i + 5],
			    i + 6, rss_param->itable[i + 6],
			    i + 7, rss_param->itable[i + 7]);
	}

	rss_getkey(lio->rss_set.key);
	memcpy(rss_param->key, (void *)lio->rss_set.key,
	       (size_t)rss_param->param.hashkeysize);

	/* swap cmd and rss params */
	lio_swap_8B_data((uint64_t *)cmd,
			 ((OCTEON_CMD_SIZE + LIO_RSS_PARAM_SIZE) >> 3));

	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(oct,
			    "%s: Sending soft command failed, status: %x\n",
			    __func__, retval);
		lio_free_soft_command(oct, sc);
		return (-1);
	}

	return (0);
}
#endif /* RSS */

View File

@ -0,0 +1,89 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef __LIO_RSS_H__
#define __LIO_RSS_H__
#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#define LIO_RSS_TABLE_SZ 128
#define LIO_RSS_KEY_SZ 40
/*
 * RSS configuration as handed to the firmware: a 16-byte parameter
 * header (two 64-bit words) followed by the queue indirection table
 * and the hash key.  The bitfields are mirrored per 64-bit word for
 * big-endian hosts because the buffer is 8-byte-swapped before send
 * (see lio_send_rss_param()).
 */
struct lio_rss_params {
#define LIO_RSS_PARAM_SIZE 16
	struct param {
#if BYTE_ORDER == LITTLE_ENDIAN
		uint64_t	flags:16;	/* cleared by lio_send_rss_param() */
		uint64_t	hashinfo:32;	/* enabled LIO_RSS_HASH_* types */
		uint64_t	itablesize:16;	/* # indirection entries (LIO_RSS_TABLE_SZ) */

		uint64_t	hashkeysize:16;	/* key length in bytes (LIO_RSS_KEY_SZ) */
		uint64_t	reserved:48;
#elif BYTE_ORDER == BIG_ENDIAN
		uint64_t	itablesize:16;
		uint64_t	hashinfo:32;
		uint64_t	flags:16;

		uint64_t	reserved:48;
		uint64_t	hashkeysize:16;
#else
#error Undefined BYTE_ORDER
#endif
	} param;

	uint8_t	itable[LIO_RSS_TABLE_SZ];	/* RSS queue indirection table */
	uint8_t	key[LIO_RSS_KEY_SZ];		/* RSS hash key */
};
/*
 * Host-side copy of the RSS state used to build struct lio_rss_params
 * (filled by lio_set_rss_info()/rss_getkey() in lio_send_rss_param()).
 */
struct lio_rss_params_set {
	uint8_t		key[LIO_RSS_KEY_SZ];		/* RSS hash key */
	uint8_t		fw_itable[LIO_RSS_TABLE_SZ];	/* queue indirection table */
	uint64_t	hashinfo;			/* enabled LIO_RSS_HASH_* types */
};
#endif /* RSS */
#define LIO_RSS_HASH_IPV4 0x100
#define LIO_RSS_HASH_TCP_IPV4 0x200
#define LIO_RSS_HASH_IPV6 0x400
#define LIO_RSS_HASH_IPV6_EX 0x800
#define LIO_RSS_HASH_TCP_IPV6 0x1000
#define LIO_RSS_HASH_TCP_IPV6_EX 0x2000
#endif /* __LIO_RSS_H__ */

325
sys/dev/liquidio/lio_rxtx.c Normal file
View File

@ -0,0 +1,325 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "lio_rxtx.h"
/*
 * Map one mbuf chain for DMA and hand it to the device's instruction
 * queue 'iq' as a transmit command.
 *
 * Returns:
 *   0       - packet accepted by the hardware queue.
 *   ENOBUFS - transient failure (queue full, no gather list, send
 *             failed); the mbuf is left intact so the caller can
 *             put it back and retry later.
 *   other   - permanent failure; the packet has been dropped, the
 *             mbuf freed and *m_headp set to NULL.
 */
int
lio_xmit(struct lio *lio, struct lio_instr_queue *iq,
	 struct mbuf **m_headp)
{
	struct lio_data_pkt		ndata;
	union lio_cmd_setup		cmdsetup;
	struct lio_mbuf_free_info	*finfo = NULL;
	struct octeon_device		*oct = iq->oct_dev;
	struct lio_iq_stats		*stats;
	struct octeon_instr_irh		*irh;
	struct lio_request_list		*tx_buf;
	union lio_tx_info		*tx_info;
	struct mbuf			*m_head;
	bus_dma_segment_t		segs[LIO_MAX_SG];
	bus_dmamap_t			map;
	uint64_t			dptr = 0;
	uint32_t			tag = 0;
	int				iq_no = 0;
	int				nsegs;
	int				status = 0;

	iq_no = iq->txpciq.s.q_no;
	tag = iq_no;
	stats = &oct->instr_queue[iq_no]->stats;
	tx_buf = iq->request_list + iq->host_write_index;

	/*
	 * Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up)) {
		lio_dev_info(oct, "Transmit failed link_status : %d\n",
			     lio->linfo.link.s.link_up);
		status = ENETDOWN;
		goto drop_packet;
	}

	if (lio_iq_is_full(oct, iq_no)) {
		/* Defer sending if queue is full */
		lio_dev_dbg(oct, "Transmit failed iq:%d full\n", iq_no);
		stats->tx_iq_busy++;
		return (ENOBUFS);
	}

	map = tx_buf->map;
	status = bus_dmamap_load_mbuf_sg(iq->txtag, map, *m_headp, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (status == EFBIG) {
		struct mbuf	*m;

		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			stats->mbuf_defrag_failed++;
			goto drop_packet;
		}
		*m_headp = m;
		status = bus_dmamap_load_mbuf_sg(iq->txtag, map,
		    *m_headp, segs, &nsegs,
		    BUS_DMA_NOWAIT);
	}

	if (status == ENOMEM) {
		/*
		 * NOTE(review): from this point the 'retry' exits return
		 * ENOBUFS without bus_dmamap_unload() on a possibly-loaded
		 * map -- confirm the map is safely reloaded/unloaded on
		 * the retry path.
		 */
		goto retry;
	} else if (status) {
		stats->tx_dmamap_fail++;
		lio_dev_dbg(oct, "bus_dmamap_load_mbuf_sg failed with error %d. iq:%d",
			    status, iq_no);
		goto drop_packet;
	}

	m_head = *m_headp;

	/* Info used to unmap and free the buffers. */
	finfo = &tx_buf->finfo;
	finfo->map = map;
	finfo->mb = m_head;

	/* Prepare the attributes for the data to be passed to OSI. */
	bzero(&ndata, sizeof(struct lio_data_pkt));

	ndata.buf = (void *)finfo;
	ndata.q_no = iq_no;
	ndata.datasize = m_head->m_pkthdr.len;

	cmdsetup.cmd_setup64 = 0;
	cmdsetup.s.iq_no = iq_no;

	/* Request the hardware checksum offloads the stack asked for. */
	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		cmdsetup.s.ip_csum = 1;

	if ((m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) ||
	    (m_head->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)))
		cmdsetup.s.transport_csum = 1;

	if (nsegs == 1) {
		/* Single DMA segment: point dptr directly at the buffer. */
		cmdsetup.s.u.datasize = segs[0].ds_len;
		lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		dptr = segs[0].ds_addr;
		ndata.cmd.cmd3.dptr = dptr;
		ndata.reqtype = LIO_REQTYPE_NORESP_NET;
	} else {
		struct lio_gather	*g;
		int			i;

		/* Multiple segments: take a gather list off the free list. */
		mtx_lock(&lio->glist_lock[iq_no]);
		g = (struct lio_gather *)
		    lio_delete_first_node(&lio->ghead[iq_no]);
		mtx_unlock(&lio->glist_lock[iq_no]);

		if (g == NULL) {
			lio_dev_err(oct,
				    "Transmit scatter gather: glist null!\n");
			goto retry;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = nsegs;
		lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		bzero(g->sg, g->sg_size);

		/* Fill the gather entries: four pointers per SG descriptor. */
		i = 0;
		while (nsegs--) {
			g->sg[(i >> 2)].ptr[(i & 3)] = segs[i].ds_addr;
			lio_add_sg_size(&g->sg[(i >> 2)], segs[i].ds_len,
					(i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		ndata.cmd.cmd3.dptr = dptr;
		finfo->g = g;

		ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
	}

	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
	tx_info = (union lio_tx_info *)&ndata.cmd.cmd3.ossp[0];

	/* TSO: tell the firmware the MSS and expected segment count. */
	if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
		tx_info->s.gso_size = m_head->m_pkthdr.tso_segsz;
		tx_info->s.gso_segs = howmany(m_head->m_pkthdr.len,
					      m_head->m_pkthdr.tso_segsz);
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		irh->priority = m_head->m_pkthdr.ether_vtag >> 13;
		irh->vlan = m_head->m_pkthdr.ether_vtag & 0xfff;
	}

	status = lio_send_data_pkt(oct, &ndata);
	if (status == LIO_IQ_SEND_FAILED)
		goto retry;

	/* Count one completion per TSO segment, else one per packet. */
	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return (0);

retry:
	return (ENOBUFS);

drop_packet:
	stats->tx_dropped++;
	lio_dev_err(oct, "IQ%d Transmit dropped: %lu\n", iq_no,
		    stats->tx_dropped);

	m_freem(*m_headp);
	*m_headp = NULL;
	return (status);
}
/*
 * Drain the buf_ring of instruction queue 'iq', transmitting each
 * frame with lio_xmit().  Must be called with iq->enq_lock held.
 * Stops early when the interface goes down, the link drops, or a
 * transmit fails (failed frames are put back unless lio_xmit()
 * already consumed them).  Returns 0 or the last lio_xmit() error
 * (-ENETDOWN if the interface/link is down on entry).
 */
int
lio_mq_start_locked(struct ifnet *ifp, struct lio_instr_queue *iq)
{
	struct lio	*softc = if_getsoftc(ifp);
	struct mbuf	*mb;
	int		rc = 0;

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
	    !softc->linfo.link.s.link_up)
		return (-ENETDOWN);

	for (;;) {
		mb = drbr_peek(ifp, iq->br);
		if (mb == NULL)
			break;

		rc = lio_xmit(softc, iq, &mb);
		if (rc != 0) {
			/*
			 * lio_xmit() NULLs the mbuf pointer when it has
			 * already freed the frame; only put back frames
			 * it left intact.
			 */
			if (mb != NULL)
				drbr_putback(ifp, iq->br, mb);
			else
				drbr_advance(ifp, iq->br);
			break;
		}
		drbr_advance(ifp, iq->br);

		/* Send a copy of the frame to the BPF listener. */
		ETHER_BPF_MTAP(ifp, mb);

		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
		    !softc->linfo.link.s.link_up)
			break;
	}

	return (rc);
}
/*
 * if_transmit handler.  Pick a transmit queue for the mbuf -- from the
 * RSS bucket or flow id when the mbuf carries a hash, otherwise from
 * the current CPU -- enqueue it on that queue's buf_ring, and drain the
 * ring immediately if the queue's enqueue lock is uncontended.
 *
 * Returns 0 on success or the drbr_enqueue() error.
 */
int
lio_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_instr_queue	*iq;
	int			err = 0, i;
#ifdef RSS
	uint32_t		bucket_id;
#endif

	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
		    &bucket_id) == 0) {
			i = bucket_id % oct->num_iqs;
			/*
			 * Valid queue indices are 0 .. num_iqs - 1, so the
			 * bucket fails to map 1:1 onto a queue as soon as
			 * bucket_id >= num_iqs ('>' missed the
			 * bucket_id == num_iqs case).
			 */
			if (bucket_id >= oct->num_iqs)
				lio_dev_dbg(oct,
				    "bucket_id (%d) > num_iqs (%d)\n",
				    bucket_id, oct->num_iqs);
		} else
#endif
			i = m->m_pkthdr.flowid % oct->num_iqs;
	} else
		i = curcpu % oct->num_iqs;

	iq = oct->instr_queue[i];

	err = drbr_enqueue(ifp, iq->br, m);
	if (err)
		return (err);

	/* Opportunistically drain the ring if no one else is sending. */
	if (mtx_trylock(&iq->enq_lock)) {
		lio_mq_start_locked(ifp, iq);
		mtx_unlock(&iq->enq_lock);
	}

	return (err);
}
/*
 * if_qflush handler: free every mbuf queued on each active instruction
 * queue's buf_ring (under that queue's enqueue lock), then run the
 * generic if_qflush() on the interface itself.
 */
void
lio_qflush(struct ifnet *ifp)
{
	struct lio		*softc = if_getsoftc(ifp);
	struct octeon_device	*oct = softc->oct_dev;
	struct lio_instr_queue	*queue;
	struct mbuf		*mb;
	int			qidx;

	for (qidx = 0; qidx < LIO_MAX_INSTR_QUEUES(oct); qidx++) {
		/* Skip queues that were never brought up. */
		if ((oct->io_qmask.iq & BIT_ULL(qidx)) == 0)
			continue;

		queue = oct->instr_queue[qidx];
		mtx_lock(&queue->enq_lock);
		for (;;) {
			mb = buf_ring_dequeue_sc(queue->br);
			if (mb == NULL)
				break;
			m_freem(mb);
		}
		mtx_unlock(&queue->enq_lock);
	}

	if_qflush(ifp);
}

View File

@ -0,0 +1,86 @@
/*
* BSD LICENSE
*
* Copyright(c) 2017 Cavium, Inc.. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Cavium, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _LIO_RXTX_H_
#define _LIO_RXTX_H_
/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS 0x01
#define LIO_IFSTATE_REGISTERED 0x02
#define LIO_IFSTATE_RUNNING 0x04
#define LIO_IFSTATE_DETACH 0x08
#define LIO_IFSTATE_RESETTING 0x10
/*
* Structure of a node in list of gather components maintained by
* NIC driver for each network device.
*/
/*
 * Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct lio_gather {
	/* List manipulation. Next and prev pointers. */
	struct lio_stailq_node	node;

	/* Size of the gather component at sg in bytes. */
	int			sg_size;

	/*
	 * Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct lio_sg_entry	*sg;

	/* Bus (DMA) address of sg; programmed into the TX command's dptr. */
	uint64_t		sg_dma_ptr;
};
/*
 * TSO parameters carried in the transmit command's opcode-specific
 * word (cmd3.ossp[0]): the MSS and the expected segment count
 * (see lio_xmit()).
 */
union lio_tx_info {
	uint64_t	tx_info64;
	struct {
#if _BYTE_ORDER == _BIG_ENDIAN
		uint16_t	gso_size;	/* TSO MSS (m_pkthdr.tso_segsz) */
		uint16_t	gso_segs;	/* number of TSO segments */
		uint32_t	reserved;
#else	/* _BYTE_ORDER == _LITTLE_ENDIAN */
		uint32_t	reserved;
		uint16_t	gso_segs;
		uint16_t	gso_size;
#endif
	} s;
};
int lio_xmit(struct lio *lio, struct lio_instr_queue *iq,
struct mbuf **m_headp);
int lio_mq_start_locked(struct ifnet *ifp, struct lio_instr_queue *iq);
int lio_mq_start(struct ifnet *ifp, struct mbuf *m);
void lio_qflush(struct ifnet *ifp);
#endif /* _LIO_RXTX_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -226,6 +226,7 @@ SUBDIR= \
${_linux_common} \
${_linux64} \
linuxkpi \
${_lio} \
lmc \
lpt \
mac_biba \
@ -710,6 +711,7 @@ _ixl= ixl
_ixlv= ixlv
_linux64= linux64
_linux_common= linux_common
_lio= lio
_ntb= ntb
_pms= pms
_qlxge= qlxge

27
sys/modules/lio/Makefile Normal file
View File

@ -0,0 +1,27 @@
#$FreeBSD$
#.PATH: ${.CURDIR}
.PATH: ${.CURDIR}/../../dev/liquidio
.PATH: ${.CURDIR}/../../dev/liquidio/base
KMOD = if_lio
CFLAGS += -I${.CURDIR}/../../dev/liquidio
CFLAGS += -I${.CURDIR}/../../dev/liquidio/base
CFLAGS += -DSMP
SRCS = device_if.h bus_if.h pci_if.h vnode_if.h opt_inet.h opt_inet6.h
SRCS += vnode_if_newproto.h vnode_if_typedef.h
SRCS += lio_main.c lio_sysctl.c lio_ioctl.c lio_core.c
SRCS += lio_rxtx.c lio_rss.c
SRCS += lio_device.c lio_ctrl.c
SRCS += lio_console.c cn23xx_pf_device.c
SRCS += lio_mem_ops.c lio_droq.c
SRCS += lio_response_manager.c lio_request_manager.c
FIRMWS = lio_23xx_nic.bin.fw:lio_23xx_nic.bin
CLEANFILES+= lio_23xx_nic.bin.fw
lio_23xx_nic.bin.fw: ${.CURDIR}/../../contrib/dev/liquidio/lio_23xx_nic.bin.uu
uudecode -p ${.CURDIR}/../../contrib/dev/liquidio/lio_23xx_nic.bin.uu > ${.TARGET}
.include <bsd.kmod.mk>