ice(4): Introduce new driver for Intel E800 Ethernet controllers

The ice(4) driver supports the Intel E8xx series of Ethernet
controllers, currently known by the codenames Columbiaville and
Columbia Park.

These new controllers support speeds of up to 100G and introduce
more queues, better virtualization support, and additional offload
capabilities. Future work will enable virtual functions (as in
ixl(4)) and the other functionality outlined above.

For full functionality, the kernel should be compiled with
"device ice_ddp" as in the amd64 NOTES file, and/or
ice_ddp_load="YES" should be added to /boot/loader.conf, so that
the DDP package file included in this commit can be downloaded
to the adapter. Otherwise, the adapter falls back to a
single-queue mode with limited functionality.
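
As an illustration, the relevant configuration might look like this
(a sketch; the device lines match the NOTES entries added in this
commit):

    # kernel configuration: build the DDP package into the kernel
    device          ice             # Intel 800 Series Physical Function
    device          ice_ddp         # Intel 800 Series DDP Package

    # or, in /boot/loader.conf: load the DDP package as a module
    ice_ddp_load="YES"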

A man page for this driver will be forthcoming.

MFC after:	1 month
Relnotes:	yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D21959
Committed by:	erj
Date:		2020-05-26 23:35:10 +00:00
Commit:		ba15208e99 (parent 18da85dd2f)
62 changed files with 64254 additions and 0 deletions


@@ -99,10 +99,12 @@ sys/compat/linuxkpi hselasky If in doubt, ask.
#x11 phabricator group.
(to avoid drm graphics drivers
impact)
sys/contrib/dev/ice erj Pre-commit phabricator review requested.
sys/contrib/ipfilter cy Pre-commit review requested.
sys/dev/e1000 erj Pre-commit phabricator review requested.
sys/dev/ixgbe erj Pre-commit phabricator review requested.
sys/dev/ixl erj Pre-commit phabricator review requested.
sys/dev/ice erj Pre-commit phabricator review requested.
sys/dev/sound/usb hselasky If in doubt, ask.
sys/dev/usb hselasky If in doubt, ask.
sys/dev/xen royger Pre-commit review recommended.


@@ -245,6 +245,7 @@ device ix # Intel PRO/10GbE PCIE PF Ethernet
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
device ice # Intel 800 Series Physical Function
device vmx # VMware VMXNET3 Ethernet
# PCI Ethernet NICs.


@@ -291,6 +291,8 @@ device cpufreq
# bxe: Broadcom NetXtreme II (BCM5771X/BCM578XX) PCIe 10Gb Ethernet
# adapters.
# ice: Intel 800 Series Physical Function
# Requires the ice_ddp module for full functionality
# ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter
# Requires the ipw firmware module
# iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters
@@ -316,6 +318,8 @@ device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs.
device iwn # Intel 4965/1000/5000/6000 wireless NICs.
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
device ice # Intel 800 Series Physical Function
device ice_ddp # Intel 800 Series DDP Package
device mthca # Mellanox HCA InfiniBand
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand


@@ -83,6 +83,8 @@ device vnic # Cavium ThunderX NIC
device al_eth # Annapurna Alpine Ethernet NIC
device dwc_rk # Rockchip Designware
device dwc_socfpga # Altera SOCFPGA Ethernet MAC
device ice # Intel 800 Series Physical Function
device ice_ddp # Intel 800 Series DDP Package
# Etherswitch devices
device e6000sw # Marvell mv88e6085 based switches


@@ -144,6 +144,52 @@ dev/agp/agp_via.c optional agp
dev/amdgpio/amdgpio.c optional amdgpio
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
dev/ice/if_ice_iflib.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_lib.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_osdep.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_resmgr.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_strings.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_recovery_txrx.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_txrx.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_common.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_controlq.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_dcb.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flex_pipe.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flow.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_nvm.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sriov.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
no-implicit-rule before-depend local \
clean "ice_ddp.c"
ice_ddp.fwo optional ice_ddp \
dependency "ice_ddp.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ice_ddp.fwo"
ice_ddp.fw optional ice_ddp \
dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
no-obj no-implicit-rule \
clean "ice_ddp.fw"
dev/ioat/ioat.c optional ioat pci
dev/ioat/ioat_test.c optional ioat pci
dev/ixl/if_ixl.c optional ixl pci \


@@ -240,6 +240,52 @@ dev/axgbe/xgbe-dev.c optional axgbe
dev/axgbe/xgbe-drv.c optional axgbe
dev/axgbe/xgbe-mdio.c optional axgbe
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/ice/if_ice_iflib.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_lib.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_osdep.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_resmgr.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_strings.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_recovery_txrx.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_txrx.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_common.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_controlq.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_dcb.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flex_pipe.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flow.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_nvm.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sriov.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
no-implicit-rule before-depend local \
clean "ice_ddp.c"
ice_ddp.fwo optional ice_ddp \
dependency "ice_ddp.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ice_ddp.fwo"
ice_ddp.fw optional ice_ddp \
dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
no-obj no-implicit-rule \
clean "ice_ddp.fw"
dev/iicbus/sy8106a.c optional sy8106a fdt
dev/iicbus/twsi/mv_twsi.c optional twsi fdt
dev/iicbus/twsi/a10_twsi.c optional twsi fdt


@@ -0,0 +1,41 @@
Copyright (c) 2006-2018, Intel Corporation.
All rights reserved.
Redistribution. Redistribution and use in binary form, without
modification, are permitted provided that the following conditions are
met:
* Redistributions must reproduce the above copyright notice and the
following disclaimer in the documentation and/or other materials
provided with the distribution.
* Neither the name of Intel Corporation nor the names of its suppliers
may be used to endorse or promote products derived from this software
without specific prior written permission.
* No reverse engineering, decompilation, or disassembly of this software
is permitted.
Limited patent license. Intel Corporation grants a world-wide,
royalty-free, non-exclusive license under patents it now or hereafter
owns or controls to make, have made, use, import, offer to sell and
sell ("Utilize") this software, but solely to the extent that any
such patent is necessary to Utilize the software alone, or in
combination with an operating system licensed under an approved Open
Source license as listed by the Open Source Initiative at
http://opensource.org/licenses. The patent license shall not apply to
any other combinations which include this software. No hardware per
se is licensed hereunder.
DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.

sys/contrib/dev/ice/README: new file, 197 lines

@@ -0,0 +1,197 @@
Dynamic Device Personalization (DDP) Package
============================================
February 21, 2020
Contents
========
- Overview
- Safe Mode
- Notes
- Installation & Troubleshooting
- Legal
Overview
========
Adapters based on the Intel(R) Ethernet Controller 800 Series require a Dynamic
Device Personalization (DDP) package file to enable advanced features (such as
dynamic tunneling, Flow Director, RSS, and ADQ).
DDP allows you to change the packet processing pipeline of a device by applying
a profile package to the device at runtime. Profiles can be used to, for
example, add support for new protocols, change existing protocols, or change
default settings. DDP profiles can also be rolled back without rebooting the
system.
The DDP package loads during device initialization. The driver checks to see if
the DDP package is present and compatible. If this file exists, the driver will
load it into the device. If the DDP package file is missing or incompatible
with the driver, the driver will go into Safe Mode where it will use the
configuration contained in the device's NVM. See "Safe Mode" later in this
README for more information.
A general purpose, OS-default DDP package is automatically installed with all
supported Intel Ethernet Controller 800 Series drivers on Microsoft* Windows*,
ESX*, FreeBSD*, and Linux* operating systems. Additional DDP packages are
available to address needs for specific market segments. For example, a
telecommunications (Comms) DDP package is available to support certain
market-specific protocols in addition to the protocols in the OS-default
package.
The OS-default DDP package supports the following:
- MAC
- EtherType
- VLAN
- IPv4
- IPv6
- TCP
- ARP
- UDP
- SCTP
- ICMP
- ICMPv6
- CTRL
- LLDP
- VXLAN-GPE
- VXLAN (non-GPE)
- Geneve
- GRE
- NVGRE
- RoCEv2
Safe Mode
=========
Safe Mode disables advanced and performance features, and supports only basic
traffic and minimal functionality, such as updating the NVM or downloading a
new driver or DDP package.
See the Intel(R) Ethernet Adapters and Devices User Guide for more details on
DDP and Safe Mode.
Notes
=====
- You cannot update the DDP package if any PF drivers are already loaded. To
overwrite a package, unload all PFs and then reload the driver with the new
package.
- Except for Linux, you can only use one DDP package per driver, even if you
have more than one device installed that uses the driver.
- Only the first loaded PF per device can download a package for that device.
- If you are using DPDK, see the DPDK documentation at https://www.dpdk.org/
for installation instructions and more information.
Installation and Troubleshooting
================================
Microsoft* Windows*
-------------------
The DDP package is installed as part of the driver binary. You don't need to
take additional steps to install the DDP package file.
If you encounter issues with the DDP package file, download the latest driver.
ESX
---
The DDP package is installed as part of the driver binary. You don't need to
take additional steps to install the DDP package file.
If you encounter issues with the DDP package file, download the latest driver.
FreeBSD
-------
The FreeBSD driver automatically installs the default DDP package file during
driver installation. See the ice driver README for general installation and
building instructions.
The DDP package loads during device initialization. The driver looks for the
ice_ddp module and checks that it contains a valid DDP package file.
If you encounter issues with the DDP package file, you may need to download an
updated driver or ice_ddp module. See the log messages for more information.
NOTE: It's important to do 'make install' during initial ice driver
installation so that the driver loads the DDP package automatically.
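
For example, to make the DDP package module available (a sketch based on
the module name used by this commit's build rules):

# echo 'ice_ddp_load="YES"' >> /boot/loader.conf

Or load it manually before the driver attaches:

# kldload ice_ddp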
Linux
-----
The Linux driver automatically installs the default DDP package file during
driver installation. See the ice driver README for general installation and
building instructions.
The DDP package loads during device initialization. The driver looks for
intel/ice/ddp/ice.pkg in your firmware root (typically /lib/firmware/ or
/lib/firmware/updates/) and checks that it contains a valid DDP package file.
The ice.pkg file is a symbolic link to the default DDP package file installed
by the linux-firmware software package or the ice out-of-tree driver
installation.
If you encounter issues with the DDP package file, you may need to download an
updated driver or DDP package file. See the log messages for more information.
You can install specific DDP package files for different physical devices in
the same system. To install a specific DDP package:
1. Download the DDP package file (ice-x.x.x.x.zip) you want for your device. In
addition to licensing information and this README, this zip file contains the
following files:
ice-x.x.x.x.pkg
ice.pkg
NOTE: The ice.pkg file is a Linux symbolic link file pointing to
ice-x.x.x.x.pkg (in the same path).
2. Rename the ice-x.x.x.x.pkg file as ice-xxxxxxxxxxxxxxxx.pkg, where
'xxxxxxxxxxxxxxxx' is the unique 64-bit PCI Express device serial number (in
hex) of the device you want the package downloaded on. The filename must
include the complete serial number (including leading zeros) and be all
lowercase. For example, if the 64-bit serial number is b887a3ffffca0568, then
the file name would be ice-b887a3ffffca0568.pkg.
To find the serial number from the PCI bus address, you can use the following
command:
# lspci -vv -s af:00.0 | grep -i Serial
Capabilities: [150 v1] Device Serial Number b8-87-a3-ff-ff-ca-05-68
You can use the following command to format the serial number without the
dashes:
# lspci -vv -s af:00.0 | grep -i Serial | awk '{print $7}' | sed s/-//g
b887a3ffffca0568
3. Copy the renamed DDP package file to /lib/firmware/updates/intel/ice/ddp/.
If the directory does not yet exist, create it before copying the file.
4. Unload all of the PFs on the device.
5. Reload the driver with the new package.
NOTE: The presence of a device-specific DDP package file overrides the loading
of the default DDP package file (ice.pkg).
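
As a worked example, steps 1 through 5 can be combined as follows (a
sketch assuming a device at PCI address af:00.0 and the package file
ice-1.3.9.0.pkg; adjust both for your system):

# serial=$(lspci -vv -s af:00.0 | grep -i Serial | awk '{print $7}' | sed s/-//g)
# mkdir -p /lib/firmware/updates/intel/ice/ddp/
# cp ice-1.3.9.0.pkg /lib/firmware/updates/intel/ice/ddp/ice-${serial}.pkg
# rmmod ice && modprobe ice

The first command derives the serial number without dashes, the next two
install the package under the device-specific name, and the final one
unloads all PFs on the device and reloads the driver with the new package.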
Legal / Disclaimers
===================
Copyright (c) 2019 - 2020, Intel Corporation.
Intel and the Intel logo are trademarks of Intel Corporation or its
subsidiaries in the U.S. and/or other countries.
*Other names and brands may be claimed as the property of others.
This software and the related documents are Intel copyrighted materials, and
your use of them is governed by the express license under which they were
provided to you ("License"). Unless the License provides otherwise, you may not
use, modify, copy, publish, distribute, disclose or transmit this software or
the related documents without Intel's prior written permission.
This software and the related documents are provided as is, with no express or
implied warranties, other than those that are expressly stated in the License.

sys/contrib/dev/ice/ice-1.3.9.0.pkg: binary file, not shown

sys/dev/ice/ice_adminq_cmd.h: new file, 2968 lines (file diff suppressed because it is too large)

sys/dev/ice/ice_alloc.h: new file, 50 lines

@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_ALLOC_H_
#define _ICE_ALLOC_H_
/* Memory types */
enum ice_memset_type {
ICE_NONDMA_MEM = 0,
ICE_DMA_MEM
};
/* Memcpy types */
enum ice_memcpy_type {
ICE_NONDMA_TO_NONDMA = 0,
ICE_NONDMA_TO_DMA,
ICE_DMA_TO_DMA,
ICE_DMA_TO_NONDMA
};
#endif /* _ICE_ALLOC_H_ */

sys/dev/ice/ice_bitops.h: new file, 407 lines

@@ -0,0 +1,407 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_BITOPS_H_
#define _ICE_BITOPS_H_
/* Define the size of the bitmap chunk */
typedef u32 ice_bitmap_t;
/* Number of bits per bitmap chunk */
#define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t))
/* Determine which chunk a bit belongs in */
#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
/* How many chunks are required to store this many bits */
#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK)
/* Which bit inside a chunk this bit corresponds to */
#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
/* How many bits are valid in the last chunk, assumes nr > 0 */
#define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1)
/* Generate a bitmask of valid bits in the last chunk, assumes nr > 0 */
#define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \
(BITS_PER_CHUNK - LAST_CHUNK_BITS(nr)))
#define ice_declare_bitmap(A, sz) \
ice_bitmap_t A[BITS_TO_CHUNKS(sz)]
static inline bool ice_is_bit_set_internal(u16 nr, const ice_bitmap_t *bitmap)
{
return !!(*bitmap & BIT(nr));
}
/*
* If atomic version of the bitops are required, each specific OS
* implementation will need to implement OS/platform specific atomic
* version of the functions below:
*
* ice_clear_bit_internal
* ice_set_bit_internal
* ice_test_and_clear_bit_internal
* ice_test_and_set_bit_internal
*
* and define macro ICE_ATOMIC_BITOPS to overwrite the default non-atomic
* implementation.
*/
static inline void ice_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap)
{
*bitmap &= ~BIT(nr);
}
static inline void ice_set_bit_internal(u16 nr, ice_bitmap_t *bitmap)
{
*bitmap |= BIT(nr);
}
static inline bool ice_test_and_clear_bit_internal(u16 nr,
ice_bitmap_t *bitmap)
{
if (ice_is_bit_set_internal(nr, bitmap)) {
ice_clear_bit_internal(nr, bitmap);
return true;
}
return false;
}
static inline bool ice_test_and_set_bit_internal(u16 nr, ice_bitmap_t *bitmap)
{
if (ice_is_bit_set_internal(nr, bitmap))
return true;
ice_set_bit_internal(nr, bitmap);
return false;
}
/**
* ice_is_bit_set - Check state of a bit in a bitmap
* @bitmap: the bitmap to check
* @nr: the bit to check
*
* Returns true if bit nr of bitmap is set. False otherwise. Assumes that nr
* is less than the size of the bitmap.
*/
static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, u16 nr)
{
return ice_is_bit_set_internal(BIT_IN_CHUNK(nr),
&bitmap[BIT_CHUNK(nr)]);
}
/**
* ice_clear_bit - Clear a bit in a bitmap
* @bitmap: the bitmap to change
* @nr: the bit to change
*
* Clears the bit nr in bitmap. Assumes that nr is less than the size of the
* bitmap.
*/
static inline void ice_clear_bit(u16 nr, ice_bitmap_t *bitmap)
{
ice_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
}
/**
* ice_set_bit - Set a bit in a bitmap
* @bitmap: the bitmap to change
* @nr: the bit to change
*
* Sets the bit nr in bitmap. Assumes that nr is less than the size of the
* bitmap.
*/
static inline void ice_set_bit(u16 nr, ice_bitmap_t *bitmap)
{
ice_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
}
/**
* ice_test_and_clear_bit - Atomically clear a bit and return the old bit value
* @nr: the bit to change
* @bitmap: the bitmap to change
*
* Check and clear the bit nr in bitmap. Assumes that nr is less than the size
* of the bitmap.
*/
static inline bool
ice_test_and_clear_bit(u16 nr, ice_bitmap_t *bitmap)
{
return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr),
&bitmap[BIT_CHUNK(nr)]);
}
/**
* ice_test_and_set_bit - Atomically set a bit and return the old bit value
* @nr: the bit to change
* @bitmap: the bitmap to change
*
* Check and set the bit nr in bitmap. Assumes that nr is less than the size of
* the bitmap.
*/
static inline bool
ice_test_and_set_bit(u16 nr, ice_bitmap_t *bitmap)
{
return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr),
&bitmap[BIT_CHUNK(nr)]);
}
/**
* ice_zero_bitmap - set bits of bitmap to zero.
* @bmp: bitmap to set zeros
* @size: Size of the bitmaps in bits
*
* Set all of the bits in a bitmap to zero. Note that this function assumes it
* operates on an ice_bitmap_t which was declared using ice_declare_bitmap. It
* will zero every bit in the last chunk, even if those bits are beyond the
* size.
*/
static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
{
ice_memset(bmp, 0, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t),
ICE_NONDMA_MEM);
}
/**
* ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receives the result of the operation
* @bmp1: The first bitmap to intersect
* @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise AND on two "source" bitmaps of the same size
* and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
* size as the "source" bitmaps to avoid buffer overflows. This function returns
* a non-zero value if at least one bit location from both "source" bitmaps is
* non-zero.
*/
static inline int
ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
const ice_bitmap_t *bmp2, u16 size)
{
ice_bitmap_t res = 0, mask;
u16 i;
/* Handle all but the last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) {
dst[i] = bmp1[i] & bmp2[i];
res |= dst[i];
}
/* We want to take care not to modify any bits outside of the bitmap
* size, even in the destination bitmap. Thus, we won't directly
* assign the last bitmap, but instead use a bitmask to ensure we only
* modify bits which are within the size, and leave any bits above the
* size value alone.
*/
mask = LAST_CHUNK_MASK(size);
dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask);
res |= dst[i] & mask;
return res != 0;
}
/**
* ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receives the result of the operation
* @bmp1: The first bitmap of the OR operation
* @bmp2: The second bitmap to OR with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise OR on two "source" bitmaps of the same size
* and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
* size as the "source" bitmaps to avoid buffer overflows.
*/
static inline void
ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
const ice_bitmap_t *bmp2, u16 size)
{
ice_bitmap_t mask;
u16 i;
/* Handle all but the last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
dst[i] = bmp1[i] | bmp2[i];
/* We want to only OR bits within the size. Furthermore, we also do
* not want to modify destination bits which are beyond the specified
* size. Use a bitmask to ensure that we only modify the bits that are
* within the specified size.
*/
mask = LAST_CHUNK_MASK(size);
dst[i] = (dst[i] & ~mask) | ((bmp1[i] | bmp2[i]) & mask);
}
/**
* ice_xor_bitmap - bitwise XOR 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receives the result of the operation
* @bmp1: The first bitmap of XOR operation
* @bmp2: The second bitmap to XOR with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise XOR on two "source" bitmaps of the same size
* and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
* size as the "source" bitmaps to avoid buffer overflows.
*/
static inline void
ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
const ice_bitmap_t *bmp2, u16 size)
{
ice_bitmap_t mask;
u16 i;
/* Handle all but the last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
dst[i] = bmp1[i] ^ bmp2[i];
/* We want to only XOR bits within the size. Furthermore, we also do
* not want to modify destination bits which are beyond the specified
* size. Use a bitmask to ensure that we only modify the bits that are
* within the specified size.
*/
mask = LAST_CHUNK_MASK(size);
dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask);
}
/**
* ice_find_next_bit - Find the index of the next set bit of a bitmap
* @bitmap: the bitmap to scan
* @size: the size in bits of the bitmap
* @offset: the offset to start at
*
* Scans the bitmap and returns the index of the first set bit which is equal
* to or after the specified offset. Will return size if no bits are set.
*/
static inline u16
ice_find_next_bit(const ice_bitmap_t *bitmap, u16 size, u16 offset)
{
u16 i, j;
if (offset >= size)
return size;
/* Since the starting position may not be directly on a chunk
* boundary, we need to be careful to handle the first chunk specially
*/
i = BIT_CHUNK(offset);
if (bitmap[i] != 0) {
u16 off = i * BITS_PER_CHUNK;
for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) {
if (ice_is_bit_set(bitmap, off + j))
return min(size, (u16)(off + j));
}
}
/* Now we handle the remaining chunks, if any */
for (i++; i < BITS_TO_CHUNKS(size); i++) {
if (bitmap[i] != 0) {
u16 off = i * BITS_PER_CHUNK;
for (j = 0; j < BITS_PER_CHUNK; j++) {
if (ice_is_bit_set(bitmap, off + j))
return min(size, (u16)(off + j));
}
}
}
return size;
}
/**
* ice_find_first_bit - Find the index of the first set bit of a bitmap
* @bitmap: the bitmap to scan
* @size: the size in bits of the bitmap
*
* Scans the bitmap and returns the index of the first set bit. Will return
* size if no bits are set.
*/
static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size)
{
return ice_find_next_bit(bitmap, size, 0);
}
/**
* ice_is_any_bit_set - Return true if any bit in the bitmap is set
* @bitmap: the bitmap to check
* @size: the size of the bitmap
*
* Equivalent to checking if ice_find_first_bit returns a value less than the
* bitmap size.
*/
static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size)
{
return ice_find_first_bit(bitmap, size) < size;
}
/**
* ice_cp_bitmap - copy bitmaps.
* @dst: bitmap destination
* @src: bitmap to copy from
* @size: Size of the bitmaps in bits
*
* This function copies the bitmap from src to dst. Note that this function assumes
* it is operating on a bitmap declared using ice_declare_bitmap. It will copy
* the entire last chunk even if this contains bits beyond the size.
*/
static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size)
{
ice_memcpy(dst, src, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t),
ICE_NONDMA_TO_NONDMA);
}
/**
* ice_cmp_bitmap - compares two bitmaps.
* @bmp1: the bitmap to compare
* @bmp2: the bitmap to compare with bmp1
* @size: Size of the bitmaps in bits
*
* This function compares two bitmaps, and returns true if they are equal
* within the given size, false otherwise.
*/
static inline bool
ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size)
{
ice_bitmap_t mask;
u16 i;
/* Handle all but the last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
if (bmp1[i] != bmp2[i])
return false;
/* We want to only compare bits within the size. */
mask = LAST_CHUNK_MASK(size);
if ((bmp1[i] & mask) != (bmp2[i] & mask))
return false;
return true;
}
#undef BIT_CHUNK
#undef BIT_IN_CHUNK
#undef LAST_CHUNK_BITS
#undef LAST_CHUNK_MASK
#endif /* _ICE_BITOPS_H_ */
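
As a usage sketch, the bitmap helpers above compose as follows
(hypothetical example, not part of the committed file; it assumes the
u16 type and the helpers declared above are in scope):

static inline void ice_example_walk_bitmap(void)
{
	/* Declare a 100-bit bitmap; storage rounds up to whole chunks */
	ice_declare_bitmap(active_queues, 100);
	u16 qid;

	ice_zero_bitmap(active_queues, 100);
	ice_set_bit(5, active_queues);
	ice_set_bit(64, active_queues);

	/* Walk every set bit; ice_find_next_bit returns 100 when done */
	for (qid = ice_find_first_bit(active_queues, 100);
	     qid < 100;
	     qid = ice_find_next_bit(active_queues, 100, qid + 1))
		; /* per-bit work would go here */
}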

sys/dev/ice/ice_common.c: new file, 4895 lines (file diff suppressed because it is too large)

sys/dev/ice/ice_common.h: new file, 302 lines

@@ -0,0 +1,302 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_COMMON_H_
#define _ICE_COMMON_H_
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
#include "virtchnl.h"
#include "ice_switch.h"
enum ice_fw_modes {
ICE_FW_MODE_NORMAL,
ICE_FW_MODE_DBG,
ICE_FW_MODE_REC,
ICE_FW_MODE_ROLLBACK
};
/* prototype for functions used for SW locks */
void ice_free_list(struct LIST_HEAD_TYPE *list);
void ice_init_lock(struct ice_lock *lock);
void ice_acquire_lock(struct ice_lock *lock);
void ice_release_lock(struct ice_lock *lock);
void ice_destroy_lock(struct ice_lock *lock);
void *ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m, u64 size);
void ice_free_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m);
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
enum ice_status
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
enum ice_status ice_update_link_info(struct ice_port_info *pi);
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd);
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
enum ice_status ice_set_mac_type(struct ice_hw *hw);
/* Define a macro that will align a pointer to point to the next memory address
* that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For
* example, given the variable pointer = 0x1006, then after the following call:
*
* pointer = ICE_ALIGN(pointer, 4)
*
* ... the value of pointer would equal 0x1008, since 0x1008 is the next
* address after 0x1006 which is divisible by 4.
*/
#define ICE_ALIGN(ptr, align) (((ptr) + ((align) - 1)) & ~((align) - 1))
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
u32 tx_cmpltnq_index);
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index);
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
u32 tx_drbell_q_index);
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 count,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
bool is_tc_change, bool subseq_call, bool flush_pipe,
u8 timeout, u32 *blocked_cgds,
struct ice_aqc_move_txqs_data *buf, u16 buf_size,
u8 *txqs_moved, struct ice_sq_cd *cd);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool save_bad_pac, bool pad_short_pac, bool double_vlan,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec);
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info);
enum ice_status
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data);
enum ice_status
__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_lanqs);
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
struct ice_eth_stats *cur_stats);
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
bool ice_is_generic_mac(struct ice_hw *hw);
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
enum ice_status
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
enum ice_status
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode,
bool *reset_needed);
enum ice_status ice_aq_alternate_clear(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */


@@ -0,0 +1,100 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_common_sysctls.h
* @brief driver wide sysctls not related to the iflib stack
*
* Contains static sysctl values which are driver wide and configure all
* devices of the driver at once.
*
* Device specific sysctls are setup by functions in ice_lib.c
*/
#ifndef _ICE_COMMON_SYSCTLS_H_
#define _ICE_COMMON_SYSCTLS_H_
#include <sys/sysctl.h>
/**
* @var ice_enable_tx_fc_filter
* @brief boolean indicating if the Tx Flow Control filter should be enabled
*
* Global sysctl variable indicating whether the Tx Flow Control filters
* should be enabled. If true, Ethertype 0x8808 packets will be dropped if
* they come from non-HW sources. If false, packets coming from software will
* not be dropped. Leave this on unless you must send flow control frames
* (or other control frames) from software.
*
* @remark each PF has a separate sysctl which can override this value.
*/
bool ice_enable_tx_fc_filter = true;
/**
* @var ice_enable_tx_lldp_filter
* @brief boolean indicating if the Tx LLDP filter should be enabled
*
* Global sysctl variable indicating whether the Tx LLDP filter
* should be enabled. If true, Ethertype 0x88cc packets will be dropped if
* they come from non-HW sources. If false, packets coming from software will
* not be dropped. Leave this on unless you must send LLDP frames from
* software.
*
* @remark each PF has a separate sysctl which can override this value.
*/
bool ice_enable_tx_lldp_filter = true;
/* sysctls marked as tunable, (i.e. with the CTLFLAG_TUN set) will
* automatically load tunable values, without the need to manually create the
* TUNABLE definition.
*
* This works since at least FreeBSD 11, and was backported into FreeBSD 10
* before the FreeBSD 10.1-RELEASE.
*
* If the tunable needs a custom loader, mark the SYSCTL as CTLFLAG_NOFETCH,
* and create the tunable manually.
*/
static SYSCTL_NODE(_hw, OID_AUTO, ice, CTLFLAG_RD, 0, "ICE driver parameters");
static SYSCTL_NODE(_hw_ice, OID_AUTO, debug, ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 0,
"ICE driver debug parameters");
SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
&ice_enable_tx_fc_filter, 0,
"Drop Ethertype 0x8808 control frames originating from non-HW sources");
SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_lldp_filter, CTLFLAG_RDTUN,
&ice_enable_tx_lldp_filter, 0,
"Drop Ethertype 0x88cc LLDP frames originating from non-HW sources");
#endif /* _ICE_COMMON_SYSCTLS_H_ */
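
Because both sysctls are marked CTLFLAG_RDTUN, they can also be set as
loader tunables. For example (a sketch; the OID names follow the
SYSCTL_NODE and SYSCTL_BOOL declarations above):

# /boot/loader.conf
hw.ice.debug.enable_tx_fc_filter="0"
hw.ice.debug.enable_tx_lldp_filter="0"

# read back at runtime
sysctl hw.ice.debug.enable_tx_lldp_filter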


@@ -0,0 +1,424 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_common_txrx.h
* @brief common Tx/Rx utility functions
*
* Contains common utility functions for the Tx/Rx hot path.
*
* The functions do depend on the if_pkt_info_t structure. A suitable
* implementation of this structure must be provided if these functions are to
* be used without the iflib networking stack.
*/
#ifndef _ICE_COMMON_TXRX_H_
#define _ICE_COMMON_TXRX_H_
#include <netinet/udp.h>
#include <netinet/sctp.h>
/**
* ice_tso_detect_sparse - detect TSO packets with too many segments
* @pi: packet information
*
* Hardware only transmits packets with a maximum of 8 descriptors. For TSO
* packets, hardware needs to be able to build the split packets using 8 or
* fewer descriptors. Additionally, the header must be contained within at
* most 3 descriptors.
*
* To verify this, we walk the headers to find out how many descriptors the
* headers require (usually 1). Then we ensure that, for each TSO segment, its
* data plus the headers are contained within 8 or fewer descriptors.
*/
static inline int
ice_tso_detect_sparse(if_pkt_info_t pi)
{
int count, curseg, i, hlen, segsz, seglen, tsolen, hdrs, maxsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
int nsegs = pi->ipi_nsegs;
curseg = hdrs = 0;
hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
tsolen = pi->ipi_len - hlen;
/* First, count the number of descriptors for the header.
* Additionally, make sure it does not span more than 3 segments.
*/
i = 0;
curseg = segs[0].ds_len;
while (hlen > 0) {
hdrs++;
if (hdrs > ICE_MAX_TSO_HDR_SEGS)
return (1);
if (curseg == 0) {
i++;
if (__predict_false(i == nsegs))
return (1);
curseg = segs[i].ds_len;
}
seglen = min(curseg, hlen);
curseg -= seglen;
hlen -= seglen;
}
maxsegs = ICE_MAX_TX_SEGS - hdrs;
/* We must count the headers, in order to verify that they take up
* 3 or fewer descriptors. However, we don't need to check the data
* if the total number of segments is small.
*/
if (nsegs <= maxsegs)
return (0);
count = 0;
/* Now check the data to make sure that each TSO segment is made up of
* no more than maxsegs descriptors. This ensures that hardware will
* be capable of performing TSO offload.
*/
while (tsolen > 0) {
segsz = pi->ipi_tso_segsz;
while (segsz > 0 && tsolen != 0) {
count++;
if (count > maxsegs) {
return (1);
}
if (curseg == 0) {
i++;
if (__predict_false(i == nsegs)) {
return (1);
}
curseg = segs[i].ds_len;
}
seglen = min(curseg, segsz);
segsz -= seglen;
curseg -= seglen;
tsolen -= seglen;
}
count = 0;
}
return (0);
}
/**
* ice_tso_setup - Setup a context descriptor to prepare for a TSO packet
* @txq: the Tx queue to use
* @pi: the packet info to prepare for
*
* Setup a context descriptor in preparation for sending a Tx packet that
* requires the TSO offload. Returns the index of the descriptor to use when
* encapsulating the Tx packet data into descriptors.
*/
static inline int
ice_tso_setup(struct ice_tx_queue *txq, if_pkt_info_t pi)
{
struct ice_tx_ctx_desc *txd;
u32 cmd, mss, type, tsolen;
int idx;
u64 type_cmd_tso_mss;
idx = pi->ipi_pidx;
txd = (struct ice_tx_ctx_desc *)&txq->tx_base[idx];
tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
type = ICE_TX_DESC_DTYPE_CTX;
cmd = ICE_TX_CTX_DESC_TSO;
/* TSO MSS must not be less than 64 */
if (pi->ipi_tso_segsz < ICE_MIN_TSO_MSS) {
txq->stats.mss_too_small++;
pi->ipi_tso_segsz = ICE_MIN_TSO_MSS;
}
mss = pi->ipi_tso_segsz;
type_cmd_tso_mss = ((u64)type << ICE_TXD_CTX_QW1_DTYPE_S) |
((u64)cmd << ICE_TXD_CTX_QW1_CMD_S) |
((u64)tsolen << ICE_TXD_CTX_QW1_TSO_LEN_S) |
((u64)mss << ICE_TXD_CTX_QW1_MSS_S);
txd->qw1 = htole64(type_cmd_tso_mss);
txd->tunneling_params = htole32(0);
txq->tso++;
return ((idx + 1) & (txq->desc_count-1));
}
/**
* ice_tx_setup_offload - Setup register values for performing a Tx offload
* @txq: The Tx queue, used to track checksum offload stats
* @pi: the packet info to program for
* @cmd: the cmd register value to update
* @off: the off register value to update
*
* Based on the packet info provided, update the cmd and off values for
* enabling Tx offloads. This depends on the packet type and which offloads
* have been requested.
*
* We also track the total number of times that we've requested hardware
* offload a particular type of checksum for debugging purposes.
*/
static inline void
ice_tx_setup_offload(struct ice_tx_queue *txq, if_pkt_info_t pi, u32 *cmd, u32 *off)
{
u32 remaining_csum_flags = pi->ipi_csum_flags;
switch (pi->ipi_etype) {
#ifdef INET
case ETHERTYPE_IP:
if (pi->ipi_csum_flags & ICE_CSUM_IP) {
*cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
txq->stats.cso[ICE_CSO_STAT_TX_IP4]++;
remaining_csum_flags &= ~CSUM_IP;
} else
*cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
break;
#endif
#ifdef INET6
case ETHERTYPE_IPV6:
*cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
/*
* This indicates that the IIPT flag was set to the IPV6 value;
* there's no checksum for IPv6 packets.
*/
txq->stats.cso[ICE_CSO_STAT_TX_IP6]++;
break;
#endif
default:
txq->stats.cso[ICE_CSO_STAT_TX_L3_ERR]++;
break;
}
*off |= (pi->ipi_ehdrlen >> 1) << ICE_TX_DESC_LEN_MACLEN_S;
*off |= (pi->ipi_ip_hlen >> 2) << ICE_TX_DESC_LEN_IPLEN_S;
if (!(remaining_csum_flags & ~ICE_RX_CSUM_FLAGS))
return;
switch (pi->ipi_ipproto) {
case IPPROTO_TCP:
if (pi->ipi_csum_flags & ICE_CSUM_TCP) {
*cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
*off |= (pi->ipi_tcp_hlen >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
txq->stats.cso[ICE_CSO_STAT_TX_TCP]++;
}
break;
case IPPROTO_UDP:
if (pi->ipi_csum_flags & ICE_CSUM_UDP) {
*cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
*off |= (sizeof(struct udphdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
txq->stats.cso[ICE_CSO_STAT_TX_UDP]++;
}
break;
case IPPROTO_SCTP:
if (pi->ipi_csum_flags & ICE_CSUM_SCTP) {
*cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
*off |= (sizeof(struct sctphdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
txq->stats.cso[ICE_CSO_STAT_TX_SCTP]++;
}
break;
default:
txq->stats.cso[ICE_CSO_STAT_TX_L4_ERR]++;
break;
}
}
/**
* ice_rx_checksum - determine whether the hardware checksum is valid
* @rxq: the Rx queue structure
* @flags: checksum flags to update
* @data: checksum data to update
* @status0: descriptor status data
* @ptype: packet type
*
* Determine whether the hardware indicated that the Rx checksum is valid. If
* so, update the checksum flags and data, informing the stack of the status
* of the checksum so that it does not spend time verifying it manually.
*/
static void
ice_rx_checksum(struct ice_rx_queue *rxq, uint32_t *flags, uint32_t *data,
u16 status0, u16 ptype)
{
const u16 l3_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S));
const u16 l4_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
const u16 xsum_errors = (l3_error | l4_error |
BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S));
struct ice_rx_ptype_decoded decoded;
bool is_ipv4, is_ipv6;
/* No L3 or L4 checksum was calculated */
if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) {
return;
}
decoded = ice_decode_rx_desc_ptype(ptype);
*flags = 0;
if (!(decoded.known && decoded.outer_ip))
return;
is_ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
is_ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
/* No checksum errors were reported */
if (!(status0 & xsum_errors)) {
if (is_ipv4)
*flags |= CSUM_L3_CALC | CSUM_L3_VALID;
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
case ICE_RX_PTYPE_INNER_PROT_UDP:
case ICE_RX_PTYPE_INNER_PROT_SCTP:
*flags |= CSUM_L4_CALC | CSUM_L4_VALID;
*data |= htons(0xffff);
break;
default:
break;
}
return;
}
/*
* Certain IPv6 extension headers impact the validity of L4 checksums.
* If one of these headers exist, hardware will set the IPV6EXADD bit
* in the descriptor. If the bit is set then pretend like hardware
* didn't checksum this packet.
*/
if (is_ipv6 && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))) {
rxq->stats.cso[ICE_CSO_STAT_RX_IP6_ERR]++;
return;
}
/*
* At this point, status0 must have at least one of the l3_error or
* l4_error bits set.
*/
if (status0 & l3_error) {
if (is_ipv4) {
rxq->stats.cso[ICE_CSO_STAT_RX_IP4_ERR]++;
*flags |= CSUM_L3_CALC;
} else {
/* Hardware indicated L3 error but this isn't IPv4? */
rxq->stats.cso[ICE_CSO_STAT_RX_L3_ERR]++;
}
/* don't bother reporting L4 errors if we got an L3 error */
return;
} else if (is_ipv4) {
*flags |= CSUM_L3_CALC | CSUM_L3_VALID;
}
if (status0 & l4_error) {
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
rxq->stats.cso[ICE_CSO_STAT_RX_TCP_ERR]++;
*flags |= CSUM_L4_CALC;
break;
case ICE_RX_PTYPE_INNER_PROT_UDP:
rxq->stats.cso[ICE_CSO_STAT_RX_UDP_ERR]++;
*flags |= CSUM_L4_CALC;
break;
case ICE_RX_PTYPE_INNER_PROT_SCTP:
rxq->stats.cso[ICE_CSO_STAT_RX_SCTP_ERR]++;
*flags |= CSUM_L4_CALC;
break;
default:
/*
* Hardware indicated L4 error, but this isn't one of
* the expected protocols.
*/
rxq->stats.cso[ICE_CSO_STAT_RX_L4_ERR]++;
}
}
}
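/*
 * Example (derived from the logic above): for a clean IPv4/TCP packet the
 * descriptor sets the L3L4P bit and none of the checksum error bits, so
 * this function reports
 *
 *	*flags = CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | CSUM_L4_VALID;
 *	*data |= htons(0xffff);
 *
 * telling the stack that both checksums were already verified in hardware.
 */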
/**
* ice_ptype_to_hash - Convert packet type to a hash value
* @ptype: the packet type to convert
*
* Given the packet type, convert to a suitable hashtype to report to the
* upper stack via the iri_rsstype value of the if_rxd_info_t structure.
*
* If the hash type is unknown we'll report M_HASHTYPE_OPAQUE.
*/
static inline int
ice_ptype_to_hash(u16 ptype)
{
struct ice_rx_ptype_decoded decoded;
if (ptype >= ARRAY_SIZE(ice_ptype_lkup))
return M_HASHTYPE_OPAQUE;
decoded = ice_decode_rx_desc_ptype(ptype);
if (!decoded.known)
return M_HASHTYPE_OPAQUE;
if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
return M_HASHTYPE_OPAQUE;
/* Note: anything that gets to this point is IP */
if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6) {
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
return M_HASHTYPE_RSS_TCP_IPV6;
case ICE_RX_PTYPE_INNER_PROT_UDP:
return M_HASHTYPE_RSS_UDP_IPV6;
default:
return M_HASHTYPE_RSS_IPV6;
}
}
if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4) {
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
return M_HASHTYPE_RSS_TCP_IPV4;
case ICE_RX_PTYPE_INNER_PROT_UDP:
return M_HASHTYPE_RSS_UDP_IPV4;
default:
return M_HASHTYPE_RSS_IPV4;
}
}
/* We should never get here!! */
return M_HASHTYPE_OPAQUE;
}
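/*
 * Usage sketch (illustrative; the variable names are assumptions, not part
 * of this commit): an iflib isc_rxd_pkt_get implementation would apply
 * these helpers roughly as
 *
 *	ri->iri_rsstype = ice_ptype_to_hash(ptype);
 *	ice_rx_checksum(rxq, &ri->iri_csum_flags, &ri->iri_csum_data,
 *	    status0, ptype);
 */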
#endif

1227	sys/dev/ice/ice_controlq.c Normal file (diff suppressed because it is too large)

125	sys/dev/ice/ice_controlq.h Normal file
@ -0,0 +1,125 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_CONTROLQ_H_
#define _ICE_CONTROLQ_H_
#include "ice_adminq_cmd.h"
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
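/*
 * Worked example (illustrative): for a ring with count = 64,
 * next_to_clean = 10 and next_to_use = 50, next_to_clean <= next_to_use,
 * so the macro yields (64 + 10 - 50 - 1) = 23 unused descriptors. The
 * "- 1" keeps one slot free so that a full ring can be distinguished from
 * an empty one.
 */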
/* Defines that help manage the checks of the driver against the firmware
 * API version. See ice_aq_ver_check() in ice_controlq.c for actual usage.
 */
#define EXP_FW_API_VER_BRANCH 0x00
#define EXP_FW_API_VER_MAJOR 0x01
#define EXP_FW_API_VER_MINOR 0x05
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
};
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
void *cmd_buf; /* command buffer memory */
union {
struct ice_dma_mem *sq_bi;
struct ice_dma_mem *rq_bi;
} r;
u16 count; /* Number of descriptors */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
u32 bah;
u32 bal;
u32 len_mask;
u32 len_ena_mask;
u32 head_mask;
};
/* sq transaction details */
struct ice_sq_cd {
struct ice_aq_desc *wb_desc;
};
#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
/* rq event information */
struct ice_rq_event_info {
struct ice_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
};
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
enum ice_aq_err rq_last_status; /* last status on receive queue */
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
u32 sq_cmd_timeout; /* send queue cmd write back timeout */
u16 num_rq_entries; /* receive queue depth */
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
enum ice_aq_err sq_last_status; /* last status on send queue */
struct ice_lock sq_lock; /* Send queue lock */
struct ice_lock rq_lock; /* Receive queue lock */
};
#endif /* _ICE_CONTROLQ_H_ */

1697	sys/dev/ice/ice_dcb.c Normal file (diff suppressed because it is too large)

271	sys/dev/ice/ice_dcb.h Normal file
@ -0,0 +1,271 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_DCB_H_
#define _ICE_DCB_H_
#include "ice_type.h"
#include "ice_common.h"
#define ICE_DCBX_OFFLOAD_DIS 0
#define ICE_DCBX_OFFLOAD_ENABLED 1
#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
#define ICE_DCBX_STATUS_DONE 2
#define ICE_DCBX_STATUS_MULTIPLE_PEERS 3
#define ICE_DCBX_STATUS_DIS 7
#define ICE_TLV_TYPE_END 0
#define ICE_TLV_TYPE_ORG 127
#define ICE_IEEE_8021QAZ_OUI 0x0080C2
#define ICE_IEEE_SUBTYPE_ETS_CFG 9
#define ICE_IEEE_SUBTYPE_ETS_REC 10
#define ICE_IEEE_SUBTYPE_PFC_CFG 11
#define ICE_IEEE_SUBTYPE_APP_PRI 12
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
#define ICE_CEE_SUBTYPE_CTRL 1
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
#define ICE_CEE_MAX_FEAT_TYPE 3
#define ICE_LLDP_ADMINSTATUS_DIS 0
#define ICE_LLDP_ADMINSTATUS_ENA_RX 1
#define ICE_LLDP_ADMINSTATUS_ENA_TX 2
#define ICE_LLDP_ADMINSTATUS_ENA_RXTX 3
/* Defines for LLDP TLV header */
#define ICE_LLDP_TLV_LEN_S 0
#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
#define ICE_LLDP_TLV_TYPE_S 9
#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
#define ICE_LLDP_TLV_SUBTYPE_S 0
#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
#define ICE_LLDP_TLV_OUI_S 8
#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
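/*
 * Example (illustrative): given the 16-bit TLV header word in host order,
 * the type and length are extracted with the masks above:
 *
 *	u16 typelen = ntohs(tlv->typelen);
 *	u16 type = (typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S;
 *	u16 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
 */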
/* Defines for IEEE ETS TLV */
#define ICE_IEEE_ETS_MAXTC_S 0
#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
#define ICE_IEEE_ETS_CBS_S 6
#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
#define ICE_IEEE_ETS_WILLING_S 7
#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
#define ICE_IEEE_ETS_PRIO_0_S 0
#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
#define ICE_IEEE_ETS_PRIO_1_S 4
#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
#define ICE_CEE_PGID_PRIO_0_S 0
#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
#define ICE_CEE_PGID_PRIO_1_S 4
#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
#define ICE_CEE_PGID_STRICT 15
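/*
 * Example (illustrative; "prio" is a hypothetical priority-assignment
 * octet): each ETS byte packs two 3-bit traffic class values, one per
 * nibble, extracted as
 *
 *	u8 tc_even = (prio & ICE_IEEE_ETS_PRIO_0_M) >> ICE_IEEE_ETS_PRIO_0_S;
 *	u8 tc_odd = (prio & ICE_IEEE_ETS_PRIO_1_M) >> ICE_IEEE_ETS_PRIO_1_S;
 */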
/* Defines for IEEE TSA types */
#define ICE_IEEE_TSA_STRICT 0
#define ICE_IEEE_TSA_CBS 1
#define ICE_IEEE_TSA_ETS 2
#define ICE_IEEE_TSA_VENDOR 255
/* Defines for IEEE PFC TLV */
#define ICE_IEEE_PFC_CAP_S 0
#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
#define ICE_IEEE_PFC_MBC_S 6
#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
#define ICE_IEEE_PFC_WILLING_S 7
#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
/* Defines for IEEE APP TLV */
#define ICE_IEEE_APP_SEL_S 0
#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
#define ICE_IEEE_APP_PRIO_S 5
#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
/* TLV definitions for preparing MIB */
#define ICE_TLV_ID_CHASSIS_ID 0
#define ICE_TLV_ID_PORT_ID 1
#define ICE_TLV_ID_TIME_TO_LIVE 2
#define ICE_IEEE_TLV_ID_ETS_CFG 3
#define ICE_IEEE_TLV_ID_ETS_REC 4
#define ICE_IEEE_TLV_ID_PFC_CFG 5
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
#pragma pack(1)
/* IEEE 802.1AB LLDP TLV structure */
struct ice_lldp_generic_tlv {
__be16 typelen;
u8 tlvinfo[1];
};
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
__be32 ouisubtype;
u8 tlvinfo[1];
};
#pragma pack()
struct ice_cee_tlv_hdr {
__be16 typelen;
u8 operver;
u8 maxver;
};
struct ice_cee_ctrl_tlv {
struct ice_cee_tlv_hdr hdr;
__be32 seqno;
__be32 ackno;
};
struct ice_cee_feat_tlv {
struct ice_cee_tlv_hdr hdr;
u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
#define ICE_CEE_FEAT_TLV_ENA_M 0x80
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
u8 subtype;
u8 tlvinfo[1];
};
#pragma pack(1)
struct ice_cee_app_prio {
__be16 protocol;
u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
#define ICE_CEE_APP_SELECTOR_M 0x03
__be16 lower_oui;
u8 prio_map;
};
#pragma pack()
/* TODO: The structures below define LLDP/DCBX variables and statistics,
 * but we still need to determine how to retrieve the required information
 * from the firmware in order to use them.
 */
/* IEEE 802.1AB LLDP Agent Statistics */
struct ice_lldp_stats {
u64 remtablelastchangetime;
u64 remtableinserts;
u64 remtabledeletes;
u64 remtabledrops;
u64 remtableageouts;
u64 txframestotal;
u64 rxframesdiscarded;
u64 rxportframeerrors;
u64 rxportframestotal;
u64 rxporttlvsdiscardedtotal;
u64 rxporttlvsunrecognizedtotal;
u64 remtoomanyneighbors;
};
/* IEEE 802.1Qaz DCBX variables */
struct ice_dcbx_variables {
u32 defmaxtrafficclasses;
u32 defprioritytcmapping;
u32 deftcbandwidth;
u32 deftsaassignment;
};
enum ice_status
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
u16 buf_size, u16 old_len, u16 new_len, u16 offset,
u16 *mib_len, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfcmode_set, u8 *pfcmode_ret,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
struct ice_sq_cd *cd);
enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
u8 ice_get_dcbx_status(struct ice_hw *hw);
enum ice_status
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
enum ice_status
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
enum ice_status
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf);
enum ice_status
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
enum ice_status
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
enum ice_status
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd);
#endif /* _ICE_DCB_H_ */

78	sys/dev/ice/ice_devids.h Normal file
@ -0,0 +1,78 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_DEVIDS_H_
#define _ICE_DEVIDS_H_
/* Device IDs */
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
#define ICE_DEV_ID_E823L_SFP 0x124D
/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
/* Intel(R) Ethernet Connection E823-L 1GbE */
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */
#define ICE_DEV_ID_E822C_QSFP 0x1891
/* Intel(R) Ethernet Connection E822-C for SFP */
#define ICE_DEV_ID_E822C_SFP 0x1892
/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
/* Intel(R) Ethernet Connection E822-L for backplane */
#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* _ICE_DEVIDS_H_ */

195	sys/dev/ice/ice_drv_info.h Normal file
@ -0,0 +1,195 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_drv_info.h
* @brief device IDs and driver version
*
* Contains the device IDs tables and the driver version string.
*
* This file contains static or constant definitions intended to be included
* exactly once in the main driver interface file. It implicitly depends on
* the main driver header file.
*
* These definitions could be placed directly in the interface file, but are
* kept separate for organizational purposes.
*/
/**
* @var ice_driver_version
* @brief driver version string
*
* Driver version information, used for display as part of an informational
* sysctl, and as part of the driver information sent to the firmware at load.
*
* @var ice_major_version
* @brief driver major version number
*
* @var ice_minor_version
* @brief driver minor version number
*
* @var ice_patch_version
* @brief driver patch version number
*
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "0.26.0-k";
const uint8_t ice_major_version = 0;
const uint8_t ice_minor_version = 26;
const uint8_t ice_patch_version = 0;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 0.26.0-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.26.0-k")
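/*
 * For example, PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE,
 * "Intel(R) Ethernet Controller E810-C for backplane") expands to a PVID()
 * entry whose description string becomes
 * "Intel(R) Ethernet Controller E810-C for backplane - 0.26.0-k".
 */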
/**
* @var ice_vendor_info_array
* @brief array of PCI devices supported by this driver
*
 * Array of PCI devices which are supported by this driver. Used to determine
 * whether this driver should attach to a given device. This information is
 * also exported as part of the module information for other tools to analyze.
*
* @remark Each type of device ID needs to be listed from most-specific entry
* to most-generic entry; e.g. PVIDV_OEM()s for a device ID must come before
* the PVIDV() for it.
*/
static pci_vendor_info_t ice_vendor_info_array[] = {
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE,
"Intel(R) Ethernet Controller E810-C for backplane"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0001, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q1"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0002, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0003, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q1"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0004, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0005, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0006, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0007, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0008, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
"Intel(R) Ethernet Controller E810-C for QSFP"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0001, 0,
"Intel(R) Ethernet Network Adapter E810-L-1"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0002, 0,
"Intel(R) Ethernet Network Adapter E810-L-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0003, 0,
"Intel(R) Ethernet Network Adapter E810-L-1"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0004, 0,
"Intel(R) Ethernet Network Adapter E810-L-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0005, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0006, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0007, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0008, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0009, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 2.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
"Intel(R) Ethernet Controller E810-C for SFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE,
"Intel(R) Ethernet Connection E822-C for backplane"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP,
"Intel(R) Ethernet Connection E822-C for QSFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP,
"Intel(R) Ethernet Connection E822-C for SFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T,
"Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII,
"Intel(R) Ethernet Connection E822-C 1GbE"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE,
"Intel(R) Ethernet Connection E822-L for backplane"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP,
"Intel(R) Ethernet Connection E822-L for SFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T,
"Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII,
"Intel(R) Ethernet Connection E822-L 1GbE"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE,
"Intel(R) Ethernet Connection E823-L for backplane"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP,
"Intel(R) Ethernet Connection E823-L for SFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP,
"Intel(R) Ethernet Connection E823-L for QSFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T,
"Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE,
"Intel(R) Ethernet Connection E823-L 1GbE"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE,
"Intel(R) Ethernet Controller E810-XXV for backplane"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP,
"Intel(R) Ethernet Controller E810-XXV for QSFP"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
ICE_INTEL_VENDOR_ID, 0x0003, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
ICE_INTEL_VENDOR_ID, 0x0004, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
ICE_INTEL_VENDOR_ID, 0x0005, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
ICE_INTEL_VENDOR_ID, 0x0006, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
"Intel(R) Ethernet Controller E810-XXV for SFP"),
PVID_END
};

91	sys/dev/ice/ice_features.h Normal file
@ -0,0 +1,91 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_features.h
* @brief device feature controls
*
* Contains a list of various device features which could be enabled or
* disabled.
*/
#ifndef _ICE_FEATURES_H_
#define _ICE_FEATURES_H_
/**
* @enum feat_list
* @brief driver feature enumeration
*
* Enumeration of possible device driver features that can be enabled or
* disabled. Each possible value represents a different feature which can be
* enabled or disabled.
*
* The driver stores a bitmap of the features that the device and OS are
* capable of, as well as another bitmap indicating which features are
* currently enabled for that device.
*/
enum feat_list {
ICE_FEATURE_SRIOV,
ICE_FEATURE_RSS,
ICE_FEATURE_NETMAP,
ICE_FEATURE_FDIR,
ICE_FEATURE_MSI,
ICE_FEATURE_MSIX,
ICE_FEATURE_RDMA,
ICE_FEATURE_SAFE_MODE,
ICE_FEATURE_LENIENT_LINK_MODE,
ICE_FEATURE_DEFAULT_OVERRIDE,
/* Must be last entry */
ICE_FEATURE_COUNT
};
/**
* ice_disable_unsupported_features - Disable features not enabled by OS
* @bitmap: the feature bitmap
*
* Check for OS support of various driver features. Clear the feature bit for
* any feature which is not enabled by the OS. This should be called early
* during driver attach after setting up the feature bitmap.
*
* @remark the bitmap parameter is marked as unused in order to avoid an
* unused parameter warning in case none of the features need to be disabled.
*/
static inline void
ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
{
ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
#ifndef DEV_NETMAP
ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
#endif
}
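/*
 * Usage sketch (illustrative; ice_declare_bitmap() and ice_set_bit() are
 * assumed from the driver's bitmap support): during attach, a driver would
 * populate the capability bitmap and then mask off anything the OS build
 * cannot support:
 *
 *	ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT);
 *	ice_set_bit(ICE_FEATURE_RSS, feat_cap);
 *	ice_set_bit(ICE_FEATURE_NETMAP, feat_cap);
 *	ice_disable_unsupported_features(feat_cap);
 */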
#endif

5630	sys/dev/ice/ice_flex_pipe.c Normal file (diff suppressed because it is too large)

131	sys/dev/ice/ice_flex_pipe.h Normal file
@ -0,0 +1,131 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_FLEX_PIPE_H_
#define _ICE_FLEX_PIPE_H_
#include "ice_type.h"
/* Package minimal version supported */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
enum ice_status
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
enum ice_status
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
u16 *value);
void
ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
ice_bitmap_t *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
enum ice_status
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
u16 *port);
enum ice_status
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
bool
ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type);
enum ice_status ice_replay_tunnels(struct ice_hw *hw);
/* XLT1/PType group functions */
enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg);
/* XLT2/VSI group functions */
enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
enum ice_status
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
struct ice_prof_map *
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
struct ice_prof_map *
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
enum ice_status
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
enum ice_status
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len);
#endif /* _ICE_FLEX_PIPE_H_ */

734	sys/dev/ice/ice_flex_type.h Normal file
@ -0,0 +1,734 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
#define ICE_FV_OFFSET_INVAL 0x1FF
#pragma pack(1)
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
u16 off; /* Offset within the protocol header */
u8 resvrd;
};
#pragma pack()
#define ICE_MAX_NUM_PROFILES 256
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[1];
};
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[1];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[1];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[1];
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[1];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
/* ice package section IDs */
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_XLT0_BUILDER 53
#define ICE_SID_RXPARSER_NODE_PTYPE 54
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_RXPARSER_XLT0 59
#define ICE_SID_TXPARSER_CAM 60
#define ICE_SID_TXPARSER_NOMATCH_CAM 61
#define ICE_SID_TXPARSER_IMEM 62
#define ICE_SID_TXPARSER_XLT0_BUILDER 63
#define ICE_SID_TXPARSER_NODE_PTYPE 64
#define ICE_SID_TXPARSER_MARKER_PTYPE 65
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_TXPARSER_PROTO_GRP 67
#define ICE_SID_TXPARSER_METADATA_INIT 68
#define ICE_SID_TXPARSER_XLT0 69
#define ICE_SID_RXPARSER_INIT_REDIR 70
#define ICE_SID_TXPARSER_INIT_REDIR 71
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_TXPARSER_MARKER_GRP 73
#define ICE_SID_RXPARSER_LAST_PROTO 74
#define ICE_SID_TXPARSER_LAST_PROTO 75
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_TXPARSER_PG_SPILL 77
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
#define ICE_SID_LBL_RESERVED_12 0x80000012
#define ICE_SID_LBL_RESERVED_13 0x80000013
#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
#define ICE_SID_LBL_PTYPE 0x80000016
#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
#define ICE_SID_LBL_FLAG 0x80000023
#define ICE_SID_LBL_REG 0x80000024
#define ICE_SID_LBL_SW_PTG 0x80000025
#define ICE_SID_LBL_ACL_PTG 0x80000026
#define ICE_SID_LBL_PE_PTG 0x80000027
#define ICE_SID_LBL_RSS_PTG 0x80000028
#define ICE_SID_LBL_FD_PTG 0x80000029
#define ICE_SID_LBL_SW_VSIG 0x8000002A
#define ICE_SID_LBL_ACL_VSIG 0x8000002B
#define ICE_SID_LBL_PE_VSIG 0x8000002C
#define ICE_SID_LBL_RSS_VSIG 0x8000002D
#define ICE_SID_LBL_FD_VSIG 0x8000002E
#define ICE_SID_LBL_PTYPE_META 0x8000002F
#define ICE_SID_LBL_SW_PROFID 0x80000030
#define ICE_SID_LBL_ACL_PROFID 0x80000031
#define ICE_SID_LBL_PE_PROFID 0x80000032
#define ICE_SID_LBL_RSS_PROFID 0x80000033
#define ICE_SID_LBL_FD_PROFID 0x80000034
#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
#define ICE_PTYPE_IPV4FRAG_PAY 22
#define ICE_PTYPE_IPV4_PAY 23
#define ICE_PTYPE_IPV4_UDP_PAY 24
#define ICE_PTYPE_IPV4_TCP_PAY 26
#define ICE_PTYPE_IPV4_SCTP_PAY 27
#define ICE_PTYPE_IPV4_ICMP_PAY 28
#define ICE_PTYPE_IPV6FRAG_PAY 88
#define ICE_PTYPE_IPV6_PAY 89
#define ICE_PTYPE_IPV6_UDP_PAY 90
#define ICE_PTYPE_IPV6_TCP_PAY 92
#define ICE_PTYPE_IPV6_SCTP_PAY 93
#define ICE_PTYPE_IPV6_ICMP_PAY 94
/* Packet Type Groups (PTG) - Inner Most fields (IM) */
#define ICE_PTG_IM_IPV4_TCP 16
#define ICE_PTG_IM_IPV4_UDP 17
#define ICE_PTG_IM_IPV4_SCTP 18
#define ICE_PTG_IM_IPV4_PAY 20
#define ICE_PTG_IM_IPV4_OTHER 21
#define ICE_PTG_IM_IPV6_TCP 32
#define ICE_PTG_IM_IPV6_UDP 33
#define ICE_PTG_IM_IPV6_SCTP 34
#define ICE_PTG_IM_IPV6_OTHER 37
#define ICE_PTG_IM_L2_OTHER 67
struct ice_flex_fields {
union {
struct {
u8 src_ip;
u8 dst_ip;
u8 flow_label; /* valid for IPv6 only */
} ip_fields;
struct {
u8 src_prt;
u8 dst_prt;
} tcp_udp_fields;
struct {
u8 src_ip;
u8 dst_ip;
u8 src_prt;
u8 dst_prt;
} ip_tcp_udp_fields;
struct {
u8 src_prt;
u8 dst_prt;
u8 flow_label; /* valid for IPv6 only */
u8 spi;
} ip_esp_fields;
struct {
u32 offset;
u32 length;
} off_len;
} fields;
};
#define ICE_XLT1_DFLT_GRP 0
#define ICE_XLT1_TABLE_SIZE 1024
/* package labels */
struct ice_label {
__le16 value;
#define ICE_PKG_LABEL_SIZE 64
char name[ICE_PKG_LABEL_SIZE];
};
struct ice_label_section {
__le16 count;
struct ice_label label[1];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_label_section) - sizeof(struct ice_label), \
sizeof(struct ice_label))
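/*
 * Illustrative expansion: ICE_MAX_LABELS_IN_BUF is the number of whole
 * struct ice_label entries that fit in one ICE_PKG_BUF_SIZE (4096-byte)
 * package buffer after subtracting the struct ice_buf_hdr and the
 * label-section header that precedes the first label.
 */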
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
struct ice_fv fv[1];
};
struct ice_sw_fv_list_entry {
struct LIST_ENTRY_TYPE list_entry;
u32 profile_id;
struct ice_fv *fv_ptr;
};
#pragma pack(1)
/* The BOOST TCAM stores the match packet header in reverse byte order, so
 * its fields appear reversed; this also means that the normally big endian
 * fields of the packet are little endian here.
 */
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
__le16 hv_dst_port_key;
__le16 hv_src_port_key;
u8 tcam_search_key;
};
#pragma pack()
struct ice_boost_key {
struct ice_boost_key_value key;
struct ice_boost_key_value key2;
};
/* package Boost TCAM entry */
struct ice_boost_tcam_entry {
__le16 addr;
__le16 reserved;
/* break up the 40 bytes of key into different fields */
struct ice_boost_key key;
u8 boost_hit_index_group;
/* The following contains bitfields which are not on byte boundaries.
* These fields are currently unused by driver software.
*/
#define ICE_BOOST_BIT_FIELDS 43
u8 bit_fields[ICE_BOOST_BIT_FIELDS];
};
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
struct ice_boost_tcam_entry tcam[1];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_boost_tcam_section) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
#pragma pack(1)
struct ice_xlt1_section {
__le16 count;
__le16 offset;
u8 value[1];
};
#pragma pack()
#define ICE_XLT1_SIZE(n) (sizeof(struct ice_xlt1_section) + \
(sizeof(u8) * ((n) - 1)))
struct ice_xlt2_section {
__le16 count;
__le16 offset;
__le16 value[1];
};
#define ICE_XLT2_SIZE(n) (sizeof(struct ice_xlt2_section) + \
(sizeof(u16) * ((n) - 1)))
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
u8 redir_value[1];
};
#define ICE_PROF_REDIR_SIZE(n) (sizeof(struct ice_prof_redir_section) + \
(sizeof(u8) * ((n) - 1)))
/* package buffer building */
struct ice_buf_build {
struct ice_buf buf;
u16 reserved_section_table_entries;
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
/* Tunnel enabling */
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
struct ice_tunnel_type_scan {
enum ice_tunnel_type type;
const char *label_prefix;
};
struct ice_tunnel_entry {
enum ice_tunnel_type type;
u16 boost_addr;
u16 port;
u16 ref;
struct ice_boost_tcam_entry *boost_entry;
u8 valid;
u8 in_use;
u8 marked;
};
#define ICE_TUNNEL_MAX_ENTRIES 16
struct ice_tunnel_table {
struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
u16 count;
};
struct ice_pkg_es {
__le16 count;
__le16 offset;
struct ice_fv_word es[1];
};
struct ice_es {
u32 sid;
u16 count;
u16 fvw;
u16 *ref_count;
struct LIST_HEAD_TYPE prof_map;
struct ice_fv_word *t;
struct ice_lock prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
};
/* PTYPE Group management */
/* Note: the XLT1 table takes a 13-bit packet type (PTYPE) as input and
 * produces an 8-bit packet type group (PTG) ID as output.
 *
 * Note: PTG 0 is the default packet type group; all PTYPEs are assumed to
 * be part of this group until moved to a new PTG.
 */
#define ICE_DEFAULT_PTG 0
struct ice_ptg_entry {
struct ice_ptg_ptype *first_ptype;
u8 in_use;
};
struct ice_ptg_ptype {
struct ice_ptg_ptype *next_ptype;
u8 ptg;
};
#define ICE_MAX_TCAM_PER_PROFILE 32
#define ICE_MAX_PTG_PER_PROFILE 32
struct ice_prof_map {
struct LIST_ENTRY_TYPE list;
u64 profile_cookie;
u64 context;
u8 prof_id;
u8 ptg_cnt;
u8 ptg[ICE_MAX_PTG_PER_PROFILE];
};
#define ICE_INVALID_TCAM 0xFFFF
struct ice_tcam_inf {
u16 tcam_idx;
u8 ptg;
u8 prof_id;
u8 in_use;
};
struct ice_vsig_prof {
struct LIST_ENTRY_TYPE list;
u64 profile_cookie;
u8 prof_id;
u8 tcam_count;
struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
};
struct ice_vsig_entry {
struct LIST_HEAD_TYPE prop_lst;
struct ice_vsig_vsi *first_vsi;
u8 in_use;
};
struct ice_vsig_vsi {
struct ice_vsig_vsi *next_vsi;
u32 prop_mask;
u16 changed;
u16 vsig;
};
#define ICE_XLT1_CNT 1024
#define ICE_MAX_PTGS 256
/* XLT1 Table */
struct ice_xlt1 {
struct ice_ptg_entry *ptg_tbl;
struct ice_ptg_ptype *ptypes;
u8 *t;
u32 sid;
u16 count;
};
#define ICE_XLT2_CNT 768
#define ICE_MAX_VSIGS 768
/* VSIG bit layout:
* [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS
* [13:15]: PF number of device
*/
#define ICE_VSIG_IDX_M (0x1FFF)
#define ICE_PF_NUM_S 13
#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
#define ICE_VSIG_VALUE(vsig, pf_id) \
(u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
(((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
#define ICE_DEFAULT_VSIG 0
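/*
 * Worked example (illustrative): ICE_VSIG_VALUE(5, 2) evaluates to
 * (5 & 0x1FFF) | ((2 << 13) & 0xE000) == 0x4005, i.e. VSIG index 5 owned
 * by PF 2.
 */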
/* XLT2 Table */
struct ice_xlt2 {
struct ice_vsig_entry *vsig_tbl;
struct ice_vsig_vsi *vsis;
u16 *t;
u32 sid;
u16 count;
};
/* Extraction sequence - list of match fields:
* protocol ID, offset, profile length
*/
union ice_match_fld {
struct {
u8 prot_id;
u8 offset;
u8 length;
u8 reserved; /* must be zero */
} fld;
u32 val;
};
#define ICE_MATCH_LIST_SZ 20
#pragma pack(1)
struct ice_match {
u8 count;
union ice_match_fld list[ICE_MATCH_LIST_SZ];
};
/* Profile ID Management */
struct ice_prof_id_key {
__le16 flags;
u8 xlt1;
__le16 xlt2_cdid;
};
/* Keys are made up of two values, each one-half the size of the key.
 * For TCAM, the entire key is 80 bits wide (two 40-bit values).
 */
#define ICE_TCAM_KEY_VAL_SZ 5
#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ)
struct ice_prof_tcam_entry {
__le16 addr;
u8 key[ICE_TCAM_KEY_SZ];
u8 prof_id;
};
struct ice_prof_id_section {
__le16 count;
struct ice_prof_tcam_entry entry[1];
};
#pragma pack()
struct ice_prof_tcam {
u32 sid;
u16 count;
u16 max_prof_id;
struct ice_prof_tcam_entry *t;
u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */
};
struct ice_prof_redir {
u8 *t;
u32 sid;
u16 count;
};
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
u8 overwrite; /* set to true to allow overwrite of table entries */
u8 is_list_init;
};
enum ice_chg_type {
ICE_TCAM_NONE = 0,
ICE_PTG_ES_ADD,
ICE_TCAM_ADD,
ICE_VSIG_ADD,
ICE_VSIG_REM,
ICE_VSI_MOVE,
};
struct ice_chs_chg {
struct LIST_ENTRY_TYPE list_entry;
enum ice_chg_type type;
u8 add_ptg;
u8 add_vsig;
u8 add_tcam_idx;
u8 add_prof;
u16 ptype;
u8 ptg;
u8 prof_id;
u16 vsi;
u16 vsig;
u16 orig_vsig;
u16 tcam_idx;
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
enum ice_prof_type {
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_ALL = 0x6,
ICE_PROF_ALL = 0xFF,
};
#endif /* _ICE_FLEX_TYPE_H_ */

2228	sys/dev/ice/ice_flow.c Normal file (diff suppressed because it is too large)

383	sys/dev/ice/ice_flow.h Normal file
@ -0,0 +1,383 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
#include "ice_flex_type.h"
#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
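/* e.g. ICE_IPV4_MAKE_PREFIX_MASK(24) == 0xFFFFFF00, a /24 netmask */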
#define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful
#define ICE_FLOW_PROF_ID_BYPASS 0
#define ICE_FLOW_PROF_ID_DEFAULT 1
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_VSI_INVAL 0xffff
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_IPV4 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
#define ICE_HASH_INVALID 0
#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
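/*
 * Example (illustrative): configuring RSS over the IPv4 TCP 4-tuple uses
 * ICE_HASH_TCP_IPV4, which is the bitwise OR of
 *
 *	BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *	BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *	BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
 *	BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
 */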
/* Protocol header fields within a packet segment. A segment consists of one
 * or more protocol headers that make up a logical group. Each logical group
 * either encapsulates or is encapsulated by another group, using tunneling or
 * encapsulation protocols for network virtualization such as GRE, VxLAN, etc.
 */
enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_NONE = 0x00000000,
ICE_FLOW_SEG_HDR_ETH = 0x00000001,
ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
ICE_FLOW_SEG_HDR_ARP = 0x00000010,
ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
};
enum ice_flow_field {
/* L2 */
ICE_FLOW_FIELD_IDX_ETH_DA,
ICE_FLOW_FIELD_IDX_ETH_SA,
ICE_FLOW_FIELD_IDX_S_VLAN,
ICE_FLOW_FIELD_IDX_C_VLAN,
ICE_FLOW_FIELD_IDX_ETH_TYPE,
/* L3 */
ICE_FLOW_FIELD_IDX_IPV4_DSCP,
ICE_FLOW_FIELD_IDX_IPV6_DSCP,
ICE_FLOW_FIELD_IDX_IPV4_TTL,
ICE_FLOW_FIELD_IDX_IPV4_PROT,
ICE_FLOW_FIELD_IDX_IPV6_TTL,
ICE_FLOW_FIELD_IDX_IPV6_PROT,
ICE_FLOW_FIELD_IDX_IPV4_SA,
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
ICE_FLOW_FIELD_IDX_IPV6_DA,
/* L4 */
ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
ICE_FLOW_FIELD_IDX_TCP_FLAGS,
/* ARP */
ICE_FLOW_FIELD_IDX_ARP_SIP,
ICE_FLOW_FIELD_IDX_ARP_DIP,
ICE_FLOW_FIELD_IDX_ARP_SHA,
ICE_FLOW_FIELD_IDX_ARP_DHA,
ICE_FLOW_FIELD_IDX_ARP_OP,
/* ICMP */
ICE_FLOW_FIELD_IDX_ICMP_TYPE,
ICE_FLOW_FIELD_IDX_ICMP_CODE,
/* GRE */
ICE_FLOW_FIELD_IDX_GRE_KEYID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
ICE_AVF_FLOW_FIELD_INVALID = 0,
ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
ICE_AVF_FLOW_FIELD_IPV4_UDP,
ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
ICE_AVF_FLOW_FIELD_IPV4_TCP,
ICE_AVF_FLOW_FIELD_IPV4_SCTP,
ICE_AVF_FLOW_FIELD_IPV4_OTHER,
ICE_AVF_FLOW_FIELD_FRAG_IPV4,
/* Values 37-38 are reserved */
ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
ICE_AVF_FLOW_FIELD_IPV6_UDP,
ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
ICE_AVF_FLOW_FIELD_IPV6_TCP,
ICE_AVF_FLOW_FIELD_IPV6_SCTP,
ICE_AVF_FLOW_FIELD_IPV6_OTHER,
ICE_AVF_FLOW_FIELD_FRAG_IPV6,
ICE_AVF_FLOW_FIELD_RSVD47,
ICE_AVF_FLOW_FIELD_FCOE_OX,
ICE_AVF_FLOW_FIELD_FCOE_RX,
ICE_AVF_FLOW_FIELD_FCOE_OTHER,
/* Values 51-62 are reserved */
ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
ICE_AVF_FLOW_FIELD_MAX
};
/* Supported RSS offloads. This macro is defined to support the
* VIRTCHNL_OP_GET_RSS_HENA_CAPS op, with which the PF driver reports the RSS
* hardware capabilities to the caller.
*/
#define ICE_DEFAULT_RSS_HENA ( \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
enum ice_flow_dir {
ICE_FLOW_DIR_UNDEFINED = 0,
ICE_FLOW_TX = 0x01,
ICE_FLOW_RX = 0x02,
ICE_FLOW_TX_RX = ICE_FLOW_RX | ICE_FLOW_TX
};
enum ice_flow_priority {
ICE_FLOW_PRIO_LOW,
ICE_FLOW_PRIO_NORMAL,
ICE_FLOW_PRIO_HIGH
};
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_PROFILE_MAX 1024
#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
struct ice_flow_seg_xtrct {
u8 prot_id; /* Protocol ID of extracted header field */
u16 off; /* Starting offset of the field in header in bytes */
u8 idx; /* Index of FV entry used */
u8 disp; /* Displacement of field in bits from FV entry's start */
};
enum ice_flow_fld_match_type {
ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
};
struct ice_flow_fld_loc {
/* Describe offsets of field information relative to the beginning of
* input buffer provided when adding flow entries.
*/
u16 val; /* Offset where the value is located */
u16 mask; /* Offset where the mask/prefix value is located */
u16 last; /* Length or offset where the upper value is located */
};
struct ice_flow_fld_info {
enum ice_flow_fld_match_type type;
/* Location where to retrieve data from an input buffer */
struct ice_flow_fld_loc src;
/* Location where to put the data into the final entry buffer */
struct ice_flow_fld_loc entry;
struct ice_flow_seg_xtrct xtrct;
};
struct ice_flow_seg_fld_raw {
struct ice_flow_fld_info info;
u16 off; /* Offset from the start of the segment */
};
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
u8 raws_cnt; /* Number of raw fields to be matched */
struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};
/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
struct LIST_ENTRY_TYPE l_entry;
u64 id;
struct ice_flow_prof *prof;
/* Action list */
struct ice_flow_action *acts;
/* Flow entry's content */
void *entry;
enum ice_flow_priority priority;
u16 vsi_handle;
u16 entry_sz;
u8 acts_cnt;
};
#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
struct ice_flow_prof {
struct LIST_ENTRY_TYPE l_entry;
u64 id;
enum ice_flow_dir dir;
u8 segs_cnt;
u8 acts_cnt;
/* Keep track of flow entries associated with this flow profile */
struct ice_lock entries_lock;
struct LIST_HEAD_TYPE entries;
struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
/* software VSI handles referenced by this flow profile */
ice_declare_bitmap(vsis, ICE_MAX_VSI);
union {
/* struct sw_recipe */
/* struct fd */
u32 data;
} cfg;
/* Default actions */
struct ice_flow_action *acts;
};
struct ice_rss_cfg {
struct LIST_ENTRY_TYPE l_entry;
/* bitmap of VSIs added to the RSS entry */
ice_declare_bitmap(vsis, ICE_MAX_VSI);
u64 hashed_flds;
u32 packet_hdr;
};
enum ice_flow_action_type {
ICE_FLOW_ACT_NOP,
ICE_FLOW_ACT_ALLOW,
ICE_FLOW_ACT_DROP,
ICE_FLOW_ACT_CNTR_PKT,
ICE_FLOW_ACT_FWD_VSI,
ICE_FLOW_ACT_FWD_VSI_LIST, /* Should be abstracted away */
ICE_FLOW_ACT_FWD_QUEUE, /* Can Queues be abstracted away? */
ICE_FLOW_ACT_FWD_QUEUE_GROUP, /* Can Queues be abstracted away? */
ICE_FLOW_ACT_PUSH,
ICE_FLOW_ACT_POP,
ICE_FLOW_ACT_MODIFY,
ICE_FLOW_ACT_CNTR_BYTES,
ICE_FLOW_ACT_CNTR_PKT_BYTES,
ICE_FLOW_ACT_GENERIC_0,
ICE_FLOW_ACT_GENERIC_1,
ICE_FLOW_ACT_GENERIC_2,
ICE_FLOW_ACT_GENERIC_3,
ICE_FLOW_ACT_GENERIC_4,
ICE_FLOW_ACT_RPT_FLOW_ID,
ICE_FLOW_ACT_BUILD_PROF_IDX,
};
struct ice_flow_action {
enum ice_flow_action_type type;
union {
u32 dummy;
} data;
};
u64
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt);
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_action *acts, u8 acts_cnt,
struct ice_flow_prof **prof);
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
enum ice_status
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u8 *hw_prof);
u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, struct ice_flow_action *acts, u8 acts_cnt,
u64 *entry_h);
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 prefix_loc, u8 prefix_sz);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
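/* Usage sketch (hypothetical caller, not part of this header): enabling RSS
* hashing of TCP/IPv4 flows on a VSI might look like the following, where
* vsi_handle, dev, and the error reporting are assumptions for illustration:
*
*	enum ice_status status;
*
*	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
*	    ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
*	if (status)
*		device_printf(dev, "RSS config failed: %s\n",
*		    ice_status_str(status));
*/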
#endif /* _ICE_FLOW_H_ */

9480
sys/dev/ice/ice_hw_autogen.h Normal file

File diff suppressed because it is too large

294
sys/dev/ice/ice_iflib.h Normal file
View File

@@ -0,0 +1,294 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_iflib.h
* @brief main header for the iflib driver implementation
*
* Contains the definitions for various structures used by the iflib driver
* implementation, including the Tx and Rx queue structures and the ice_softc
* structure.
*/
#ifndef _ICE_IFLIB_H_
#define _ICE_IFLIB_H_
/* include kernel options first */
#include "ice_opts.h"
#include <sys/param.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <net/iflib.h>
#include "ifdi_if.h"
#include "ice_lib.h"
#include "ice_osdep.h"
#include "ice_resmgr.h"
#include "ice_type.h"
#include "ice_features.h"
/**
* ASSERT_CTX_LOCKED - Assert that the iflib context lock is held
* @sc: ice softc pointer
*
* Macro to trigger an assertion if the iflib context lock is not
* currently held.
*/
#define ASSERT_CTX_LOCKED(sc) sx_assert((sc)->iflib_ctx_lock, SA_XLOCKED)
/**
* IFLIB_CTX_LOCK - lock the iflib context lock
* @sc: ice softc pointer
*
* Macro used to lock the iflib context lock.
*/
#define IFLIB_CTX_LOCK(sc) sx_xlock((sc)->iflib_ctx_lock)
/**
* IFLIB_CTX_UNLOCK - unlock the iflib context lock
* @sc: ice softc pointer
*
* Macro used to unlock the iflib context lock.
*/
#define IFLIB_CTX_UNLOCK(sc) sx_xunlock((sc)->iflib_ctx_lock)
/**
* ASSERT_CFG_LOCKED - Assert that a configuration lock is held
* @sc: ice softc pointer
*
* Macro used by ice_lib.c to verify that certain functions are called while
* holding a configuration lock. For the iflib implementation, this will be
* the iflib context lock.
*/
#define ASSERT_CFG_LOCKED(sc) ASSERT_CTX_LOCKED(sc)
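/* Illustrative locking pattern (assumed usage, not taken from this commit):
* a reconfiguration path takes the context lock around VSI changes, and the
* helpers it calls can then assert the lock via ASSERT_CFG_LOCKED:
*
*	IFLIB_CTX_LOCK(sc);
*	ice_reconfigure_vsi_queues(sc);	// hypothetical helper that calls
*					// ASSERT_CFG_LOCKED(sc) internally
*	IFLIB_CTX_UNLOCK(sc);
*/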
/**
* ICE_IFLIB_MAX_DESC_COUNT - Maximum ring size for iflib
*
* The iflib stack currently requires that the ring size, or number of
* descriptors, be a power of 2. The ice hardware is limited to a maximum of
* 8160 descriptors, which is not quite 2^13. Limit the maximum ring size for
* iflib to just 2^12 (4096).
*/
#define ICE_IFLIB_MAX_DESC_COUNT 4096
/**
* @struct ice_irq_vector
* @brief Driver irq vector structure
*
* ice_lib.c requires the following parameters
* @me: the vector number
*
* Other parameters may be iflib driver specific
*
* The iflib driver uses a single hardware interrupt per Rx queue, and uses
* software interrupts for the Tx queues.
*/
struct ice_irq_vector {
u32 me;
struct if_irq irq;
};
/**
* @struct ice_tx_queue
* @brief Driver Tx queue structure
*
* ice_lib.c requires the following parameters:
* @vsi: backpointer to the VSI structure
* @me: this queue's index into the queue array
* @irqv: always NULL for iflib
* @desc_count: the number of descriptors
* @tx_paddr: the physical address for this queue
* @q_teid: the Tx queue TEID returned from firmware
* @stats: queue statistics
*
* Other parameters may be iflib driver specific
*/
struct ice_tx_queue {
struct ice_vsi *vsi;
struct ice_tx_desc *tx_base;
bus_addr_t tx_paddr;
struct tx_stats stats;
u64 tso;
u16 desc_count;
u32 tail;
struct ice_irq_vector *irqv;
u32 q_teid;
u32 me;
/* descriptor writeback status */
qidx_t *tx_rsq;
qidx_t tx_rs_cidx;
qidx_t tx_rs_pidx;
qidx_t tx_cidx_processed;
};
/**
* @struct ice_rx_queue
* @brief Driver Rx queue structure
*
* ice_lib.c requires the following parameters:
* @vsi: backpointer to the VSI structure
* @me: this queue's index into the queue array
* @irqv: pointer to vector structure associated with this queue
* @desc_count: the number of descriptors
* @rx_paddr: the physical address for this queue
* @tail: the tail register address for this queue
* @stats: queue statistics
*
* Other parameters may be iflib driver specific
*/
struct ice_rx_queue {
struct ice_vsi *vsi;
union ice_32b_rx_flex_desc *rx_base;
bus_addr_t rx_paddr;
struct rx_stats stats;
u16 desc_count;
u32 tail;
struct ice_irq_vector *irqv;
u32 me;
struct if_irq que_irq;
};
/**
* @struct ice_softc
* @brief main structure representing one device
*
* ice_lib.c requires the following parameters
* @all_vsi: the array of all allocated VSIs
* @debug_sysctls: sysctl node for debug sysctls
* @dev: device_t pointer
* @feat_en: bitmap of enabled driver features
* @hw: embedded ice_hw structure
* @ifp: pointer to the ifnet structure
* @link_up: boolean indicating if link is up
* @num_available_vsi: size of the VSI array
* @pf_vsi: embedded VSI structure for the main PF VSI
* @rx_qmgr: queue manager for Rx queues
* @soft_stats: software statistics for this device
* @state: driver state flags
* @stats: hardware statistics for this device
* @tx_qmgr: queue manager for Tx queues
* @vsi_sysctls: sysctl node for all VSI sysctls
* @enable_tx_fc_filter: boolean indicating if the Tx FC filter is enabled
* @enable_tx_lldp_filter: boolean indicating if the Tx LLDP filter is enabled
* @rebuild_ticks: indicates when a post-reset rebuild started
* @imgr: resource manager for interrupt allocations
* @pf_imap: interrupt mapping for PF LAN interrupts
* @lan_vectors: # of vectors used by LAN driver (length of pf_imap)
* @ldo_tlv: LAN Default Override settings from NVM
*
* ice_iov.c requires the following parameters (when PCI_IOV is defined):
* @vfs: array of VF context structures
* @num_vfs: number of VFs to use for SR-IOV
*
* The main representation for a single OS device, used to represent a single
* physical function.
*/
struct ice_softc {
struct ice_hw hw;
struct ice_vsi pf_vsi; /* Main PF VSI */
char admin_mtx_name[16]; /* name of the admin mutex */
struct mtx admin_mtx; /* mutex to protect the admin timer */
struct callout admin_timer; /* timer to trigger admin task */
struct ice_vsi **all_vsi; /* Array of VSI pointers */
u16 num_available_vsi; /* Size of VSI array */
struct sysctl_oid *vsi_sysctls; /* Sysctl node for VSI sysctls */
struct sysctl_oid *debug_sysctls; /* Sysctl node for debug sysctls */
device_t dev;
if_ctx_t ctx;
if_shared_ctx_t sctx;
if_softc_ctx_t scctx;
struct ifmedia *media;
struct ifnet *ifp;
/* device statistics */
struct ice_pf_hw_stats stats;
struct ice_pf_sw_stats soft_stats;
/* Tx/Rx queue managers */
struct ice_resmgr tx_qmgr;
struct ice_resmgr rx_qmgr;
/* Interrupt allocation manager */
struct ice_resmgr imgr;
u16 *pf_imap;
int lan_vectors;
/* iflib Tx/Rx queue count sysctl values */
int ifc_sysctl_ntxqs;
int ifc_sysctl_nrxqs;
/* IRQ Vector data */
struct resource *msix_table;
int num_irq_vectors;
struct ice_irq_vector *irqvs;
/* BAR info */
struct ice_bar_info bar0;
/* link status */
bool link_up;
/* Ethertype filters enabled */
bool enable_tx_fc_filter;
bool enable_tx_lldp_filter;
int rebuild_ticks;
/* driver state flags, only access using atomic functions */
u32 state;
/* NVM link override settings */
struct ice_link_default_override_tlv ldo_tlv;
struct sx *iflib_ctx_lock;
/* Tri-state feature flags (capable/enabled) */
ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT);
ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT);
};
#endif /* _ICE_IFLIB_H_ */

188
sys/dev/ice/ice_iflib_recovery_txrx.c Normal file
View File

@@ -0,0 +1,188 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_iflib_recovery_txrx.c
* @brief iflib Tx/Rx ops for recovery mode
*
* Contains the if_txrx structure of operations used when the driver detects
* that the firmware is in recovery mode. These ops essentially do nothing and
* exist to prevent any chance that the stack could attempt to transmit or
* receive when the device is in firmware recovery mode.
*/
#include "ice_iflib.h"
/*
* iflib txrx methods used when in recovery mode
*/
static int ice_recovery_txd_encap(void *arg, if_pkt_info_t pi);
static int ice_recovery_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static void ice_recovery_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_recovery_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_recovery_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_recovery_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_recovery_rxd_refill(void *arg, if_rxd_update_t iru);
/**
* @var ice_recovery_txrx
* @brief Tx/Rx operations for recovery mode
*
* Similar to ice_txrx, but contains pointers to functions which are no-ops.
* Used when the driver is in firmware recovery mode to prevent any attempt to
* transmit or receive packets while the hardware is not initialized.
*/
struct if_txrx ice_recovery_txrx = {
.ift_txd_encap = ice_recovery_txd_encap,
.ift_txd_flush = ice_recovery_txd_flush,
.ift_txd_credits_update = ice_recovery_txd_credits_update,
.ift_rxd_available = ice_recovery_rxd_available,
.ift_rxd_pkt_get = ice_recovery_rxd_pkt_get,
.ift_rxd_refill = ice_recovery_rxd_refill,
.ift_rxd_flush = ice_recovery_rxd_flush,
};
/**
* ice_recovery_txd_encap - prepare Tx descriptors for a packet
* @arg: the iflib softc structure pointer
* @pi: packet info
*
* Since the Tx queues are not initialized during recovery mode, this function
* does nothing.
*
* @returns ENOSYS
*/
static int
ice_recovery_txd_encap(void __unused *arg, if_pkt_info_t __unused pi)
{
return (ENOSYS);
}
/**
* ice_recovery_txd_flush - Flush Tx descriptors to hardware
* @arg: device specific softc pointer
* @txqid: the Tx queue to flush
* @pidx: descriptor index to advance tail to
*
* Since the Tx queues are not initialized during recovery mode, this function
* does nothing.
*/
static void
ice_recovery_txd_flush(void __unused *arg, uint16_t __unused txqid,
qidx_t __unused pidx)
{
;
}
/**
* ice_recovery_txd_credits_update - cleanup Tx descriptors
* @arg: device private softc
* @txqid: the Tx queue to update
* @clear: if false, only report, do not actually clean
*
* Since the Tx queues are not initialized during recovery mode, this function
* always reports that no descriptors are ready.
*
* @returns 0
*/
static int
ice_recovery_txd_credits_update(void __unused *arg, uint16_t __unused txqid,
bool __unused clear)
{
return (0);
}
/**
* ice_recovery_rxd_available - Return number of available Rx packets
* @arg: device private softc
* @rxqid: the Rx queue id
* @pidx: descriptor start point
* @budget: maximum Rx budget
*
* Since the Rx queues are not initialized during recovery mode, this function
* always reports that no packets are ready.
*
* @returns 0
*/
static int
ice_recovery_rxd_available(void __unused *arg, uint16_t __unused rxqid,
qidx_t __unused pidx, qidx_t __unused budget)
{
return (0);
}
/**
* ice_recovery_rxd_pkt_get - Called by iflib to send data to upper layer
* @arg: device specific softc
* @ri: receive packet info
*
* Since the Rx queues are not initialized during recovery mode this function
* always returns an error indicating that nothing could be done.
*
* @returns ENOSYS
*/
static int
ice_recovery_rxd_pkt_get(void __unused *arg, if_rxd_info_t __unused ri)
{
return (ENOSYS);
}
/**
* ice_recovery_rxd_refill - Prepare Rx descriptors for re-use by hardware
* @arg: device specific softc structure
* @iru: the Rx descriptor update structure
*
* Since the Rx queues are not initialized during Recovery mode, this function
* does nothing.
*/
static void
ice_recovery_rxd_refill(void __unused *arg, if_rxd_update_t __unused iru)
{
;
}
/**
* ice_recovery_rxd_flush - Flush Rx descriptors to hardware
* @arg: device specific softc pointer
* @rxqid: the Rx queue to flush
* @flidx: unused parameter
* @pidx: descriptor index to advance tail to
*
* Since the Rx queues are not initialized during Recovery mode, this function
* does nothing.
*/
static void
ice_recovery_rxd_flush(void __unused *arg, uint16_t __unused rxqid,
uint8_t flidx __unused, qidx_t __unused pidx)
{
;
}

45
sys/dev/ice/ice_iflib_sysctls.h Normal file
View File

@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_iflib_sysctls.h
* @brief iflib specific driver wide sysctls
*
* Contains driver wide sysctls related to the iflib networking stack.
*/
#ifndef _ICE_IFLIB_SYSCTLS_H_
#define _ICE_IFLIB_SYSCTLS_H_
/* include sysctls that are generic and not related to the iflib stack */
#include "ice_common_sysctls.h"
#endif /* _ICE_IFLIB_SYSCTLS_H_ */

401
sys/dev/ice/ice_iflib_txrx.c Normal file
View File

@@ -0,0 +1,401 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_iflib_txrx.c
* @brief iflib Tx/Rx hotpath
*
* Main location for the iflib Tx/Rx hotpath implementation.
*
* Contains the implementation for the iflib function callbacks and the
* if_txrx ops structure.
*/
#include "ice_iflib.h"
/* Tx/Rx hotpath utility functions */
#include "ice_common_txrx.h"
/*
* iflib txrx method declarations
*/
static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
* advanced 32-byte Rx descriptors.
*/
#define RX_FLEX_NIC(desc, field) \
(((struct ice_32b_rx_flex_desc_nic *)desc)->field)
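/* For example, the Rx hotpath below uses RX_FLEX_NIC(&cur->wb, rss_hash) to
* extract the RSS hash from the NIC-mode writeback layout.
*/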
/**
* @var ice_txrx
* @brief Tx/Rx operations for the iflib stack
*
* Structure defining the Tx and Rx related operations that iflib can request
* the driver to perform. These are the main entry points for the hot path of
* the transmit and receive paths in the iflib driver.
*/
struct if_txrx ice_txrx = {
.ift_txd_encap = ice_ift_txd_encap,
.ift_txd_flush = ice_ift_txd_flush,
.ift_txd_credits_update = ice_ift_txd_credits_update,
.ift_rxd_available = ice_ift_rxd_available,
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
.ift_rxd_refill = ice_ift_rxd_refill,
.ift_rxd_flush = ice_ift_rxd_flush,
};
/**
* ice_ift_txd_encap - prepare Tx descriptors for a packet
* @arg: the iflib softc structure pointer
* @pi: packet info
*
* Prepares and encapsulates the given packet into Tx descriptors, in
* preparation for sending to the transmit engine. Sets the necessary context
* descriptors for TSO and other offloads, and prepares the last descriptor
* for the writeback status.
*
* Return 0 on success, non-zero error code on failure.
*/
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
struct ice_tx_desc *txd = NULL;
int i, j, mask, pidx_last;
u32 cmd, off;
cmd = off = 0;
i = pi->ipi_pidx;
/* Set up the TSO/CSUM offload */
if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
/* Set up the TSO context descriptor if required */
if (pi->ipi_csum_flags & CSUM_TSO) {
if (ice_tso_detect_sparse(pi))
return (EFBIG);
i = ice_tso_setup(txq, pi);
}
ice_tx_setup_offload(txq, pi, &cmd, &off);
}
if (pi->ipi_mflags & M_VLANTAG)
cmd |= ICE_TX_DESC_CMD_IL2TAG1;
mask = txq->desc_count - 1;
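/* desc_count is a power of two (an iflib requirement), so the mask wraps
* ring indices without a modulo */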
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
txd = &txq->tx_base[i];
seglen = segs[j].ds_len;
txd->buf_addr = htole64(segs[j].ds_addr);
txd->cmd_type_offset_bsz =
htole64(ICE_TX_DESC_DTYPE_DATA
| ((u64)cmd << ICE_TXD_QW1_CMD_S)
| ((u64)off << ICE_TXD_QW1_OFFSET_S)
| ((u64)seglen << ICE_TXD_QW1_TX_BUF_SZ_S)
| ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));
txq->stats.tx_bytes += seglen;
pidx_last = i;
i = (i+1) & mask;
}
/* Set the last descriptor for report */
#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
txd->cmd_type_offset_bsz |=
htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));
/* Add to report status array */
txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
txq->tx_rs_pidx = (txq->tx_rs_pidx+1) & mask;
MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);
pi->ipi_new_pidx = i;
++txq->stats.tx_packets;
return (0);
}
/**
* ice_ift_txd_flush - Flush Tx descriptors to hardware
* @arg: device specific softc pointer
* @txqid: the Tx queue to flush
* @pidx: descriptor index to advance tail to
*
* Advance the Transmit Descriptor Tail (TDT). This indicates to hardware that
* frames are available for transmit.
*/
static void
ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
struct ice_hw *hw = &sc->hw;
wr32(hw, txq->tail, pidx);
}
/**
* ice_ift_txd_credits_update - cleanup Tx descriptors
* @arg: device private softc
* @txqid: the Tx queue to update
* @clear: if false, only report, do not actually clean
*
* If clear is false, iflib is asking if we *could* clean up any Tx
* descriptors.
*
* If clear is true, iflib is requesting to cleanup and reclaim used Tx
* descriptors.
*/
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
qidx_t processed = 0;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
bool is_done;
rs_cidx = txq->tx_rs_cidx;
if (rs_cidx == txq->tx_rs_pidx)
return (0);
cur = txq->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
if (!is_done)
return (0);
else if (clear == false)
return (1);
prev = txq->tx_cidx_processed;
ntxd = txq->desc_count;
do {
MPASS(prev != cur);
delta = (int32_t)cur - (int32_t)prev;
if (delta < 0)
delta += ntxd;
MPASS(delta > 0);
processed += delta;
prev = cur;
rs_cidx = (rs_cidx + 1) & (ntxd-1);
if (rs_cidx == txq->tx_rs_pidx)
break;
cur = txq->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
} while (is_done);
txq->tx_rs_cidx = rs_cidx;
txq->tx_cidx_processed = prev;
return (processed);
}
/**
* ice_ift_rxd_available - Return number of available Rx packets
* @arg: device private softc
* @rxqid: the Rx queue id
* @pidx: descriptor start point
* @budget: maximum Rx budget
*
* Determines how many Rx packets are available on the queue, up to a maximum
* of the given budget.
*/
static int
ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
union ice_32b_rx_flex_desc *rxd;
uint16_t status0;
int cnt, i, nrxd;
nrxd = rxq->desc_count;
for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt < budget;) {
rxd = &rxq->rx_base[i];
status0 = le16toh(rxd->wb.status_error0);
if ((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) == 0)
break;
if (++i == nrxd)
i = 0;
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))
cnt++;
}
return (cnt);
}
/**
* ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
* @arg: device specific softc
* @ri: receive packet info
*
* This function is called by iflib, and executes in ithread context. It is
* called by iflib to obtain data which has been DMA'ed into host memory.
* Returns zero on success, and an error code on failure.
*/
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
union ice_32b_rx_flex_desc *cur;
u16 status0, plen, vtag, ptype;
bool eop;
size_t cidx;
int i;
cidx = ri->iri_cidx;
i = 0;
do {
/* 5 descriptor receive limit */
MPASS(i < ICE_MAX_RX_SEGS);
cur = &rxq->rx_base[cidx];
status0 = le16toh(cur->wb.status_error0);
plen = le16toh(cur->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
/* we should never be called without a valid descriptor */
MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);
ri->iri_len += plen;
cur->wb.status_error0 = 0;
eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
vtag = le16toh(cur->wb.l2tag1);
else
vtag = 0;
/*
* Make sure packets with bad L2 values are discarded.
* NOTE: Only the EOP descriptor has valid error results.
*/
if (eop && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S))) {
rxq->stats.desc_errs++;
return (EBADMSG);
}
ri->iri_frags[i].irf_flid = 0;
ri->iri_frags[i].irf_idx = cidx;
ri->iri_frags[i].irf_len = plen;
if (++cidx == rxq->desc_count)
cidx = 0;
i++;
} while (!eop);
/* capture soft statistics for this Rx queue */
rxq->stats.rx_packets++;
rxq->stats.rx_bytes += ri->iri_len;
if ((iflib_get_ifp(sc->ctx)->if_capenable & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
ri->iri_rsstype = ice_ptype_to_hash(ptype);
ri->iri_vtag = vtag;
ri->iri_nfrags = i;
if (vtag)
ri->iri_flags |= M_VLANTAG;
return (0);
}
/**
* ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
* @arg: device specific softc structure
* @iru: the Rx descriptor update structure
*
* Update the Rx descriptor indices for a given queue, assigning new physical
* addresses to the descriptors, preparing them for re-use by the hardware.
*/
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq;
uint32_t next_pidx;
int i;
uint64_t *paddrs;
uint32_t pidx;
uint16_t qsidx, count;
paddrs = iru->iru_paddrs;
pidx = iru->iru_pidx;
qsidx = iru->iru_qsidx;
count = iru->iru_count;
rxq = &(sc->pf_vsi.rx_queues[qsidx]);
for (i = 0, next_pidx = pidx; i < count; i++) {
rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
if (++next_pidx == (uint32_t)rxq->desc_count)
next_pidx = 0;
}
}
/**
* ice_ift_rxd_flush - Flush Rx descriptors to hardware
* @arg: device specific softc pointer
* @rxqid: the Rx queue to flush
* @flidx: unused parameter
* @pidx: descriptor index to advance tail to
*
* Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
* software is done with the descriptor and it can be recycled.
*/
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
qidx_t pidx)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
struct ice_hw *hw = &sc->hw;
wr32(hw, rxq->tail, pidx);
}

2355
sys/dev/ice/ice_lan_tx_rx.h Normal file

File diff suppressed because it is too large

8000
sys/dev/ice/ice_lib.c Normal file

File diff suppressed because it is too large

811
sys/dev/ice/ice_lib.h Normal file
View File

@@ -0,0 +1,811 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_lib.h
* @brief header for generic device and sysctl functions
*
* Contains definitions and function declarations for the ice_lib.c file. It
* does not depend on the iflib networking stack.
*/
#ifndef _ICE_LIB_H_
#define _ICE_LIB_H_
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <sys/bitstring.h>
#include "ice_dcb.h"
#include "ice_type.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_resmgr.h"
#include "ice_rss.h"
/* Hide debug sysctls unless INVARIANTS is enabled */
#ifdef INVARIANTS
#define ICE_CTLFLAG_DEBUG 0
#else
#define ICE_CTLFLAG_DEBUG CTLFLAG_SKIP
#endif
/**
* for_each_set_bit - For loop over each set bit in a bit string
* @bit: storage for the bit index
* @data: address of data block to loop over
* @nbits: maximum number of bits to loop over
*
* macro to create a for loop over a bit string, which runs the body once for
* each bit that is set in the string. The bit variable will be set to the
* index of each set bit in the string, with zero representing the first bit.
*/
#define for_each_set_bit(bit, data, nbits) \
for (bit_ffs((bitstr_t *)(data), (nbits), &(bit)); \
(bit) != -1; \
bit_ffs_at((bitstr_t *)(data), (bit) + 1, (nbits), &(bit)))
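/* Usage sketch (illustrative only): count the VSIs referenced by a flow
* profile's bitmap, assuming prof points at a populated ice_flow_prof:
*
*	int bit, count = 0;
*
*	for_each_set_bit(bit, prof->vsis, ICE_MAX_VSI)
*		count++;
*/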
/**
* @var broadcastaddr
* @brief broadcast MAC address
*
* constant defining the broadcast MAC address, used for programming the
* broadcast address as a MAC filter for the PF VSI.
*/
static const u8 broadcastaddr[ETHER_ADDR_LEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
MALLOC_DECLARE(M_ICE);
extern const char ice_driver_version[];
extern const uint8_t ice_major_version;
extern const uint8_t ice_minor_version;
extern const uint8_t ice_patch_version;
extern const uint8_t ice_rc_version;
/* global sysctl indicating whether the Tx FC filter should be enabled */
extern bool ice_enable_tx_fc_filter;
/* global sysctl indicating whether the Tx LLDP filter should be enabled */
extern bool ice_enable_tx_lldp_filter;
/**
* @struct ice_bar_info
* @brief PCI BAR mapping information
*
* Contains data about a PCI BAR that the driver has mapped for use.
*/
struct ice_bar_info {
struct resource *res;
bus_space_tag_t tag;
bus_space_handle_t handle;
bus_size_t size;
int rid;
};
/* Alignment for queues */
#define DBA_ALIGN 128
/* Maximum TSO size is (256K)-1 */
#define ICE_TSO_SIZE ((256*1024) - 1)
/* Minimum size for TSO MSS */
#define ICE_MIN_TSO_MSS 64
#define ICE_MAX_TX_SEGS 8
#define ICE_MAX_TSO_SEGS 128
#define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1)
#define ICE_MAX_RX_SEGS 5
#define ICE_MAX_TSO_HDR_SEGS 3
#define ICE_MSIX_BAR 3
#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
#define ICE_MIN_DESC_COUNT 64
#define ICE_DESC_COUNT_INCR 32
/* List of hardware offloads we support */
#define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \
CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP | \
CSUM_IP_TSO | CSUM_IP6_TSO)
/* Macros to decide what kind of hardware offload to enable */
#define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
#define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP)
#define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP)
#define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO)
/* List of known RX CSUM offload flags */
#define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \
CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
CSUM_COALESCED)
/* List of interface capabilities supported by ice hardware */
#define ICE_FULL_CAPS \
(IFCAP_TSO4 | IFCAP_TSO6 | \
IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)
/* Safe mode disables support for hardware checksums and TSO */
#define ICE_SAFE_CAPS \
(ICE_FULL_CAPS & ~(IFCAP_HWCSUM | IFCAP_TSO | \
IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM))
#define ICE_CAPS(sc) \
(ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? ICE_SAFE_CAPS : ICE_FULL_CAPS)
/**
* ICE_NVM_ACCESS
* @brief Private ioctl command number for NVM access ioctls
*
* The ioctl command number used by NVM update for accessing the driver for
* NVM access commands.
*/
#define ICE_NVM_ACCESS \
(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
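/* The expression above shifts in the ASCII codes of 'E', '1', 'K' and 'G'
* four bits at a time and ORs in command number 5; the whole constant
* evaluates to 0x485F75.
*/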
#define ICE_AQ_LEN 512
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512
#define ICE_CTRLQ_WORK_LIMIT 256
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
/* wait up to 50 microseconds for queue state change */
#define ICE_Q_WAIT_RETRY_LIMIT 5
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
ICE_AQ_VSI_UP_TABLE_UP##i##_M)
/*
* For now, set this to the hardware maximum. Each function gets a smaller
* number assigned to it in hw->func_caps.guar_num_vsi, though there
* appears to be no guarantee that is the maximum number that a function
* can use.
*/
#define ICE_MAX_VSI_AVAILABLE 768
/* Maximum size of a single frame (for Tx and Rx) */
#define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX
/* Maximum MTU size */
#define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
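/* As a worked example: assuming the firmware maximum frame size
* (ICE_AQ_SET_MAC_FRAME_SIZE_MAX, defined outside this diff) is 9728 bytes,
* the maximum MTU is 9728 - 14 - 4 - 4 = 9706 bytes.
*/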
/*
* Hardware requires that TSO packets have a segment size of at least 64
* bytes. To avoid sending bad frames to the hardware, the driver forces the
* MSS for all TSO packets to have a segment size of at least 64 bytes.
*
* However, if the MTU is reduced below a certain size, then the resulting
* larger MSS can result in transmitting segmented frames with a packet size
* larger than the MTU.
*
* Avoid this by preventing the MTU from being lowered below this limit.
* Alternative solutions require changing the TCP stack to disable offloading
* the segmentation when the requested segment size goes below 64 bytes.
*/
#define ICE_MIN_MTU 112
#define ICE_DEFAULT_VF_QUEUES 4
/**
* @enum ice_dyn_idx_t
* @brief Dynamic Control ITR indexes
*
* This enum matches hardware bits and is meant to be used by DYN_CTLN
* registers and QINT registers, or more generally anywhere in the manual
* that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
* register; instead, it is a special value meaning "don't update" ITR0/1/2.
*/
enum ice_dyn_idx_t {
ICE_IDX_ITR0 = 0,
ICE_IDX_ITR1 = 1,
ICE_IDX_ITR2 = 2,
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_MAX 8160
/* Define the default Tx and Rx ITR as 50us (translates to ~20k int/sec max) */
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50
/**
* ice_itr_to_reg - Convert an ITR setting into its register equivalent
* @hw: The device HW structure
* @itr_setting: the ITR setting to convert
*
* Based on the hardware ITR granularity, convert an ITR setting into the
* correct value to prepare programming to the HW.
*/
static inline u16 ice_itr_to_reg(struct ice_hw *hw, u16 itr_setting)
{
return itr_setting / hw->itr_gran;
}
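/* Worked example (assuming the usual 2us ITR granularity, i.e.
* hw->itr_gran == 2): the 50us default setting programs a register value of
* ice_itr_to_reg(hw, ICE_DFLT_RX_ITR) == 50 / 2 == 25.
*/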
/**
* @enum ice_rx_dtype
* @brief DTYPE header split options
*
* This enum matches the Rx context bits to define whether header split is
* enabled or not.
*/
enum ice_rx_dtype {
ICE_RX_DTYPE_NO_SPLIT = 0,
ICE_RX_DTYPE_HEADER_SPLIT = 1,
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
/* Strings used for displaying FEC mode
*
* Use ice_fec_str() to get these unless these need to be embedded in a
* string constant.
*/
#define ICE_FEC_STRING_AUTO "Auto"
#define ICE_FEC_STRING_RS "RS-FEC"
#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
#define ICE_FEC_STRING_NONE "None"
/* Strings used for displaying Flow Control mode
*
* Use ice_fc_str() to get these unless these need to be embedded in a
* string constant.
*/
#define ICE_FC_STRING_FULL "Full"
#define ICE_FC_STRING_TX "Tx"
#define ICE_FC_STRING_RX "Rx"
#define ICE_FC_STRING_NONE "None"
/*
* The number of times the ice_handle_i2c_req function will retry reading
* I2C data via the Admin Queue before returning EBUSY.
*/
#define ICE_I2C_MAX_RETRIES 10
/*
* The Start LLDP Agent AQ command will fail if it's sent too soon after
* the LLDP agent is stopped. The period between the stop and start
* commands must currently be at least 2 seconds.
*/
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)
/*
* The ice_(set|clear)_vsi_promisc() function expects a mask of promiscuous
* modes to operate on. This mask is the default one for the driver, where
* promiscuous is enabled/disabled for all types of non-VLAN-tagged/VLAN 0
* traffic.
*/
#define ICE_VSI_PROMISC_MASK (ICE_PROMISC_UCAST_TX | \
ICE_PROMISC_UCAST_RX | \
ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_MCAST_RX)
struct ice_softc;
/**
* @enum ice_rx_cso_stat
* @brief software checksum offload statistics
*
* Enumeration of possible checksum offload statistics captured by software
* during the Rx path.
*/
enum ice_rx_cso_stat {
ICE_CSO_STAT_RX_IP4_ERR,
ICE_CSO_STAT_RX_IP6_ERR,
ICE_CSO_STAT_RX_L3_ERR,
ICE_CSO_STAT_RX_TCP_ERR,
ICE_CSO_STAT_RX_UDP_ERR,
ICE_CSO_STAT_RX_SCTP_ERR,
ICE_CSO_STAT_RX_L4_ERR,
ICE_CSO_STAT_RX_COUNT
};
/**
* @enum ice_tx_cso_stat
* @brief software checksum offload statistics
*
* Enumeration of possible checksum offload statistics captured by software
* during the Tx path.
*/
enum ice_tx_cso_stat {
ICE_CSO_STAT_TX_TCP,
ICE_CSO_STAT_TX_UDP,
ICE_CSO_STAT_TX_SCTP,
ICE_CSO_STAT_TX_IP4,
ICE_CSO_STAT_TX_IP6,
ICE_CSO_STAT_TX_L3_ERR,
ICE_CSO_STAT_TX_L4_ERR,
ICE_CSO_STAT_TX_COUNT
};
/**
* @struct tx_stats
* @brief software Tx statistics
*
* Contains software counted Tx statistics for a single queue
*/
struct tx_stats {
/* Soft Stats */
u64 tx_bytes;
u64 tx_packets;
u64 mss_too_small;
u64 cso[ICE_CSO_STAT_TX_COUNT];
};
/**
* @struct rx_stats
* @brief software Rx statistics
*
* Contains software counted Rx statistics for a single queue
*/
struct rx_stats {
/* Soft Stats */
u64 rx_packets;
u64 rx_bytes;
u64 desc_errs;
u64 cso[ICE_CSO_STAT_RX_COUNT];
};
/**
* @struct ice_vsi_hw_stats
* @brief hardware statistics for a VSI
*
* Stores statistics that are generated by hardware for a VSI.
*/
struct ice_vsi_hw_stats {
struct ice_eth_stats prev;
struct ice_eth_stats cur;
bool offsets_loaded;
};
/**
* @struct ice_pf_hw_stats
* @brief hardware statistics for a PF
*
* Stores statistics that are generated by hardware for each PF.
*/
struct ice_pf_hw_stats {
struct ice_hw_port_stats prev;
struct ice_hw_port_stats cur;
bool offsets_loaded;
};
/**
* @struct ice_pf_sw_stats
* @brief software statistics for a PF
*
* Contains software generated statistics relevant to a PF.
*/
struct ice_pf_sw_stats {
/* # of reset events handled, by type */
u32 corer_count;
u32 globr_count;
u32 empr_count;
u32 pfr_count;
/* # of detected MDD events for Tx and Rx */
u32 tx_mdd_count;
u32 rx_mdd_count;
};
/**
* @struct ice_vsi
* @brief VSI structure
*
* Contains data relevant to a single VSI
*/
struct ice_vsi {
/* back pointer to the softc */
struct ice_softc *sc;
bool dynamic; /* if true, dynamically allocated */
enum ice_vsi_type type; /* type of this VSI */
u16 idx; /* software index to sc->all_vsi[] */
u16 *tx_qmap; /* Tx VSI to PF queue mapping */
u16 *rx_qmap; /* Rx VSI to PF queue mapping */
bitstr_t *vmap; /* Vector(s) assigned to VSI */
enum ice_resmgr_alloc_type qmap_type;
struct ice_tx_queue *tx_queues; /* Tx queue array */
struct ice_rx_queue *rx_queues; /* Rx queue array */
int num_tx_queues;
int num_rx_queues;
int num_vectors;
int16_t rx_itr;
int16_t tx_itr;
/* RSS configuration */
u16 rss_table_size; /* HW RSS table size */
u8 rss_lut_type; /* Used to configure Get/Set RSS LUT AQ call */
int max_frame_size;
u16 mbuf_sz;
struct ice_aqc_vsi_props info;
/* context for per-VSI sysctls */
struct sysctl_ctx_list ctx;
struct sysctl_oid *vsi_node;
/* context for per-txq sysctls */
struct sysctl_ctx_list txqs_ctx;
struct sysctl_oid *txqs_node;
/* context for per-rxq sysctls */
struct sysctl_ctx_list rxqs_ctx;
struct sysctl_oid *rxqs_node;
/* VSI-level stats */
struct ice_vsi_hw_stats hw_stats;
};
/**
* @enum ice_state
* @brief Driver state flags
*
* Used to indicate the status of various driver events. Intended to be
* modified only using atomic operations, so that we can use it even in places
* which aren't locked.
*/
enum ice_state {
ICE_STATE_CONTROLQ_EVENT_PENDING,
ICE_STATE_VFLR_PENDING,
ICE_STATE_MDD_PENDING,
ICE_STATE_RESET_OICR_RECV,
ICE_STATE_RESET_PFR_REQ,
ICE_STATE_PREPARED_FOR_RESET,
ICE_STATE_RESET_FAILED,
ICE_STATE_DRIVER_INITIALIZED,
ICE_STATE_NO_MEDIA,
ICE_STATE_RECOVERY_MODE,
ICE_STATE_ROLLBACK_MODE,
ICE_STATE_LINK_STATUS_REPORTED,
ICE_STATE_DETACHING,
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
/* This entry must be last */
ICE_STATE_LAST,
};
/* Functions for setting and checking driver state. Note the functions take
* bit positions, not bitmasks. The atomic_testandset_32 and
* atomic_testandclear_32 operations require bit positions, while the
* atomic_set_32 and atomic_clear_32 require bitmasks. This can easily lead to
* programming error, so we provide wrapper functions to avoid this.
*/
/**
* ice_set_state - Set the specified state
* @s: the state bitmap
* @bit: the state to set
*
* Atomically update the state bitmap with the specified bit set.
*/
static inline void
ice_set_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_set_32 expects a bitmask */
atomic_set_32(s, BIT(bit));
}
/**
* ice_clear_state - Clear the specified state
* @s: the state bitmap
* @bit: the state to clear
*
* Atomically update the state bitmap with the specified bit cleared.
*/
static inline void
ice_clear_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_clear_32 expects a bitmask */
atomic_clear_32(s, BIT(bit));
}
/**
* ice_testandset_state - Test and set the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Atomically update the state bitmap, setting the specified bit. Returns the
* previous value of the bit.
*/
static inline u32
ice_testandset_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_testandset_32 expects a bit position */
return atomic_testandset_32(s, bit);
}
/**
* ice_testandclear_state - Test and clear the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Atomically update the state bitmap, clearing the specified bit. Returns the
* previous value of the bit.
*/
static inline u32
ice_testandclear_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_testandclear_32 expects a bit position */
return atomic_testandclear_32(s, bit);
}
/**
* ice_test_state - Test the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Return true if the state is set, false otherwise. Use this only if the flow
* does not need to update the state. If you must update the state as well,
* prefer ice_testandset_state or ice_testandclear_state.
*/
static inline u32
ice_test_state(volatile u32 *s, enum ice_state bit)
{
return (*s & BIT(bit)) ? true : false;
}
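/* Illustrative pattern (assumed usage, not from this commit): schedule the
* admin task for a control queue event only once, relying on the atomic
* test-and-set to suppress duplicate scheduling:
*
*	if (!ice_testandset_state(&sc->state,
*	    ICE_STATE_CONTROLQ_EVENT_PENDING))
*		iflib_admin_intr_deferred(sc->ctx);
*/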
/**
* @struct ice_str_buf
* @brief fixed-length buffer for returning strings
*
* Structure containing a fixed size string buffer, used to implement
* numeric->string conversion functions that may want to return non-constant
* strings.
*
* This allows a conversion function to return a fixed-size string that is
* then copied to wherever it is needed, without requiring an explicit local
* variable passed by reference.
*/
struct ice_str_buf {
char str[ICE_STR_BUF_LEN];
};
struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
struct ice_str_buf _ice_status_str(enum ice_status status);
struct ice_str_buf _ice_err_str(int err);
struct ice_str_buf _ice_fltr_flag_str(u16 flag);
struct ice_str_buf _ice_mdd_tx_tclan_str(u8 event);
struct ice_str_buf _ice_mdd_tx_pqm_str(u8 event);
struct ice_str_buf _ice_mdd_rx_str(u8 event);
struct ice_str_buf _ice_fw_lldp_status(u32 lldp_status);
#define ice_aq_str(err) _ice_aq_str(err).str
#define ice_status_str(err) _ice_status_str(err).str
#define ice_err_str(err) _ice_err_str(err).str
#define ice_fltr_flag_str(flag) _ice_fltr_flag_str(flag).str
#define ice_mdd_tx_tclan_str(event) _ice_mdd_tx_tclan_str(event).str
#define ice_mdd_tx_pqm_str(event) _ice_mdd_tx_pqm_str(event).str
#define ice_mdd_rx_str(event) _ice_mdd_rx_str(event).str
#define ice_fw_lldp_status(lldp_status) _ice_fw_lldp_status(lldp_status).str
/**
* ice_enable_intr - Enable interrupts for given vector
* @hw: the device private HW structure
* @vector: the interrupt index in PF space
*
* In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
*/
static inline void
ice_enable_intr(struct ice_hw *hw, int vector)
{
u32 dyn_ctl;
/* Use ITR_NONE so that ITR configuration is not changed. */
dyn_ctl = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}
/**
* ice_disable_intr - Disable interrupts for given vector
* @hw: the device private HW structure
* @vector: the interrupt index in PF space
*
* In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
*/
static inline void
ice_disable_intr(struct ice_hw *hw, int vector)
{
u32 dyn_ctl;
/* Use ITR_NONE so that ITR configuration is not changed. */
dyn_ctl = ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S;
wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}
/**
* ice_is_tx_desc_done - determine if a Tx descriptor is done
* @txd: the Tx descriptor to check
*
* Returns true if hardware is done with a Tx descriptor and software is
* capable of re-using it.
*/
static inline bool
ice_is_tx_desc_done(struct ice_tx_desc *txd)
{
return (((txd->cmd_type_offset_bsz & ICE_TXD_QW1_DTYPE_M)
>> ICE_TXD_QW1_DTYPE_S) == ICE_TX_DESC_DTYPE_DESC_DONE);
}
/**
* ice_get_pf_id - Get the PF id from the hardware registers
* @hw: the ice hardware structure
*
* Reads the PF_FUNC_RID register and extracts the function number from it.
* Intended to be used in cases where hw->pf_id hasn't yet been assigned by
* ice_init_hw.
*
* @pre this function should be called only after PCI register access has been
* setup, and prior to ice_init_hw. After hardware has been initialized, the
* cached hw->pf_id value can be used.
*/
static inline u8
ice_get_pf_id(struct ice_hw *hw)
{
return (u8)((rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >>
PF_FUNC_RID_FUNCTION_NUMBER_S);
}
/* Details of how to re-initialize depend on the networking stack */
void ice_request_stack_reinit(struct ice_softc *sc);
/* Details of how to check if the network stack is detaching us */
bool ice_driver_is_detaching(struct ice_softc *sc);
int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending);
int ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num);
void ice_free_bar(device_t dev, struct ice_bar_info *bar);
void ice_set_ctrlq_len(struct ice_hw *hw);
void ice_release_vsi(struct ice_vsi *vsi);
struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type);
int ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
const int max_rx_queues);
void ice_free_vsi_qmaps(struct ice_vsi *vsi);
int ice_initialize_vsi(struct ice_vsi *vsi);
void ice_deinit_vsi(struct ice_vsi *vsi);
uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi);
int ice_get_phy_type_low(uint64_t phy_type_low);
int ice_get_phy_type_high(uint64_t phy_type_high);
enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
void ice_configure_rxq_interrupts(struct ice_vsi *vsi);
void ice_configure_txq_interrupts(struct ice_vsi *vsi);
void ice_flush_rxq_interrupts(struct ice_vsi *vsi);
void ice_flush_txq_interrupts(struct ice_vsi *vsi);
int ice_cfg_vsi_for_tx(struct ice_vsi *vsi);
int ice_cfg_vsi_for_rx(struct ice_vsi *vsi);
int ice_control_rx_queues(struct ice_vsi *vsi, bool enable);
int ice_cfg_pf_default_mac_filters(struct ice_softc *sc);
int ice_rm_pf_default_mac_filters(struct ice_softc *sc);
void ice_print_nvm_version(struct ice_softc *sc);
void ice_update_vsi_hw_stats(struct ice_vsi *vsi);
void ice_reset_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_softc *sc);
void ice_reset_pf_stats(struct ice_softc *sc);
void ice_add_device_sysctls(struct ice_softc *sc);
void ice_log_hmc_error(struct ice_hw *hw, device_t dev);
void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
struct ice_eth_stats *stats);
void ice_add_vsi_sysctls(struct ice_vsi *vsi);
void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
struct ice_hw_port_stats *stats);
void ice_configure_misc_interrupts(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi);
void ice_add_device_tunables(struct ice_softc *sc);
int ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
int ice_vsi_disable_tx(struct ice_vsi *vsi);
void ice_vsi_add_txqs_ctx(struct ice_vsi *vsi);
void ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi);
void ice_vsi_del_txqs_ctx(struct ice_vsi *vsi);
void ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi);
void ice_add_txq_sysctls(struct ice_tx_queue *txq);
void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
int ice_config_rss(struct ice_vsi *vsi);
void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
void ice_load_pkg_file(struct ice_softc *sc);
void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status);
uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
void ice_save_pci_info(struct ice_hw *hw, device_t dev);
int ice_replay_all_vsi_cfg(struct ice_softc *sc);
void ice_link_up_msg(struct ice_softc *sc);
int ice_update_laa_mac(struct ice_softc *sc);
void ice_get_and_print_bus_info(struct ice_softc *sc);
const char *ice_fec_str(enum ice_fec_mode mode);
const char *ice_fc_str(enum ice_fc_mode mode);
const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action);
const char *ice_state_to_str(enum ice_state state);
int ice_init_link_events(struct ice_softc *sc);
void ice_configure_rx_itr(struct ice_vsi *vsi);
void ice_configure_tx_itr(struct ice_vsi *vsi);
void ice_setup_pf_vsi(struct ice_softc *sc);
void ice_handle_mdd_event(struct ice_softc *sc);
void ice_init_dcb_setup(struct ice_softc *sc);
int ice_send_version(struct ice_softc *sc);
int ice_cfg_pf_ethertype_filters(struct ice_softc *sc);
void ice_init_link_configuration(struct ice_softc *sc);
void ice_init_saved_phy_cfg(struct ice_softc *sc);
void ice_apply_saved_phy_cfg(struct ice_softc *sc);
void ice_set_link_management_mode(struct ice_softc *sc);
int ice_module_event_handler(module_t mod, int what, void *arg);
int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req);
int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
#endif /* _ICE_LIB_H_ */

1303
sys/dev/ice/ice_nvm.c Normal file

File diff suppressed because it is too large

143
sys/dev/ice/ice_nvm.h Normal file
View File

@ -0,0 +1,143 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_NVM_H_
#define _ICE_NVM_H_
#define ICE_NVM_CMD_READ 0x0000000B
#define ICE_NVM_CMD_WRITE 0x0000000C
/* NVM Access config bits */
#define ICE_NVM_CFG_MODULE_M MAKEMASK(0xFF, 0)
#define ICE_NVM_CFG_MODULE_S 0
#define ICE_NVM_CFG_FLAGS_M MAKEMASK(0xF, 8)
#define ICE_NVM_CFG_FLAGS_S 8
#define ICE_NVM_CFG_EXT_FLAGS_M MAKEMASK(0xF, 12)
#define ICE_NVM_CFG_EXT_FLAGS_S 12
#define ICE_NVM_CFG_ADAPTER_INFO_M MAKEMASK(0xFFFF, 16)
#define ICE_NVM_CFG_ADAPTER_INFO_S 16
/* NVM Read Get Driver Features */
#define ICE_NVM_GET_FEATURES_MODULE 0xE
#define ICE_NVM_GET_FEATURES_FLAGS 0xF
/* NVM Read/Write Mapped Space */
#define ICE_NVM_REG_RW_MODULE 0x0
#define ICE_NVM_REG_RW_FLAGS 0x1
#define ICE_NVM_ACCESS_MAJOR_VER 0
#define ICE_NVM_ACCESS_MINOR_VER 5
/* NVM Access feature flags. Other bits in the features field are reserved and
* should be set to zero when reporting the ice_nvm_features structure.
*/
#define ICE_NVM_FEATURES_0_REG_ACCESS BIT(1)
/* NVM Access Features */
struct ice_nvm_features {
u8 major; /* Major version (informational only) */
u8 minor; /* Minor version (informational only) */
u16 size; /* size of ice_nvm_features structure */
u8 features[12]; /* Array of feature bits */
};
/* NVM Access command */
struct ice_nvm_access_cmd {
u32 command; /* NVM command: READ or WRITE */
u32 config; /* NVM command configuration */
u32 offset; /* offset to read/write, in bytes */
u32 data_size; /* size of data field, in bytes */
};
/* NVM Access data */
union ice_nvm_access_data {
u32 regval; /* Storage for register value */
struct ice_nvm_features drv_features; /* NVM features */
};
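/*
* Usage sketch (illustrative): based on the masks above, a register read
* through the NVM access interface might be encoded as follows. The exact
* config encoding here is an assumption derived from the module/flags
* fields, not a documented requirement:
*
*	struct ice_nvm_access_cmd cmd = {
*		.command   = ICE_NVM_CMD_READ,
*		.config    = ICE_NVM_REG_RW_MODULE |
*			     (ICE_NVM_REG_RW_FLAGS << ICE_NVM_CFG_FLAGS_S),
*		.offset    = GLNVM_GENS,
*		.data_size = sizeof(u32),
*	};
*	union ice_nvm_access_data data;
*
*	if (ice_nvm_access_read(hw, &cmd, &data) == ICE_SUCCESS)
*		... data.regval now holds the register contents ...
*/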
/* NVM Access registers */
#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
#define GL_HICR 0x00082040
#define GL_HICR_EN 0x00082044
#define GLGEN_CSR_DEBUG_C 0x00075750
#define GLPCI_LBARCTRL 0x0009DE74
#define GLNVM_GENS 0x000B6100
#define GLNVM_FLA 0x000B6108
#define ICE_NVM_ACCESS_GL_HIDA_MAX 15
#define ICE_NVM_ACCESS_GL_HIBA_MAX 1023
u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd);
enum ice_status
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
enum ice_status
ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
enum ice_status
ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
enum ice_status
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd);
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
enum ice_status
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd);
enum ice_status
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
u16 elem_count, struct ice_sq_cd *cd);
#endif /* _ICE_NVM_H_ */

48
sys/dev/ice/ice_opts.h Normal file
View File

@ -0,0 +1,48 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_opts.h
* @brief header including kernel option files
*
* Contains the various opt_*.h header files which set various macros
* indicating features and functionality which depend on kernel configuration.
*/
#ifndef _ICE_OPTS_H_
#define _ICE_OPTS_H_
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

409
sys/dev/ice/ice_osdep.c Normal file
View File

@ -0,0 +1,409 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_osdep.c
* @brief Functions used to implement OS compatibility layer
*
* Contains functions used by ice_osdep.h to implement the OS compatibility
* layer used by some of the hardware files. Specifically, it is for the bits
* of OS compatibility which don't make sense as macros or inline functions.
*/
#include "ice_common.h"
#include "ice_iflib.h"
#include <machine/stdarg.h>
#include <sys/time.h>
/**
* @var M_ICE_OSDEP
* @brief OS compatibility layer allocation type
*
* malloc(9) allocation type used by the OS compatibility layer for
* distinguishing allocations by this layer from those of the rest of the
* driver.
*/
MALLOC_DEFINE(M_ICE_OSDEP, "ice-osdep", "Intel(R) 100Gb Network Driver osdep allocations");
/**
* @var ice_lock_count
* @brief Global count of # of ice_lock mutexes initialized
*
* A global count of the total number of times that ice_init_lock has been
* called. This is used to generate unique lock names for each ice_lock, to
* aid in witness lock checking.
*/
u16 ice_lock_count = 0;
static void ice_dmamap_cb(void *arg, bus_dma_segment_t * segs, int __unused nseg, int error);
/**
* ice_hw_to_dev - Given a hw private struct, find the associated device_t
* @hw: the hardware private structure
*
* Given a hw structure pointer, lookup the softc and extract the device
* pointer. Assumes that hw is embedded within the ice_softc, instead of being
* allocated separately, so that __containerof math will work.
*
* This can't be defined in ice_osdep.h as it depends on the complete
* definition of struct ice_softc. That can't be easily included in
* ice_osdep.h without creating circular header dependencies.
*/
device_t
ice_hw_to_dev(struct ice_hw *hw) {
struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
return sc->dev;
}
/**
* ice_debug - Log a debug message if the type is enabled
* @hw: device private hardware structure
* @mask: the debug message type
* @fmt: printf format specifier
*
* Check if hw->debug_mask has enabled the given message type. If so, log the
* message to the console using vprintf. Mimic the output of device_printf by
* using device_print_prettyname().
*/
void
ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...)
{
device_t dev = ice_hw_to_dev(hw);
va_list args;
if (!(mask & hw->debug_mask))
return;
device_print_prettyname(dev);
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
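/*
* Usage sketch: callers pass one of the ICE_DBG_* mask bits defined in the
* shared code, e.g.
*
*	ice_debug(hw, ICE_DBG_INIT, "firmware ready after %d ms\n", elapsed);
*
* which prints only when the corresponding bit is set in hw->debug_mask.
*/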
/**
* ice_debug_array - Format and print an array of values to the console
* @hw: private hardware structure
* @mask: the debug message type
* @rowsize: preferred number of rows to use
* @groupsize: preferred size in bytes to print each chunk
* @buf: the array buffer to print
* @len: size of the array buffer
*
* Format the given array as a series of uint8_t values with hexadecimal
* notation and log the contents to the console log.
*
* TODO: Currently only supports a group size of 1, due to the way hexdump is
* implemented.
*/
void
ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
uint32_t __unused groupsize, uint8_t *buf, size_t len)
{
device_t dev = ice_hw_to_dev(hw);
char prettyname[20];
if (!(mask & hw->debug_mask))
return;
/* Format the device header to a string */
snprintf(prettyname, sizeof(prettyname), "%s: ", device_get_nameunit(dev));
/* Make sure the row-size isn't too large */
if (rowsize > 0xFF)
rowsize = 0xFF;
hexdump(buf, len, prettyname, HD_OMIT_CHARS | rowsize);
}
/**
* rd32 - Read a 32bit hardware register value
* @hw: the private hardware structure
* @reg: register address to read
*
* Read the specified 32bit register value from BAR0 and return its contents.
*/
uint32_t
rd32(struct ice_hw *hw, uint32_t reg)
{
struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
return bus_space_read_4(sc->bar0.tag, sc->bar0.handle, reg);
}
/**
* rd64 - Read a 64bit hardware register value
* @hw: the private hardware structure
* @reg: register address to read
*
* Read the specified 64bit register value from BAR0 and return its contents.
*
* @pre For 32-bit builds, assumes that the 64bit register read can be
* safely broken up into two 32-bit register reads.
*/
uint64_t
rd64(struct ice_hw *hw, uint32_t reg)
{
struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
uint64_t data;
#ifdef __amd64__
data = bus_space_read_8(sc->bar0.tag, sc->bar0.handle, reg);
#else
/*
* bus_space_read_8 isn't supported on 32bit platforms, so we fall
* back to using two bus_space_read_4 calls.
*/
data = bus_space_read_4(sc->bar0.tag, sc->bar0.handle, reg);
data |= ((uint64_t)bus_space_read_4(sc->bar0.tag, sc->bar0.handle, reg + 4)) << 32;
#endif
return data;
}
/**
* wr32 - Write a 32bit hardware register
* @hw: the private hardware structure
* @reg: the register address to write to
* @val: the 32bit value to write
*
* Write the specified 32bit value to a register address in BAR0.
*/
void
wr32(struct ice_hw *hw, uint32_t reg, uint32_t val)
{
struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
bus_space_write_4(sc->bar0.tag, sc->bar0.handle, reg, val);
}
/**
* wr64 - Write a 64bit hardware register
* @hw: the private hardware structure
* @reg: the register address to write to
* @val: the 64bit value to write
*
* Write the specified 64bit value to a register address in BAR0.
*
* @pre For 32-bit builds, assumes that the 64bit register write can be safely
* broken up into two 32-bit register writes.
*/
void
wr64(struct ice_hw *hw, uint32_t reg, uint64_t val)
{
struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
#ifdef __amd64__
bus_space_write_8(sc->bar0.tag, sc->bar0.handle, reg, val);
#else
uint32_t lo_val, hi_val;
/*
* bus_space_write_8 isn't supported on 32bit platforms, so we fall
* back to using two bus_space_write_4 calls.
*/
lo_val = (uint32_t)val;
hi_val = (uint32_t)(val >> 32);
bus_space_write_4(sc->bar0.tag, sc->bar0.handle, reg, lo_val);
bus_space_write_4(sc->bar0.tag, sc->bar0.handle, reg + 4, hi_val);
#endif
}
/**
* ice_usec_delay - Delay for the specified number of microseconds
* @time: microseconds to delay
* @sleep: if true, sleep where possible
*
* If sleep is true, and if the current thread is allowed to sleep, pause so
* that another thread can execute. Otherwise, use DELAY to spin the thread
* instead.
*/
void
ice_usec_delay(uint32_t time, bool sleep)
{
if (sleep && THREAD_CAN_SLEEP())
pause("ice_usec_delay", USEC_2_TICKS(time));
else
DELAY(time);
}
/**
* ice_msec_delay - Delay for the specified number of milliseconds
* @time: milliseconds to delay
* @sleep: if true, sleep where possible
*
* If sleep is true, and if the current thread is allowed to sleep, pause so
* that another thread can execute. Otherwise, use DELAY to spin the thread
* instead.
*/
void
ice_msec_delay(uint32_t time, bool sleep)
{
if (sleep && THREAD_CAN_SLEEP())
pause("ice_msec_delay", MSEC_2_TICKS(time));
else
DELAY(time * 1000);
}
/**
* ice_msec_pause - pause (sleep) the thread for a time in milliseconds
* @time: milliseconds to sleep
*
* Wrapper for ice_msec_delay with sleep set to true.
*/
void
ice_msec_pause(uint32_t time)
{
ice_msec_delay(time, true);
}
/**
* ice_msec_spin - Spin the thread for a time in milliseconds
* @time: milliseconds to delay
*
* Wrapper for ice_msec_delay with sleep set to false.
*/
void
ice_msec_spin(uint32_t time)
{
ice_msec_delay(time, false);
}
/********************************************************************
* Manage DMA'able memory.
*******************************************************************/
/**
* ice_dmamap_cb - Callback function for DMA maps
* @arg: pointer to return the segment address
* @segs: the segments array
* @nseg: number of segments in the array
* @error: error code
*
* Callback used by the bus DMA code to obtain the segment address.
*/
static void
ice_dmamap_cb(void *arg, bus_dma_segment_t * segs, int __unused nseg, int error)
{
if (error)
return;
*(bus_addr_t *) arg = segs->ds_addr;
return;
}
/**
* ice_alloc_dma_mem - Request OS to allocate DMA memory
* @hw: private hardware structure
* @mem: structure defining the DMA memory request
* @size: the allocation size
*
* Allocates some memory for DMA use, using the FreeBSD bus DMA interface to
* track this memory with a bus DMA tag and map.
*
* Returns a pointer to the DMA memory address.
*/
void *
ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *mem, u64 size)
{
device_t dev = ice_hw_to_dev(hw);
int err;
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filtfunc, filtfuncarg */
size, /* maxsize */
1, /* nsegments */
size, /* maxsegsz */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&mem->tag);
if (err != 0) {
device_printf(dev,
"ice_alloc_dma: bus_dma_tag_create failed, "
"error %s\n", ice_err_str(err));
goto fail_0;
}
err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
if (err != 0) {
device_printf(dev,
"ice_alloc_dma: bus_dmamem_alloc failed, "
"error %s\n", ice_err_str(err));
goto fail_1;
}
err = bus_dmamap_load(mem->tag, mem->map, mem->va,
size,
ice_dmamap_cb,
&mem->pa,
BUS_DMA_NOWAIT);
if (err != 0) {
device_printf(dev,
"ice_alloc_dma: bus_dmamap_load failed, "
"error %s\n", ice_err_str(err));
goto fail_2;
}
mem->size = size;
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
return (mem->va);
fail_2:
bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
bus_dma_tag_destroy(mem->tag);
fail_0:
mem->map = NULL;
mem->tag = NULL;
return (NULL);
}
/**
* ice_free_dma_mem - Free DMA memory allocated by ice_alloc_dma_mem
* @hw: the hardware private structure
* @mem: DMA memory to free
*
* Release the bus DMA tag and map, and free the DMA memory associated with
* it.
*/
void
ice_free_dma_mem(struct ice_hw __unused *hw, struct ice_dma_mem *mem)
{
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(mem->tag, mem->map);
bus_dmamem_free(mem->tag, mem->va, mem->map);
bus_dma_tag_destroy(mem->tag);
mem->map = NULL;
mem->tag = NULL;
}
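/*
* Usage sketch (illustrative): allocation and free calls pair around the
* lifetime of a hardware object; ring_size below is a caller-chosen length:
*
*	struct ice_dma_mem mem;
*
*	if (ice_alloc_dma_mem(hw, &mem, ring_size) == NULL)
*		return (ENOMEM);
*	... program mem.pa into the device; access the memory via mem.va ...
*	ice_free_dma_mem(hw, &mem);
*/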

521
sys/dev/ice/ice_osdep.h Normal file
View File

@ -0,0 +1,521 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_osdep.h
* @brief OS compatibility layer
*
* Contains various definitions and functions which are part of an OS
* compatibility layer for sharing code with other operating systems.
*/
#ifndef _ICE_OSDEP_H_
#define _ICE_OSDEP_H_
#include <sys/endian.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/bus_dma.h>
#include <netinet/in.h>
#include <sys/counter.h>
#include <sys/sbuf.h>
#include "ice_alloc.h"
#define ICE_INTEL_VENDOR_ID 0x8086
#define ICE_STR_BUF_LEN 32
struct ice_hw;
device_t ice_hw_to_dev(struct ice_hw *hw);
/* configure hw->debug_mask to enable debug prints */
void ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...) __printflike(3, 4);
void ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
uint32_t groupsize, uint8_t *buf, size_t len);
#define ice_info(_hw, _fmt, args...) \
device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)
#define ice_warn(_hw, _fmt, args...) \
device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)
#define DIVIDE_AND_ROUND_UP howmany
#define ROUND_UP roundup
uint32_t rd32(struct ice_hw *hw, uint32_t reg);
uint64_t rd64(struct ice_hw *hw, uint32_t reg);
void wr32(struct ice_hw *hw, uint32_t reg, uint32_t val);
void wr64(struct ice_hw *hw, uint32_t reg, uint64_t val);
#define ice_flush(_hw) rd32((_hw), GLGEN_STAT)
MALLOC_DECLARE(M_ICE_OSDEP);
/**
* ice_calloc - Allocate an array of elements
* @hw: the hardware private structure
* @count: number of elements to allocate
* @size: the size of each element
*
* Allocate memory for count elements of the given size. Note that the OS
* compatibility layer assumes all allocation functions will provide zero'd
* memory.
*/
static inline void *
ice_calloc(struct ice_hw __unused *hw, size_t count, size_t size)
{
return malloc(count * size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
}
/**
* ice_malloc - Allocate memory of a specified size
* @hw: the hardware private structure
* @size: the size to allocate
*
* Allocates memory of the specified size. Note that the OS compatibility
* layer assumes that all allocations will provide zero'd memory.
*/
static inline void *
ice_malloc(struct ice_hw __unused *hw, size_t size)
{
return malloc(size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
}
/**
* ice_memdup - Allocate a copy of some other memory
* @hw: private hardware structure
* @src: the source to copy from
* @size: allocation size
* @dir: the direction of copying
*
* Allocate memory of the specified size, and copy bytes from the src to fill
* it. We don't need to zero this memory as we immediately initialize it by
* copying from the src pointer.
*/
static inline void *
ice_memdup(struct ice_hw __unused *hw, const void *src, size_t size,
enum ice_memcpy_type __unused dir)
{
void *dst = malloc(size, M_ICE_OSDEP, M_NOWAIT);
if (dst != NULL)
memcpy(dst, src, size);
return dst;
}
/**
* ice_free - Free previously allocated memory
* @hw: the hardware private structure
* @mem: pointer to the memory to free
*
* Free memory that was previously allocated by ice_calloc, ice_malloc, or
* ice_memdup.
*/
static inline void
ice_free(struct ice_hw __unused *hw, void *mem)
{
free(mem, M_ICE_OSDEP);
}
/* These are macros in order to drop the unused direction enumeration constant */
#define ice_memset(addr, c, len, unused) memset((addr), (c), (len))
#define ice_memcpy(dst, src, len, unused) memcpy((dst), (src), (len))
void ice_usec_delay(uint32_t time, bool sleep);
void ice_msec_delay(uint32_t time, bool sleep);
void ice_msec_pause(uint32_t time);
void ice_msec_spin(uint32_t time);
#define UNREFERENCED_PARAMETER(_p) _p = _p
#define UNREFERENCED_1PARAMETER(_p) do { \
UNREFERENCED_PARAMETER(_p); \
} while (0)
#define UNREFERENCED_2PARAMETER(_p, _q) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
} while (0)
#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
UNREFERENCED_PARAMETER(_r); \
} while (0)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
UNREFERENCED_PARAMETER(_r); \
UNREFERENCED_PARAMETER(_s); \
} while (0)
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do { \
UNREFERENCED_PARAMETER(_p); \
UNREFERENCED_PARAMETER(_q); \
UNREFERENCED_PARAMETER(_r); \
UNREFERENCED_PARAMETER(_s); \
UNREFERENCED_PARAMETER(_t); \
} while (0)
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define MAKEMASK(_m, _s) ((_m) << (_s))
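/* For example, MAKEMASK(0xF, 8) evaluates to 0xF00; this is how field masks
* such as ICE_NVM_CFG_FLAGS_M in ice_nvm.h are constructed.
*/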
#define LIST_HEAD_TYPE ice_list_head
#define LIST_ENTRY_TYPE ice_list_node
/**
* @struct ice_list_node
* @brief simplified linked list node API
*
* Represents a node in a linked list, which can be embedded into a structure
* to allow that structure to be inserted into a linked list. Access to the
* contained structure is done via __containerof
*/
struct ice_list_node {
LIST_ENTRY(ice_list_node) entries;
};
/**
* @struct ice_list_head
* @brief simplified linked list head API
*
* Represents the head of a linked list. The linked list should consist of
* a series of ice_list_node structures embedded into another structure
* accessed using __containerof. This way, the ice_list_head doesn't need to
* know the type of the structure it contains.
*/
LIST_HEAD(ice_list_head, ice_list_node);
#define INIT_LIST_HEAD LIST_INIT
/* LIST_EMPTY doesn't need to be changed */
#define LIST_ADD(entry, head) LIST_INSERT_HEAD(head, entry, entries)
#define LIST_ADD_AFTER(entry, elem) LIST_INSERT_AFTER(elem, entry, entries)
#define LIST_DEL(entry) LIST_REMOVE(entry, entries)
#define _osdep_LIST_ENTRY(ptr, type, member) \
__containerof(ptr, type, member)
#define LIST_FIRST_ENTRY(head, type, member) \
_osdep_LIST_ENTRY(LIST_FIRST(head), type, member)
#define LIST_NEXT_ENTRY(ptr, unused, member) \
_osdep_LIST_ENTRY(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)
#define LIST_REPLACE_INIT(old_head, new_head) do { \
__typeof(new_head) _new_head = (new_head); \
LIST_INIT(_new_head); \
LIST_SWAP(old_head, _new_head, ice_list_node, entries); \
} while (0)
#define LIST_ENTRY_SAFE(_ptr, _type, _member) \
({ __typeof(_ptr) ____ptr = (_ptr); \
____ptr ? _osdep_LIST_ENTRY(____ptr, _type, _member) : NULL; \
})
/**
* ice_get_list_tail - Return the pointer to the last node in the list
* @head: the pointer to the head of the list
*
* A helper function for implementing LIST_ADD_TAIL and LIST_LAST_ENTRY.
* Returns the pointer to the last node in the list, or NULL if the list is
* empty.
*
* Note: due to the list implementation this is O(N), where N is the size of
* the list. An O(1) implementation requires replacing the underlying list
* datastructure with one that has a tail pointer. This is problematic,
* because using a simple TAILQ would require that the addition and deletion
* be given the head of the list.
*/
static inline struct ice_list_node *
ice_get_list_tail(struct ice_list_head *head)
{
struct ice_list_node *node = LIST_FIRST(head);
if (node == NULL)
return NULL;
while (LIST_NEXT(node, entries) != NULL)
node = LIST_NEXT(node, entries);
return node;
}
/* TODO: This is O(N). An O(1) implementation would require a different
* underlying list structure, such as a circularly linked list. */
#define LIST_ADD_TAIL(entry, head) do { \
struct ice_list_node *node = ice_get_list_tail(head); \
\
if (node == NULL) { \
LIST_ADD(entry, head); \
} else { \
LIST_INSERT_AFTER(node, entry, entries); \
} \
} while (0)
#define LIST_LAST_ENTRY(head, type, member) \
LIST_ENTRY_SAFE(ice_get_list_tail(head), type, member)
#define LIST_FIRST_ENTRY_SAFE(head, type, member) \
LIST_ENTRY_SAFE(LIST_FIRST(head), type, member)
#define LIST_NEXT_ENTRY_SAFE(ptr, member) \
LIST_ENTRY_SAFE(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)
#define LIST_FOR_EACH_ENTRY(pos, head, unused, member) \
for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member); \
pos; \
pos = LIST_NEXT_ENTRY_SAFE(pos, member))
#define LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, unused, member) \
for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member); \
pos && ({ n = LIST_NEXT_ENTRY_SAFE(pos, member); 1; }); \
pos = n)
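/*
* Usage sketch (illustrative): a structure embeds an ice_list_node, and the
* traversal macros recover the containing structure via __containerof. The
* example_entry type is hypothetical:
*
*	struct example_entry {
*		struct ice_list_node node;
*		int value;
*	};
*	struct ice_list_head head;
*	struct example_entry *cur;
*
*	INIT_LIST_HEAD(&head);
*	LIST_FOR_EACH_ENTRY(cur, &head, struct example_entry, node)
*		... operate on cur->value ...
*/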
#define STATIC static
#define NTOHS ntohs
#define NTOHL ntohl
#define HTONS htons
#define HTONL htonl
#define LE16_TO_CPU le16toh
#define LE32_TO_CPU le32toh
#define LE64_TO_CPU le64toh
#define CPU_TO_LE16 htole16
#define CPU_TO_LE32 htole32
#define CPU_TO_LE64 htole64
#define CPU_TO_BE16 htobe16
#define CPU_TO_BE32 htobe32
#define SNPRINTF snprintf
/**
* @typedef u8
* @brief compatibility typedef for uint8_t
*/
typedef uint8_t u8;
/**
* @typedef u16
* @brief compatibility typedef for uint16_t
*/
typedef uint16_t u16;
/**
* @typedef u32
* @brief compatibility typedef for uint32_t
*/
typedef uint32_t u32;
/**
* @typedef u64
* @brief compatibility typedef for uint64_t
*/
typedef uint64_t u64;
/**
* @typedef s8
* @brief compatibility typedef for int8_t
*/
typedef int8_t s8;
/**
* @typedef s16
* @brief compatibility typedef for int16_t
*/
typedef int16_t s16;
/**
* @typedef s32
* @brief compatibility typedef for int32_t
*/
typedef int32_t s32;
/**
* @typedef s64
* @brief compatibility typedef for int64_t
*/
typedef int64_t s64;
#define __le16 u16
#define __le32 u32
#define __le64 u64
#define __be16 u16
#define __be32 u32
#define __be64 u64
#define ice_hweight8(x) bitcount16((u8)x)
#define ice_hweight16(x) bitcount16(x)
#define ice_hweight32(x) bitcount32(x)
#define ice_hweight64(x) bitcount64(x)
/**
* @struct ice_dma_mem
* @brief DMA memory allocation
*
* Contains DMA allocation bits, used to simplify DMA allocations.
*/
struct ice_dma_mem {
void *va;
uint64_t pa;
size_t size;
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
};
void * ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *mem, u64 size);
void ice_free_dma_mem(struct ice_hw __unused *hw, struct ice_dma_mem *mem);
/**
* @struct ice_lock
* @brief simplified lock API
*
* Contains a simple lock implementation used to lock various resources.
*/
struct ice_lock {
struct mtx mutex;
char name[ICE_STR_BUF_LEN];
};
extern u16 ice_lock_count;
/**
* ice_init_lock - Initialize a lock for use
* @lock: the lock memory to initialize
*
* OS compatibility layer to provide a simple locking mechanism. We use
* a mutex for this purpose.
*/
static inline void
ice_init_lock(struct ice_lock *lock)
{
/*
* Make each lock unique by incrementing a counter each time this
* function is called. Use of a u16 allows 65536 unique lock names before
* we'd hit a duplicate.
*/
memset(lock->name, 0, sizeof(lock->name));
snprintf(lock->name, ICE_STR_BUF_LEN, "ice_lock_%u", ice_lock_count++);
mtx_init(&lock->mutex, lock->name, NULL, MTX_DEF);
}
/**
* ice_acquire_lock - Acquire the lock
* @lock: the lock to acquire
*
* Acquires the mutex specified by the lock pointer.
*/
static inline void
ice_acquire_lock(struct ice_lock *lock)
{
mtx_lock(&lock->mutex);
}
/**
* ice_release_lock - Release the lock
* @lock: the lock to release
*
* Releases the mutex specified by the lock pointer.
*/
static inline void
ice_release_lock(struct ice_lock *lock)
{
mtx_unlock(&lock->mutex);
}
/**
* ice_destroy_lock - Destroy the lock to de-allocate it
* @lock: the lock to destroy
*
* Destroys a previously initialized lock. We only do this if the mutex was
* previously initialized.
*/
static inline void
ice_destroy_lock(struct ice_lock *lock)
{
if (mtx_initialized(&lock->mutex))
mtx_destroy(&lock->mutex);
memset(lock->name, 0, sizeof(lock->name));
}
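/*
* Usage sketch: the lock API follows a simple init/acquire/release/destroy
* lifecycle, e.g.
*
*	struct ice_lock lock;
*
*	ice_init_lock(&lock);
*	ice_acquire_lock(&lock);
*	... critical section ...
*	ice_release_lock(&lock);
*	ice_destroy_lock(&lock);
*/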
/* Some function parameters are unused outside of MPASS/KASSERT macros. Rather
* than marking these as __unused all the time, mark them as __invariant_only,
* and define this to __unused when INVARIANTS is disabled. Otherwise, define
* it empty so that __invariant_only parameters are caught as unused by the
* INVARIANTS build.
*/
#ifndef INVARIANTS
#define __invariant_only __unused
#else
#define __invariant_only
#endif
#define __ALWAYS_UNUSED __unused
/**
* ice_ilog2 - Calculate the integer log base 2 of a 64bit value
* @n: 64bit number
*
* Calculates the integer log base 2 of a 64bit value, rounded down.
*
* @remark The integer log base 2 of zero is technically undefined, but this
* function will return 0 in that case.
*
*/
static inline int
ice_ilog2(u64 n) {
if (n == 0)
return 0;
return flsll(n) - 1;
}
/**
* ice_is_pow2 - Check if the value is a power of 2
* @n: 64bit number
*
* Check if the given value is a power of 2.
*
* @remark FreeBSD's powerof2 function treats zero as a power of 2, while this
* function does not.
*
* @returns true or false
*/
static inline bool
ice_is_pow2(u64 n) {
if (n == 0)
return false;
return powerof2(n);
}
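/*
* For example, ice_ilog2(1) == 0, ice_ilog2(5) == 2, and ice_ilog2(64) == 6;
* ice_is_pow2(64) is true, while ice_is_pow2(5) and ice_is_pow2(0) are false.
*/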
#endif /* _ICE_OSDEP_H_ */

307
sys/dev/ice/ice_protocol_type.h Normal file
View File

@ -0,0 +1,307 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_PROTOCOL_TYPE_H_
#define _ICE_PROTOCOL_TYPE_H_
#include "ice_flex_type.h"
#define ICE_IPV6_ADDR_LENGTH 16
/* Each recipe can match up to 5 different fields. Fields to match can be meta-
* data, values extracted from packet headers, or results from other recipes.
* One of the 5 fields is reserved for matching the switch ID. So, up to 4
* recipes can provide intermediate results to another one through chaining,
* e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
*/
#define ICE_NUM_WORDS_RECIPE 4
/* Max recipes that can be chained */
#define ICE_MAX_CHAIN_RECIPE 5
/* 1 word reserved for switch ID from allowed 5 words.
* So a recipe can have max 4 words. And you can chain 5 such recipes
* together. So maximum words that can be programmed for look up is 5 * 4.
*/
#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
/* Field vector index corresponding to chaining */
#define ICE_CHAIN_FV_INDEX_START 47
enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
ICE_IPV6_OFOS,
ICE_IPV6_IL,
ICE_TCP_IL,
ICE_UDP_OF,
ICE_UDP_ILOS,
ICE_SCTP_IL,
ICE_VXLAN,
ICE_GENEVE,
ICE_VXLAN_GPE,
ICE_NVGRE,
ICE_PROTOCOL_LAST
};
enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN_GPE,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
* and GENEVE
*/
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
/* Decoders for ice_prot_id:
* - F: First
* - I: Inner
* - L: Last
* - O: Outer
* - S: Single
*/
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
ICE_PROT_MAC_O2 = 2,
ICE_PROT_MAC_IL = 4,
ICE_PROT_MAC_IN_MAC = 7,
ICE_PROT_ETYPE_OL = 9,
ICE_PROT_ETYPE_IL = 10,
ICE_PROT_PAY = 15,
ICE_PROT_EVLAN_O = 16,
ICE_PROT_VLAN_O = 17,
ICE_PROT_VLAN_IF = 18,
ICE_PROT_MPLS_OL_MINUS_1 = 27,
ICE_PROT_MPLS_OL_OR_OS = 28,
ICE_PROT_MPLS_IL = 29,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
ICE_PROT_NSH_F = 84,
ICE_PROT_ESP_F = 88,
ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_ICMP_IL = 98,
ICE_PROT_ICMPV6_IL = 100,
ICE_PROT_VRRP_F = 101,
ICE_PROT_OSPF = 102,
ICE_PROT_ATAOE_OF = 114,
ICE_PROT_CTRL_OF = 116,
ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_EAPOL_OF = 120,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
#define ICE_IPV4_IL_HW 33
#define ICE_IPV6_OFOS_HW 40
#define ICE_IPV6_IL_HW 41
#define ICE_TCP_IL_HW 49
#define ICE_UDP_ILOS_HW 53
#define ICE_SCTP_IL_HW 96
/* ICE_UDP_OF is used to identify all 3 tunnel types
* VXLAN, GENEVE and VXLAN_GPE. To differentiate further
* need to use flags from the field vector
*/
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_GRE_OF_HW 64 /* NVGRE */
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MASK 0xFF
#define ICE_TUN_FLAG_FV_IND 2
#define ICE_PROTOCOL_MAX_ENTRIES 16
/* Mapping of software defined protocol ID to hardware defined protocol ID */
struct ice_protocol_entry {
enum ice_protocol_type type;
u8 protocol_id;
};
struct ice_ether_hdr {
u8 dst_addr[ETH_ALEN];
u8 src_addr[ETH_ALEN];
};
struct ice_ethtype_hdr {
__be16 ethtype_id;
};
struct ice_ether_vlan_hdr {
u8 dst_addr[ETH_ALEN];
u8 src_addr[ETH_ALEN];
__be32 vlan_id;
};
struct ice_vlan_hdr {
__be16 vlan;
__be16 type;
};
struct ice_ipv4_hdr {
u8 version;
u8 tos;
__be16 total_length;
__be16 id;
__be16 frag_off;
u8 time_to_live;
u8 protocol;
__be16 check;
__be32 src_addr;
__be32 dst_addr;
};
struct ice_le_ver_tc_flow {
union {
struct {
u32 flow_label : 20;
u32 tc : 8;
u32 version : 4;
} fld;
u32 val;
} u;
};
struct ice_ipv6_hdr {
__be32 be_ver_tc_flow;
__be16 payload_len;
u8 next_hdr;
u8 hop_limit;
u8 src_addr[ICE_IPV6_ADDR_LENGTH];
u8 dst_addr[ICE_IPV6_ADDR_LENGTH];
};
struct ice_sctp_hdr {
__be16 src_port;
__be16 dst_port;
__be32 verification_tag;
__be32 check;
};
struct ice_l4_hdr {
__be16 src_port;
__be16 dst_port;
__be16 len;
__be16 check;
};
struct ice_udp_tnl_hdr {
__be16 field;
__be16 proto_type;
__be32 vni; /* only use lower 24-bits */
};
struct ice_nvgre {
__be16 flags;
__be16 protocol;
__be32 tni_flow;
};
union ice_prot_hdr {
struct ice_ether_hdr eth_hdr;
struct ice_ethtype_hdr ethertype;
struct ice_vlan_hdr vlan_hdr;
struct ice_ipv4_hdr ipv4_hdr;
struct ice_ipv6_hdr ipv6_hdr;
struct ice_l4_hdr l4_hdr;
struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre nvgre_hdr;
};
/* This is a mapping table entry that maps every word within a given protocol
* structure to the real byte offset, as per the specification of that
* protocol header.
* For example, the dst address occupies 3 words in the Ethernet header; the
* corresponding byte offsets in the actual packet header are 0, 2, and 4, and
* the src address is at offsets 6, 8, and 10.
*/
struct ice_prot_ext_tbl_entry {
enum ice_protocol_type prot_type;
/* Byte offset into header of given protocol type */
u8 offs[sizeof(union ice_prot_hdr)];
};
/* Extractions to be looked up for a given recipe */
struct ice_prot_lkup_ext {
u16 prot_type;
u8 n_val_words;
/* create a buffer to hold max words per recipe */
u16 field_off[ICE_MAX_CHAIN_WORDS];
u16 field_mask[ICE_MAX_CHAIN_WORDS];
struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
/* Indicate field offsets that have field vector indices assigned */
ice_declare_bitmap(done, ICE_MAX_CHAIN_WORDS);
};
struct ice_pref_recipe_group {
u8 n_val_pairs; /* Number of valid pairs */
struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
u16 mask[ICE_NUM_WORDS_RECIPE];
};
struct ice_recp_grp_entry {
struct LIST_ENTRY_TYPE l_entry;
#define ICE_INVAL_CHAIN_IND 0xFF
u16 rid;
u8 chain_idx;
u16 fv_idx[ICE_NUM_WORDS_RECIPE];
u16 fv_mask[ICE_NUM_WORDS_RECIPE];
struct ice_pref_recipe_group r_group;
};
#endif /* _ICE_PROTOCOL_TYPE_H_ */

228
sys/dev/ice/ice_resmgr.c Normal file
View File

@ -0,0 +1,228 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_resmgr.c
* @brief Resource allocation manager
*
* Manage device resource allocations for a PF, including assigning queues to
* VSIs, or managing interrupt allocations across the PF.
*
* It can handle contiguous and scattered resource allocations, and upon
* assigning them, will fill in the mapping array with a map of
* resource IDs to PF-space resource indices.
*/
#include "ice_resmgr.h"
/**
* @var M_ICE_RESMGR
* @brief PF resource manager allocation type
*
* malloc(9) allocation type used by the resource manager code.
*/
MALLOC_DEFINE(M_ICE_RESMGR, "ice-resmgr", "Intel(R) 100Gb Network Driver resmgr allocations");
/*
* Public resource manager allocation functions
*/
/**
* ice_resmgr_init - Initialize a resource manager structure
* @resmgr: structure to track the resource manager state
* @num_res: the maximum number of resources it can assign
*
* Initialize the state of a resource manager structure, allocating space to
* assign up to the requested number of resources. Uses bit strings to track
* which resources have been assigned. This type of resmgr is intended to be
* used for tracking LAN queue assignments between VSIs.
*/
int
ice_resmgr_init(struct ice_resmgr *resmgr, u16 num_res)
{
resmgr->resources = bit_alloc(num_res, M_ICE_RESMGR, M_NOWAIT);
if (resmgr->resources == NULL)
return (ENOMEM);
resmgr->num_res = num_res;
resmgr->contig_only = false;
return (0);
}
/**
* ice_resmgr_init_contig_only - Initialize a resource manager structure
* @resmgr: structure to track the resource manager state
* @num_res: the maximum number of resources it can assign
*
* Functions similarly to ice_resmgr_init(), but the resulting resmgr structure
* will only allow contiguous allocations. This type of resmgr is intended to
* be used for tracking device MSI-X interrupt allocations.
*/
int
ice_resmgr_init_contig_only(struct ice_resmgr *resmgr, u16 num_res)
{
int error;
error = ice_resmgr_init(resmgr, num_res);
if (error)
return (error);
resmgr->contig_only = true;
return (0);
}
/**
* ice_resmgr_destroy - Deallocate memory associated with a resource manager
* @resmgr: resource manager structure
*
* De-allocates the bit string associated with this resource manager. It is
* expected that this function will not be called until all of the assigned
* resources have been released.
*/
void
ice_resmgr_destroy(struct ice_resmgr *resmgr)
{
if (resmgr->resources != NULL) {
#ifdef INVARIANTS
int set;
bit_count(resmgr->resources, 0, resmgr->num_res, &set);
MPASS(set == 0);
#endif
free(resmgr->resources, M_ICE_RESMGR);
resmgr->resources = NULL;
}
resmgr->num_res = 0;
}
/*
* Resource allocation functions
*/
/**
* ice_resmgr_assign_contiguous - Assign contiguous mapping of resources
* @resmgr: resource manager structure
* @idx: memory to store mapping, at least num_res wide
* @num_res: the number of resources to assign
*
* Assign num_res number of contiguous resources into the idx mapping. On
* success, idx will be updated to map each index to a PF resource.
*
* This function guarantees that the resource mapping will be contiguous, and
* will fail if that is not possible.
*/
int
ice_resmgr_assign_contiguous(struct ice_resmgr *resmgr, u16 *idx, u16 num_res)
{
int start, i;
bit_ffc_area(resmgr->resources, resmgr->num_res, num_res, &start);
if (start < 0)
return (ENOSPC);
/* Set each bit and update the index array */
for (i = 0; i < num_res; i++) {
bit_set(resmgr->resources, start + i);
idx[i] = start + i;
}
return (0);
}
/**
* ice_resmgr_assign_scattered - Assign possibly scattered resources
* @resmgr: the resource manager structure
* @idx: memory to store associated resource mapping, at least num_res wide
* @num_res: the number of resources to assign
*
* Assign num_res number of resources into the idx mapping. On success, idx
* will be updated to map each index to a PF-space resource.
*
* Queues may be allocated non-contiguously, and this function requires that
* num_res be at most ICE_MAX_SCATTERED_QUEUES due to hardware
* limitations on scattered queue assignment.
*/
int
ice_resmgr_assign_scattered(struct ice_resmgr *resmgr, u16 *idx, u16 num_res)
{
int index = 0, i;
/* Scattered allocations won't work if they weren't allowed at resmgr
* creation time.
*/
if (resmgr->contig_only)
return (EPERM);
/* Hardware can only support a limited total of scattered queues for
* a single VSI
*/
if (num_res > ICE_MAX_SCATTERED_QUEUES)
return (EOPNOTSUPP);
for (i = 0; i < num_res; i++) {
bit_ffc_at(resmgr->resources, index, resmgr->num_res, &index);
if (index < 0)
goto err_no_space;
bit_set(resmgr->resources, index);
idx[i] = index;
}
return (0);
err_no_space:
/* Release any resources we did assign up to this point. */
ice_resmgr_release_map(resmgr, idx, i);
return (ENOSPC);
}
/**
* ice_resmgr_release_map - Release previously assigned resource mapping
* @resmgr: the resource manager structure
* @idx: previously assigned resource mapping
* @num_res: number of resources in the mapping
*
* Clears the assignment of each resource in the provided resource index.
* Updates the idx array to indicate that each of the virtual indexes has an
* invalid resource mapping, by assigning it ICE_INVALID_RES_IDX.
*/
void
ice_resmgr_release_map(struct ice_resmgr *resmgr, u16 *idx, u16 num_res)
{
int i;
for (i = 0; i < num_res; i++) {
if (idx[i] < resmgr->num_res)
bit_clear(resmgr->resources, idx[i]);
idx[i] = ICE_INVALID_RES_IDX;
}
}
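/*
* Usage sketch (illustrative): a PF might track Tx queue assignments as
* follows; the sizes are arbitrary illustration values:
*
*	struct ice_resmgr tx_qmgr;
*	u16 qmap[4];
*
*	if (ice_resmgr_init(&tx_qmgr, 128) == 0) {
*		if (ice_resmgr_assign_contiguous(&tx_qmgr, qmap, 4) == 0) {
*			... qmap[i] now holds PF-space queue indices ...
*			ice_resmgr_release_map(&tx_qmgr, qmap, 4);
*		}
*		ice_resmgr_destroy(&tx_qmgr);
*	}
*/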

111
sys/dev/ice/ice_resmgr.h Normal file
View File

@ -0,0 +1,111 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_resmgr.h
* @brief Resource manager interface
*
* Defines an interface for managing PF hardware queues and interrupts, and
* for assigning them to hardware VSIs and VFs.
*
* For queue management:
* The total number of available Tx and Rx queues is not equal, so it is
* expected that each PF will allocate two ice_resmgr structures, one for Tx
* and one for Rx. These should be allocated in attach() prior to initializing
* VSIs, and destroyed in detach().
*
* For interrupt management:
* The PF allocates an ice_resmgr structure that does not allow scattered
* allocations since interrupt allocations must be contiguous.
*/
#ifndef _ICE_RESMGR_H_
#define _ICE_RESMGR_H_
#include <sys/param.h>
#include "ice_osdep.h"
#include <sys/bitstring.h>
/*
* For managing VSI queue allocations
*/
/* Hardware only supports a limited number of resources in scattered mode */
#define ICE_MAX_SCATTERED_QUEUES 16
/* Use highest value to indicate invalid resource mapping */
#define ICE_INVALID_RES_IDX 0xFFFF
/*
* Structures
*/
/**
* @struct ice_resmgr
* @brief Resource manager
*
* Represent resource allocations using a bitstring, where bit zero represents
* the first resource. If a particular bit is set this indicates that the
* resource has been allocated and is not free.
*/
struct ice_resmgr {
bitstr_t *resources;
u16 num_res;
bool contig_only;
};
/**
* @enum ice_resmgr_alloc_type
* @brief resource manager allocation types
*
* Enumeration of possible allocation types that can be used when
* assigning resources. For now, SCATTERED is only used with
* managing queue allocations.
*/
enum ice_resmgr_alloc_type {
ICE_RESMGR_ALLOC_INVALID = 0,
ICE_RESMGR_ALLOC_CONTIGUOUS,
ICE_RESMGR_ALLOC_SCATTERED
};
/* Public resource manager allocation functions */
int ice_resmgr_init(struct ice_resmgr *resmgr, u16 num_res);
int ice_resmgr_init_contig_only(struct ice_resmgr *resmgr, u16 num_res);
void ice_resmgr_destroy(struct ice_resmgr *resmgr);
/* Public resource assignment functions */
int ice_resmgr_assign_contiguous(struct ice_resmgr *resmgr, u16 *idx, u16 num_res);
int ice_resmgr_assign_scattered(struct ice_resmgr *resmgr, u16 *idx, u16 num_res);
/* Release resources */
void ice_resmgr_release_map(struct ice_resmgr *resmgr, u16 *idx, u16 num_res);
#endif /* _ICE_RESMGR_H_ */
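To illustrate the intended lifecycle, here is a minimal sketch using only the
functions declared above. The helper name, the queue count of 128, and the
qmap array are assumptions for the example; in the driver the equivalent calls
live in attach()/detach() and the VSI setup paths.

	#include "ice_resmgr.h"

	static int
	example_assign_tx_queues(u16 *qmap, u16 nqueues)
	{
		struct ice_resmgr tx_qmgr;
		int err;

		/* Track 128 hardware Tx queues; bit 0 represents queue 0 */
		err = ice_resmgr_init(&tx_qmgr, 128);
		if (err)
			return (err);

		/* Map nqueues virtual indexes to contiguous HW queues */
		err = ice_resmgr_assign_contiguous(&tx_qmgr, qmap, nqueues);
		if (err) {
			ice_resmgr_destroy(&tx_qmgr);
			return (err);
		}

		/* ... qmap[i] now holds the HW queue backing queue i ... */

		/* Undo the mapping; entries become ICE_INVALID_RES_IDX */
		ice_resmgr_release_map(&tx_qmgr, qmap, nqueues);
		ice_resmgr_destroy(&tx_qmgr);
		return (0);
	}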

sys/dev/ice/ice_rss.h (new file)

@@ -0,0 +1,117 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_rss.h
* @brief default RSS values if kernel RSS is not enabled
*
* This header includes default definitions for RSS functionality if the
* kernel RSS interface is not enabled. This allows main driver code to avoid
* having to check the RSS ifdef throughout, but instead just use the RSS
* definitions, as they will fall back to these defaults when the kernel
* interface is disabled.
*/
#ifndef _ICE_RSS_H_
#define _ICE_RSS_H_
#ifdef RSS
/* We have the kernel RSS interface available */
#include <net/rss_config.h>
/* Make sure our key size buffer has enough space to store the kernel RSS key */
CTASSERT(ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE >= RSS_KEYSIZE);
#else
/* The kernel RSS interface is not enabled. Use suitable defaults for the RSS
* configuration functions.
*
* The RSS hash key will be a pre-generated random key.
* The number of buckets will just match the number of CPUs.
* The lookup table will be assigned using round-robin with no indirection.
* The RSS hash configuration will be set to suitable defaults.
*/
#define RSS_HASHTYPE_RSS_IPV4 (1 << 1) /* IPv4 2-tuple */
#define RSS_HASHTYPE_RSS_TCP_IPV4 (1 << 2) /* TCPv4 4-tuple */
#define RSS_HASHTYPE_RSS_IPV6 (1 << 3) /* IPv6 2-tuple */
#define RSS_HASHTYPE_RSS_TCP_IPV6 (1 << 4) /* TCPv6 4-tuple */
#define RSS_HASHTYPE_RSS_IPV6_EX (1 << 5) /* IPv6 2-tuple + ext hdrs */
#define RSS_HASHTYPE_RSS_TCP_IPV6_EX (1 << 6) /* TCPv6 4-tuple + ext hdrs */
#define RSS_HASHTYPE_RSS_UDP_IPV4 (1 << 7) /* IPv4 UDP 4-tuple */
#define RSS_HASHTYPE_RSS_UDP_IPV6 (1 << 9) /* IPv6 UDP 4-tuple */
#define RSS_HASHTYPE_RSS_UDP_IPV6_EX (1 << 10) /* IPv6 UDP 4-tuple + ext hdrs */
#define ICE_DEFAULT_RSS_HASH_CONFIG \
((u_int)(RSS_HASHTYPE_RSS_IPV4 | \
RSS_HASHTYPE_RSS_TCP_IPV4 | \
RSS_HASHTYPE_RSS_UDP_IPV4 | \
RSS_HASHTYPE_RSS_IPV6 | \
RSS_HASHTYPE_RSS_TCP_IPV6 | \
RSS_HASHTYPE_RSS_UDP_IPV6))
#define rss_getkey(key) ice_get_default_rss_key(key)
#define rss_getnumbuckets() (mp_ncpus)
#define rss_get_indirection_to_bucket(index) (index)
#define rss_gethashconfig() (ICE_DEFAULT_RSS_HASH_CONFIG)
/**
* rss_hash2bucket - Determine the bucket for a given hash value
* @hash_val: the hash value to use
* @hash_type: the type of the hash
* @bucket_id: on success, updated with the bucket
*
* This function simply verifies that the hash type is known. If it is, then
* we forward the hash value directly as the bucket id. If the hash type is
* unknown, we return -1.
*
* This is the simplest mechanism for converting a hash value into a bucket,
* and does not support any form of indirection table.
*/
static inline int
rss_hash2bucket(uint32_t hash_val, uint32_t hash_type, uint32_t *bucket_id)
{
switch (hash_type) {
case M_HASHTYPE_RSS_IPV4:
case M_HASHTYPE_RSS_TCP_IPV4:
case M_HASHTYPE_RSS_UDP_IPV4:
case M_HASHTYPE_RSS_IPV6:
case M_HASHTYPE_RSS_TCP_IPV6:
case M_HASHTYPE_RSS_UDP_IPV6:
*bucket_id = hash_val;
return (0);
default:
return (-1);
}
}
#endif /* !RSS */
#endif /* _ICE_RSS_H_ */
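A short sketch of what these wrappers buy: driver code can be written once
against the kernel RSS calls and still build without "options RSS". The helper
below is hypothetical; under !RSS it degenerates to the round-robin bucket
assignment the defaults above describe.

	#include "ice_rss.h"

	/* Fill an RSS lookup table, mapping each LUT entry to a bucket.
	 * With kernel RSS enabled this honors the kernel's indirection
	 * table; without it, entries are assigned round-robin over the
	 * CPU count.
	 */
	static void
	example_fill_rss_lut(u16 *lut, u16 lut_size)
	{
		u16 i;

		for (i = 0; i < lut_size; i++)
			lut[i] = rss_get_indirection_to_bucket(i) %
			    rss_getnumbuckets();
	}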

sys/dev/ice/ice_sbq_cmd.h (new file)

@@ -0,0 +1,121 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_SBQ_CMD_H_
#define _ICE_SBQ_CMD_H_
/* This header file defines the Sideband Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
*/
/* Sideband Queue command structure and opcodes */
enum ice_sbq_opc {
/* Sideband Queue commands */
ice_sbq_opc_neigh_dev_req = 0x0C00,
ice_sbq_opc_neigh_dev_ev = 0x0C01
};
/* Sideband Queue descriptor. Indirect command
* and non posted
*/
struct ice_sbq_cmd_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 cmd_retval;
/* Opaque message data */
__le32 cookie_high;
__le32 cookie_low;
union {
__le16 cmd_len;
__le16 cmpl_len;
} param0;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
struct ice_sbq_evt_desc {
__le16 flags;
__le16 opcode;
__le16 datalen;
__le16 cmd_retval;
u8 data[24];
};
enum ice_sbq_msg_dev {
rmn_0 = 0x02,
rmn_1 = 0x03,
rmn_2 = 0x04,
cgu = 0x06
};
enum ice_sbq_msg_opcode {
ice_sbq_msg_rd = 0x00,
ice_sbq_msg_wr = 0x01
};
#define ICE_SBQ_MSG_FLAGS 0x40
#define ICE_SBQ_MSG_SBE_FBE 0x0F
struct ice_sbq_msg_req {
u8 dest_dev;
u8 src_dev;
u8 opcode;
u8 flags;
u8 sbe_fbe;
u8 func_id;
__le16 msg_addr_low;
__le32 msg_addr_high;
__le32 data;
};
struct ice_sbq_msg_cmpl {
u8 dest_dev;
u8 src_dev;
u8 opcode;
u8 flags;
__le32 data;
};
/* Internal struct */
struct ice_sbq_msg_input {
u8 dest_dev;
u8 opcode;
u16 msg_addr_low;
u32 msg_addr_high;
u32 data;
};
#endif /* _ICE_SBQ_CMD_H_ */
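For orientation, a hedged sketch of filling struct ice_sbq_msg_input for a
register read from the CGU device. The helper name and the 16/16 address split
are assumptions; the routine that actually submits the message over the
sideband queue is not part of this header.

	#include "ice_sbq_cmd.h"

	static void
	example_fill_sbq_read(struct ice_sbq_msg_input *msg, u32 reg_addr)
	{
		msg->dest_dev = cgu;
		msg->opcode = ice_sbq_msg_rd;
		msg->msg_addr_low = (u16)(reg_addr & 0xFFFF);
		msg->msg_addr_high = reg_addr >> 16;
		msg->data = 0;	/* unused for a read request */
	}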

sys/dev/ice/ice_sched.c (new file, 5541 lines; diff suppressed because it is too large)

sys/dev/ice/ice_sched.h (new file)

@@ -0,0 +1,225 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_SCHED_H_
#define _ICE_SCHED_H_
#include "ice_common.h"
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_AGG_LAYER_OFFSET 6
#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
/* Burst size is a 12 bits register that is configured while creating the RL
* profile(s). MSB is a granularity bit and tells the granularity type
* 0 - LSB bits are in 64 bytes granularity
* 1 - LSB bits are in 1K bytes granularity
*/
#define ICE_64_BYTE_GRANULARITY 0
#define ICE_KBYTE_GRANULARITY BIT(11)
#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */
#define ICE_MAX_BURST_SIZE_ALLOWED \
((BIT(11) - 1) * 1024) /* In Bytes */
#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
((BIT(11) - 1) * 64) /* In Bytes */
#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
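A worked example of the 12-bit burst size encoding described above: with 11
value bits, 64-byte granularity covers up to (2^11 - 1) * 64 = 131,008 bytes;
beyond that the granularity bit is set and the value is expressed in 1K-byte
units, up to (2^11 - 1) * 1024 = 2,096,128 bytes. The helper below is a
hypothetical sketch; the driver's real entry point is ice_cfg_rl_burst_size(),
declared at the end of this header.

	static inline u16
	example_encode_burst_size(u32 bytes)
	{
		if (bytes <= ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY)
			/* MSB clear: value is in 64-byte units */
			return ((u16)(bytes / 64));
		/* MSB set: value is in 1K-byte units */
		return ((u16)(ICE_KBYTE_GRANULARITY | (bytes / 1024)));
	}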
#define ICE_RL_PROF_ACCURACY_BYTES 128
#define ICE_RL_PROF_MULTIPLIER 10000
#define ICE_RL_PROF_TS_MULTIPLIER 32
#define ICE_RL_PROF_FRACTION 512
#define ICE_PSM_CLK_367MHZ_IN_HZ 367647059
#define ICE_PSM_CLK_416MHZ_IN_HZ 416666667
#define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
#define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
struct rl_profile_params {
u32 bw; /* in Kbps */
u16 rl_multiplier;
u16 wake_up_calc;
u16 rl_encode;
};
/* BW rate limit profile parameters list entry along
* with bandwidth maintained per layer in port info
*/
struct ice_aqc_rl_profile_info {
struct ice_aqc_rl_profile_elem profile;
struct LIST_ENTRY_TYPE list_entry;
u32 bw; /* requested */
u16 prof_id_ref; /* profile ID to node association ref count */
};
struct ice_sched_agg_vsi_info {
struct LIST_ENTRY_TYPE list_entry;
ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u16 vsi_handle;
/* save aggregator VSI TC bitmap */
ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
struct ice_sched_agg_info {
struct LIST_HEAD_TYPE agg_vsi_list;
struct LIST_ENTRY_TYPE list_entry;
ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
/* bw_t_info saves aggregator BW information */
struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
/* save aggregator TC bitmap */
ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
/* FW AQ command calls */
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_generic_elem *buf,
u16 buf_size, struct ice_sq_cd *cd);
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_data *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
/* Functions to cleanup scheduler SW DB */
void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
void ice_sched_clear_agg(struct ice_hw *hw);
/* Get a scheduling node from SW DB for given TEID */
struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
/* Add a scheduling node into SW DB for given info */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 vsi_handle);
bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node);
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
/* Tx scheduler rate limiter functions */
enum ice_status
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, u8 tc_bitmap);
enum ice_status
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap);
enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id);
enum ice_status
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
enum ice_status
ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type);
enum ice_status
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
enum ice_status
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type);
enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw);
enum ice_status
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle);
enum ice_status
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw);
enum ice_status
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id);
enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
u8 *q_prio);
enum ice_status
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc);
enum ice_status
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
u16 num_vsis, u16 *vsi_handle_arr,
u8 *node_prio, u8 tc);
enum ice_status
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc);
bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
struct ice_sched_node *node);
enum ice_status
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle);
enum ice_status
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
u32 bw);
enum ice_status
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw);
enum ice_status
ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
struct ice_sched_node *node, u8 priority);
enum ice_status
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
#endif /* _ICE_SCHED_H_ */

sys/dev/ice/ice_sriov.c (new file)

@@ -0,0 +1,193 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "ice_common.h"
#include "ice_adminq_cmd.h"
#include "ice_sriov.h"
/**
* ice_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF ID to send msg
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cd: pointer to command details
*
* Send a message to the VF driver (0x0802) using the mailbox
* queue. The message is sent asynchronously via the
* ice_sq_send_cmd() function.
*/
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
struct ice_aqc_pf_vf_msg *cmd;
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
cmd = &desc.params.virt;
cmd->id = CPU_TO_LE32(vfid);
desc.cookie_high = CPU_TO_LE32(v_opcode);
desc.cookie_low = CPU_TO_LE32(v_retval);
if (msglen)
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
/**
* ice_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cd: pointer to command details
*
* Send message to PF driver using mailbox queue. By default, this
* message is sent asynchronously, i.e. ice_sq_send_cmd()
* does not wait for completion before returning.
*/
enum ice_status
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
enum ice_status v_retval, u8 *msg, u16 msglen,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_pf);
desc.cookie_high = CPU_TO_LE32(v_opcode);
desc.cookie_low = CPU_TO_LE32(v_retval);
if (msglen)
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
/**
* ice_conv_link_speed_to_virtchnl
* @adv_link_support: determines the format of the returned link speed
* @link_speed: variable containing the link_speed to be converted
*
* Convert link speed supported by HW to link speed supported by virtchnl.
* If adv_link_support is true, then return link speed in Mbps. Else return
* link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
* needs to cast back to an enum virtchnl_link_speed in the case where
* adv_link_support is false, but when adv_link_support is true the caller can
* expect the speed in Mbps.
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
u32 speed;
if (adv_link_support)
switch (link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
speed = ICE_LINK_SPEED_10MBPS;
break;
case ICE_AQ_LINK_SPEED_100MB:
speed = ICE_LINK_SPEED_100MBPS;
break;
case ICE_AQ_LINK_SPEED_1000MB:
speed = ICE_LINK_SPEED_1000MBPS;
break;
case ICE_AQ_LINK_SPEED_2500MB:
speed = ICE_LINK_SPEED_2500MBPS;
break;
case ICE_AQ_LINK_SPEED_5GB:
speed = ICE_LINK_SPEED_5000MBPS;
break;
case ICE_AQ_LINK_SPEED_10GB:
speed = ICE_LINK_SPEED_10000MBPS;
break;
case ICE_AQ_LINK_SPEED_20GB:
speed = ICE_LINK_SPEED_20000MBPS;
break;
case ICE_AQ_LINK_SPEED_25GB:
speed = ICE_LINK_SPEED_25000MBPS;
break;
case ICE_AQ_LINK_SPEED_40GB:
speed = ICE_LINK_SPEED_40000MBPS;
break;
case ICE_AQ_LINK_SPEED_50GB:
speed = ICE_LINK_SPEED_50000MBPS;
break;
case ICE_AQ_LINK_SPEED_100GB:
speed = ICE_LINK_SPEED_100000MBPS;
break;
default:
speed = ICE_LINK_SPEED_UNKNOWN;
break;
}
else
/* Virtchnl speeds are not defined for every speed supported by
 * the hardware. To maintain compatibility with older AVF
 * drivers, the new speed values are resolved to the closest
 * known virtchnl speed when the speed is reported.
 */
switch (link_speed) {
case ICE_AQ_LINK_SPEED_10MB:
case ICE_AQ_LINK_SPEED_100MB:
speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
break;
case ICE_AQ_LINK_SPEED_1000MB:
case ICE_AQ_LINK_SPEED_2500MB:
case ICE_AQ_LINK_SPEED_5GB:
speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
break;
case ICE_AQ_LINK_SPEED_10GB:
speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
break;
case ICE_AQ_LINK_SPEED_20GB:
speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
break;
case ICE_AQ_LINK_SPEED_25GB:
speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
break;
case ICE_AQ_LINK_SPEED_40GB:
case ICE_AQ_LINK_SPEED_50GB:
case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
default:
speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
break;
}
return speed;
}
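A brief usage note on the dual return format (caller code hypothetical; the
cast mirrors what the doc comment above requires, and the 25000 value follows
from the ICE_LINK_SPEED_25000MBPS naming):

	/* Legacy VF without VIRTCHNL_VF_CAP_ADV_LINK_SPEED negotiated */
	u32 raw = ice_conv_link_speed_to_virtchnl(false,
	    ICE_AQ_LINK_SPEED_25GB);
	enum virtchnl_link_speed speed = (enum virtchnl_link_speed)raw;
	/* speed == VIRTCHNL_LINK_SPEED_25GB */

	/* VF with advanced link speed support: plain Mbps, no cast */
	u32 mbps = ice_conv_link_speed_to_virtchnl(true,
	    ICE_AQ_LINK_SPEED_25GB);
	/* mbps == 25000 (ICE_LINK_SPEED_25000MBPS) */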

sys/dev/ice/ice_sriov.h (new file)

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
#include "ice_common.h"
enum ice_status
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
enum ice_status v_retval, u8 *msg, u16 msglen,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
#endif /* _ICE_SRIOV_H_ */

sys/dev/ice/ice_status.h (new file)

@@ -0,0 +1,75 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_STATUS_H_
#define _ICE_STATUS_H_
/* Error Codes */
enum ice_status {
ICE_SUCCESS = 0,
/* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
ICE_ERR_RESET_FAILED = -9,
ICE_ERR_FW_API_VER = -10,
ICE_ERR_NO_MEMORY = -11,
ICE_ERR_CFG = -12,
ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
ICE_ERR_FW_DDP_MISMATCH = -20,
/* NVM specific error codes: Range -50..-59 */
ICE_ERR_NVM = -50,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
/* ARQ/ASQ specific error codes. Range -100..-109 */
ICE_ERR_AQ_ERROR = -100,
ICE_ERR_AQ_TIMEOUT = -101,
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
};
#endif /* _ICE_STATUS_H_ */

sys/dev/ice/ice_strings.c (new file, 1034 lines; diff suppressed because it is too large)

sys/dev/ice/ice_switch.c (new file, 4126 lines; diff suppressed because it is too large)

sys/dev/ice/ice_switch.h (new file)

@@ -0,0 +1,468 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_SWITCH_H_
#define _ICE_SWITCH_H_
#include "ice_common.h"
#include "ice_protocol_type.h"
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_MAX_SW 256
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_FLTR_RX BIT(0)
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
/* Worst case buffer length for ice_aqc_opc_get_res_alloc */
#define ICE_MAX_RES_TYPES 0x80
#define ICE_AQ_GET_RES_ALLOC_BUF_LEN \
(ICE_MAX_RES_TYPES * sizeof(struct ice_aqc_get_res_resp_elem))
#define ICE_VSI_INVAL_ID 0xFFFF
#define ICE_INVAL_Q_HANDLE 0xFFFF
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
u16 vsis_allocd;
u16 vsis_unallocated;
u16 flags;
struct ice_aqc_vsi_props info;
struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
u8 vf_num;
u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
/* This is to be used by add/update mirror rule Admin Queue command */
struct ice_mir_rule_buf {
u16 vsi_idx; /* VSI index */
/* For each VSI, user can specify whether corresponding VSI
* should be added/removed to/from mirror rule
*
* add mirror rule: this should always be TRUE.
* update mirror rule: add(true) or remove(false) VSI to/from
* mirror rule
*/
u8 add;
};
/* Switch recipe ID enum values are specific to hardware */
enum ice_sw_lkup_type {
ICE_SW_LKUP_ETHERTYPE = 0,
ICE_SW_LKUP_MAC = 1,
ICE_SW_LKUP_MAC_VLAN = 2,
ICE_SW_LKUP_PROMISC = 3,
ICE_SW_LKUP_VLAN = 4,
ICE_SW_LKUP_DFLT = 5,
ICE_SW_LKUP_ETHERTYPE_MAC = 8,
ICE_SW_LKUP_PROMISC_VLAN = 9,
ICE_SW_LKUP_LAST
};
/* type of filter src ID */
enum ice_src_id {
ICE_SRC_ID_UNKNOWN = 0,
ICE_SRC_ID_VSI,
ICE_SRC_ID_QUEUE,
ICE_SRC_ID_LPORT,
};
struct ice_fltr_info {
/* Look up information: how to look up packet */
enum ice_sw_lkup_type lkup_type;
/* Forward action: filter action to do after lookup */
enum ice_sw_fwd_act_type fltr_act;
/* rule ID returned by firmware once filter rule is created */
u16 fltr_rule_id;
u16 flag;
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
enum ice_src_id src_id;
union {
struct {
u8 mac_addr[ETH_ALEN];
} mac;
struct {
u8 mac_addr[ETH_ALEN];
u16 vlan_id;
} mac_vlan;
struct {
u16 vlan_id;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
* ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be
* passed in as filter.
*/
struct {
u16 ethertype;
u8 mac_addr[ETH_ALEN]; /* optional */
} ethertype_mac;
} l_data; /* Make sure to zero out the memory of l_data before using
 * it; only set the data associated with the lookup match,
 * and leave everything else zero
 */
/* Depending on filter action */
union {
/* queue ID in case of ICE_FWD_TO_Q and starting
* queue ID in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
u16 hw_vsi_id:10;
u16 vsi_id:10;
u16 vsi_list_id:10;
} fwd_id;
/* Sw VSI handle */
u16 vsi_handle;
/* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
* determines the range of queues the packet needs to be forwarded to.
* Note that qgrp_size must be set to a power of 2.
*/
u8 qgrp_size;
/* Rule creation populates these indicators based on the switch type */
u8 lb_en; /* Indicate if packet can be looped back */
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
union ice_prot_hdr h_u; /* Header values */
union ice_prot_hdr m_u; /* Mask of header values to match */
};
struct ice_sw_act_ctrl {
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
u16 flag;
enum ice_sw_fwd_act_type fltr_act;
/* Depending on filter action */
union {
/* This is a queue ID in case of ICE_FWD_TO_Q and starting
* queue ID in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
u16 vsi_id:10;
u16 hw_vsi_id:10;
u16 vsi_list_id:10;
} fwd_id;
/* software VSI handle */
u16 vsi_handle;
u8 qgrp_size;
};
struct ice_rule_query_data {
/* Recipe ID for which the requested rule was added */
u16 rid;
/* Rule ID that was added or is supposed to be removed */
u16 rule_id;
/* vsi_handle for which Rule was added or is supposed to be removed */
u16 vsi_handle;
};
struct ice_adv_rule_info {
enum ice_sw_tunnel_type tun_type;
struct ice_sw_act_ctrl sw_act;
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
};
/* A collection of one or more four-word recipes */
struct ice_sw_recipe {
/* For a chained recipe the root recipe is what should be used for
* programming rules
*/
u8 is_root;
u8 root_rid;
u8 recp_created;
/* Number of extraction words */
u8 n_ext_words;
/* Protocol ID and Offset pair (extraction word) to describe the
* recipe
*/
struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
u16 word_masks[ICE_MAX_CHAIN_WORDS];
/* if this recipe is a collection of other recipes */
u8 big_recp;
/* if this recipe is part of another, bigger recipe, then the chain index
 * corresponding to this recipe
 */
u8 chain_idx;
/* if this recipe is a collection of other recipes, then the count of those
 * recipes and their recipe IDs
 */
u8 n_grp_count;
/* Bit map specifying the IDs associated with this group of recipes */
ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
enum ice_sw_tunnel_type tun_type;
/* List of type ice_fltr_mgmt_list_entry or adv_rule */
u8 adv_rule;
struct LIST_HEAD_TYPE filt_rules;
struct LIST_HEAD_TYPE filt_replay_rules;
struct ice_lock filt_rule_lock; /* protect filter rule structure */
/* Profiles this recipe should be associated with */
struct LIST_HEAD_TYPE fv_list;
/* Profiles this recipe is associated with */
u8 num_profs, *prof_ids;
/* Possible result indexes are 44, 45, 46 and 47 */
#define ICE_POSSIBLE_RES_IDX 0x0000F00000000000ULL
ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS);
/* This allows user to specify the recipe priority.
* For now, this becomes 'fwd_priority' when recipe
* is created, usually recipes can have 'fwd' and 'join'
* priority.
*/
u8 priority;
struct LIST_HEAD_TYPE rg_list;
/* AQ buffer associated with this recipe */
struct ice_aqc_recipe_data_elem *root_buf;
/* This struct saves the fv_words for a given lookup */
struct ice_prot_lkup_ext lkup_exts;
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
struct ice_vsi_list_map_info {
struct LIST_ENTRY_TYPE list_entry;
ice_declare_bitmap(vsi_map, ICE_MAX_VSI);
u16 vsi_list_id;
/* counter to track how many rules are reusing this VSI list */
u16 ref_cnt;
};
struct ice_fltr_list_entry {
struct LIST_ENTRY_TYPE list_entry;
enum ice_status status;
struct ice_fltr_info fltr_info;
};
/**
* enum ice_fltr_marker - Marker for syncing OS and driver filter lists
* @ICE_FLTR_NOT_FOUND: initial state, indicates filter has not been found
* @ICE_FLTR_FOUND: set when a filter has been found in both lists
*
* This enumeration is used to help sync an operating system provided filter
* list with the filters previously added.
*
* This is required for FreeBSD because the operating system does not provide
* individual indications of whether a filter has been added or deleted, but
* instead just notifies the driver with the entire new list.
*
* To use this marker state, the driver shall initially reset all filters to
* the ICE_FLTR_NOT_FOUND state. Then, for each filter in the OS list, it
* shall search the driver list for the filter. If found, the filter state
* will be set to ICE_FLTR_FOUND. If not found, that filter will be added.
* Finally, the driver shall search the internal filter list for all filters
* still marked as ICE_FLTR_NOT_FOUND and remove them.
*/
enum ice_fltr_marker {
ICE_FLTR_NOT_FOUND,
ICE_FLTR_FOUND,
};
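Since this three-step reconciliation is central to how the driver keeps its
MAC filter list in sync with the OS, here is a condensed, self-contained
sketch of the same mark-and-sweep idea. It deliberately uses a flat array and
plain C types rather than the driver's real list structures; all names are
illustrative.

	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	enum fltr_marker { FLTR_NOT_FOUND, FLTR_FOUND };

	struct fltr {
		unsigned char mac[6];
		enum fltr_marker marker;
		bool in_use;
	};

	static void
	example_sync_filters(struct fltr drv[], size_t ndrv,
	    const unsigned char os[][6], size_t nos)
	{
		size_t i, j;

		/* Step 1: reset every tracked filter to NOT_FOUND. */
		for (i = 0; i < ndrv; i++)
			if (drv[i].in_use)
				drv[i].marker = FLTR_NOT_FOUND;

		/* Step 2: mark each OS filter FOUND, adding new ones. */
		for (j = 0; j < nos; j++) {
			for (i = 0; i < ndrv; i++)
				if (drv[i].in_use &&
				    memcmp(drv[i].mac, os[j], 6) == 0)
					break;
			if (i < ndrv) {
				drv[i].marker = FLTR_FOUND;
				continue;
			}
			for (i = 0; i < ndrv; i++)
				if (!drv[i].in_use) {
					memcpy(drv[i].mac, os[j], 6);
					drv[i].marker = FLTR_FOUND;
					drv[i].in_use = true;
					break;
				}
		}

		/* Step 3: sweep filters still marked NOT_FOUND. */
		for (i = 0; i < ndrv; i++)
			if (drv[i].in_use &&
			    drv[i].marker == FLTR_NOT_FOUND)
				drv[i].in_use = false;
	}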
/* This defines an entry in the list that maintains MAC or VLAN membership
* to HW list mapping, since multiple VSIs can subscribe to the same MAC or
* VLAN. As an optimization the VSI list should be created only when a
* second VSI becomes a subscriber to the same MAC address. VSI lists are always
* used for VLAN membership.
*/
struct ice_fltr_mgmt_list_entry {
/* back pointer to VSI list ID to VSI list mapping */
struct ice_vsi_list_map_info *vsi_list_info;
u16 vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff
u16 lg_act_idx;
#define ICE_INVAL_SW_MARKER_ID 0xffff
u16 sw_marker_id;
struct LIST_ENTRY_TYPE list_entry;
struct ice_fltr_info fltr_info;
#define ICE_INVAL_COUNTER_ID 0xff
u8 counter_index;
enum ice_fltr_marker marker;
};
struct ice_adv_fltr_mgmt_list_entry {
struct LIST_ENTRY_TYPE list_entry;
struct ice_adv_lkup_elem *lkups;
struct ice_adv_rule_info rule_info;
u16 lkups_cnt;
struct ice_vsi_list_map_info *vsi_list_info;
u16 vsi_count;
};
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0x1,
ICE_PROMISC_UCAST_TX = 0x2,
ICE_PROMISC_MCAST_RX = 0x4,
ICE_PROMISC_MCAST_TX = 0x8,
ICE_PROMISC_BCAST_RX = 0x10,
ICE_PROMISC_BCAST_TX = 0x20,
ICE_PROMISC_VLAN_RX = 0x40,
ICE_PROMISC_VLAN_TX = 0x80,
};
/* VSI related commands */
enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
void ice_clear_all_vsi_ctx(struct ice_hw *hw);
enum ice_status
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
u16 count, struct ice_mir_rule_buf *mr_buf,
struct ice_sq_cd *cd, u16 *rule_id);
enum ice_status
ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
u32 *ctl_bitmask);
enum ice_status
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
u32 ctl_bitmask);
/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
enum ice_status
ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id);
enum ice_status
ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id);
/* Switch/bridge related commands */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
u16 *counter_id);
enum ice_status
ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id);
enum ice_status
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
u16 buf_size, struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_get_allocd_res_desc_resp *buf,
u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
struct ice_sq_cd *cd);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list);
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list);
void ice_rem_all_sw_rules_info(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list);
enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list);
enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
u16 sw_marker);
enum ice_status
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
/* Promisc/defport setup for VSIs */
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction);
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
/* Get VSIs Promisc/defport settings */
enum ice_status
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid);
enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid);
enum ice_status ice_replay_all_fltr(struct ice_hw *hw);
enum ice_status
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */

sys/dev/ice/ice_type.h (new file, 1072 lines; diff suppressed because it is too large)

sys/dev/ice/if_ice_iflib.c (new file, 2874 lines; diff suppressed because it is too large)

sys/dev/ice/virtchnl.h (new file)

@@ -0,0 +1,923 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
/* Description:
* This header file describes the VF-PF communication protocol used
* by the drivers for all devices starting from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally.
* The Firmware copies the cookie fields when sending messages between the
* PF and VF, but uses all other fields internally. Due to this limitation,
* we must send all messages as "indirect", i.e. using an external buffer.
*
* All the VSI indexes are relative to the VF. Each VF can have a maximum of
* three VSIs. All the queue indexes are relative to the VSI. Each VF can
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value
* is of status_code type, defined in the shared type.h.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
* PF driver, then request a reset, then get resources, then configure
* queues and interrupts. After these operations are complete, the VF
* driver may start its queues, optionally add MAC and VLAN filters, and
* process traffic.
*/
/* START GENERIC DEFINES
* Need to ensure the following enums and defines hold the same meaning and
* value in current and future projects
*/
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_STATUS_ERR_PARAM = -5,
VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure.
*/
enum virtchnl_ops {
/* The PF sends status change events to VFs using
* the VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops.
* Use of "advanced opcode" features must be negotiated as part of capabilities
* exchange and are not considered part of base mode feature set.
*/
VIRTCHNL_OP_UNKNOWN = 0,
VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
VIRTCHNL_OP_RESET_VF = 2,
VIRTCHNL_OP_GET_VF_RESOURCES = 3,
VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
VIRTCHNL_OP_ENABLE_QUEUES = 8,
VIRTCHNL_OP_DISABLE_QUEUES = 9,
VIRTCHNL_OP_ADD_ETH_ADDR = 10,
VIRTCHNL_OP_DEL_ETH_ADDR = 11,
VIRTCHNL_OP_ADD_VLAN = 12,
VIRTCHNL_OP_DEL_VLAN = 13,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
/* opcode 19 is reserved */
/* opcodes 20, 21, and 22 are reserved */
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
VIRTCHNL_OP_SET_RSS_HENA = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
VIRTCHNL_OP_ENABLE_CHANNELS = 30,
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
/* opcodes 34, 35, 36, 37 and 38 are reserved */
/* opcodes 39, 40, 41 and 42 are reserved */
};
/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
* structure/union is not of the correct size, otherwise it creates an enum
* that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
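To make the mechanism concrete (names hypothetical): when the size matches,
the divisor is 1 and the enum is well-formed; when it does not, the divide by
zero stops compilation.

	struct example_pair { u32 a; u16 b; };	/* sizeof == 8, 2 pad bytes */
	VIRTCHNL_CHECK_STRUCT_LEN(8, example_pair);	/* compiles: 8 / 1 */
	/* VIRTCHNL_CHECK_STRUCT_LEN(6, example_pair); would fail: 6 / 0 */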
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
*/
struct virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
enum virtchnl_status_code v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */
};
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1.
* If there is a major version mismatch, then the VF cannot operate.
* If there is a minor version mismatch, then the VF can operate but should
* add a warning to the system log.
*
* This enum element MUST always be specified as == 1, regardless of other
* changes in the API. The PF must always respond to this message without
* error regardless of version mismatch.
*/
#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct virtchnl_version_info {
u32 major;
u32 minor;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
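A sketch of the VF-side check implied by the rules above; the helper and its
logging call are hypothetical, not part of this header.

	static bool
	example_pf_version_usable(const struct virtchnl_version_info *pf_ver)
	{
		/* Major mismatch: the VF cannot operate */
		if (pf_ver->major != VIRTCHNL_VERSION_MAJOR)
			return (false);
		/* Minor mismatch: usable, but warn in the system log */
		if (pf_ver->minor != VIRTCHNL_VERSION_MINOR)
			example_log_warning("virtchnl minor version mismatch");
		return (true);
	}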
/* VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized
* after this operation.
*
* When reset is complete, PF must ensure that all queues in all VSIs associated
* with the VF are stopped, all queue configurations in the HMC are set to 0,
* and all MAC and VLAN filters (except the default MAC address) on all VSIs
* are cleared.
*/
/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
* vsi_type should always be 6 for backward compatibility. Add other fields
* as needed.
*/
enum virtchnl_vsi_type {
VIRTCHNL_VSI_TYPE_INVALID = 0,
VIRTCHNL_VSI_SRIOV = 6,
};
/* VIRTCHNL_OP_GET_VF_RESOURCES
* Version 1.0 VF sends this request to PF with no parameters
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* virtchnl_vf_resource and one or more
* virtchnl_vsi_resource structures.
*/
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum virtchnl_vsi_type vsi_type;
u16 qset_handle;
u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0x00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0x00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0x00800000
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0x01000000
#define VIRTCHNL_VF_OFFLOAD_USO 0x02000000
/* 0x40000000 is reserved */
/* 0x80000000 is reserved */
/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
u16 max_vectors;
u16 max_mtu;
u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
struct virtchnl_vsi_resource vsi_res[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of virtchnl_txq_info.
* PF configures requested queue and returns a status code.
*/
/* Tx queue config info */
struct virtchnl_txq_info {
u16 vsi_id;
u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled; /* deprecated with AVF 1.0 */
u64 dma_ring_addr;
u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
* PF configures requested queue and returns a status code. The
* crc_disable flag disables CRC stripping on the VF. Setting
* the crc_disable flag to 1 will disable CRC stripping for each
* queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
* offload must have been set prior to sending this info or the PF
* will ignore the request. This flag should be set the same for
* all of the queues for a VF.
*/
/* Rx queue config info */
struct virtchnl_rxq_info {
u16 vsi_id;
u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size;
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
u8 crc_disable;
u8 pad1[3];
u64 dma_ring_addr;
enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
u32 pad2;
};
VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
* NOTE: The VF is not required to configure all queues in a single request.
* It may send multiple messages. PF drivers must correctly handle all VF
* requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
struct virtchnl_txq_info txq;
struct virtchnl_rxq_info rxq;
};
VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
struct virtchnl_queue_pair_info qpair[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
* this VF. Each VF gets a guaranteed number of queues on init but asking for
* additional queues must be negotiated. This is a best effort request as it
* is possible the PF does not have enough queues left to support the request.
* If the PF cannot support the number requested it will respond with the
* maximum number it is able to support. If the request is successful, PF will
* then reset the VF to institute required changes.
*/
/* VF resource request */
struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0. The VF may not request
* that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
* NOTE: due to hardware requirements, all active queues (both TX and RX)
* should be mapped to interrupts, even if the driver intends to operate
* only in polling mode. In this case the interrupt may be disabled, but
* the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
u16 vector_id;
u16 rxq_map;
u16 txq_map;
u16 rxitr_idx;
u16 txitr_idx;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
struct virtchnl_vector_map vecmap[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
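/* Illustrative sketch, not part of the original header: a VF mapping RX/TX
 * queue 0 to MSI-X vector 1 (vector 0 is reserved for the "other" causes)
 * could fill a map entry as below; vsi_id would come from the PF's resource
 * reply, and the helper name is hypothetical.
 */
static inline void
virtchnl_vector_map_example(struct virtchnl_vector_map *vm, u16 vsi_id)
{
	vm->vsi_id = vsi_id;
	vm->vector_id = 1;	/* traffic may not use vector 0 */
	vm->rxq_map = 1 << 0;	/* bitmap: RX queue 0 */
	vm->txq_map = 1 << 0;	/* bitmap: TX queue 0 */
	vm->rxitr_idx = 0;	/* ITR index for RX interrupts */
	vm->txitr_idx = 0;	/* ITR index for TX interrupts */
}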
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
* VF sends these message to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
* NOTE: The VF is not required to enable/disable all queues in a single
* request. It may send multiple messages.
* PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
u16 pad;
u32 rx_queues;
u32 tx_queues;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
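/* Illustrative sketch, not part of the original header: enabling the first
 * nqueues RX/TX queue pairs would use bitmaps as below (assumes
 * 0 < nqueues < 32; the helper name is hypothetical).
 */
static inline void
virtchnl_queue_select_example(struct virtchnl_queue_select *qs, u16 vsi_id,
    u32 nqueues)
{
	qs->vsi_id = vsi_id;
	qs->pad = 0;
	qs->rx_queues = (1u << nqueues) - 1;	/* e.g. nqueues = 4 -> 0xf */
	qs->tx_queues = qs->rx_queues;
}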
/* VIRTCHNL_OP_ADD_ETH_ADDR
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
* PF adds the filters and returns status.
*/
/* VIRTCHNL_OP_DEL_ETH_ADDR
* VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI.
* PF removes the filters and returns status.
*/
struct virtchnl_ether_addr {
u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
u8 pad[2];
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
struct virtchnl_ether_addr list[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
/* VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
u16 vlan_id[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled.
*/
struct virtchnl_promisc_info {
u16 vsi_id;
u16 flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002
/* VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses
* the virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF.
*
* PF replies with struct virtchnl_eth_stats in an external buffer.
*/
struct virtchnl_eth_stats {
u64 rx_bytes; /* received bytes */
u64 rx_unicast; /* received unicast pkts */
u64 rx_multicast; /* received multicast pkts */
u64 rx_broadcast; /* received broadcast pkts */
u64 rx_discards;
u64 rx_unknown_protocol;
u64 tx_bytes; /* transmitted bytes */
u64 tx_unicast; /* transmitted unicast pkts */
u64 tx_multicast; /* transmitted multicast pkts */
u64 tx_broadcast; /* transmitted broadcast pkts */
u64 tx_discards;
u64 tx_errors;
};
/* VIRTCHNL_OP_CONFIG_RSS_KEY
* VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the RSS fields in
* the VF resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF.
*/
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
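/* Illustrative sketch, not part of the original header: both RSS structs end
 * in a one-byte flexible-array idiom, so the message length the validator
 * below expects is sizeof(struct) + len - 1 for a non-zero len. The helper
 * name is hypothetical.
 */
static inline u16
virtchnl_rss_key_msglen_example(u16 key_len)
{
	return (u16)(sizeof(struct virtchnl_rss_key) + key_len - 1);
}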
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
struct virtchnl_rss_hena {
u64 hena;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
/* These limits are used by the PF driver to enforce how many channels are
 * supported. When the ADQ_V2 capability is negotiated, up to 16 channels
 * are allowed; otherwise the PF driver allows at most 4 channels.
 */
#define VIRTCHNL_MAX_ADQ_CHANNELS 4
#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
/* VIRTCHNL_OP_ENABLE_CHANNELS
* VIRTCHNL_OP_DISABLE_CHANNELS
* VF sends these messages to enable or disable channels based on
* the user specified queue count and queue offset for each traffic class.
* This struct encompasses all the information that the PF needs from
* VF to create a channel.
*/
struct virtchnl_channel_info {
u16 count; /* number of queues in a channel */
u16 offset; /* queues in a channel start from 'offset' */
u32 pad;
u64 max_tx_rate;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
struct virtchnl_tc_info {
u32 num_tc;
u32 pad;
struct virtchnl_channel_info list[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
/* VIRTCHNL_ADD_CLOUD_FILTER
* VIRTCHNL_DEL_CLOUD_FILTER
* VF sends these messages to add or delete a cloud filter based on the
* user specified match and action filters. These structures encompass
* all the information that the PF needs from the VF to add/delete a
* cloud filter.
*/
struct virtchnl_l4_spec {
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
	/* vlan_prio is part of this 16-bit field even from the OS
	 * perspective: bits 11..0 carry the actual VLAN ID, and bits 14..12
	 * carry vlan_prio. If vlan_prio offload is decided on in the future,
	 * that information will be passed in bits 14..12 of the "vlan_id"
	 * field.
	 */
__be16 vlan_id;
__be16 pad; /* reserved for future use */
__be32 src_ip[4];
__be32 dst_ip[4];
__be16 src_port;
__be16 dst_port;
};
VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
union virtchnl_flow_spec {
struct virtchnl_l4_spec tcp_spec;
u8 buffer[128]; /* reserved for future use */
};
VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
enum virtchnl_action {
/* action types */
VIRTCHNL_ACTION_DROP = 0,
VIRTCHNL_ACTION_TC_REDIRECT,
};
enum virtchnl_flow_type {
/* flow types */
VIRTCHNL_TCP_V4_FLOW = 0,
VIRTCHNL_TCP_V6_FLOW,
VIRTCHNL_UDP_V4_FLOW,
VIRTCHNL_UDP_V6_FLOW,
};
struct virtchnl_filter {
union virtchnl_flow_spec data;
union virtchnl_flow_spec mask;
enum virtchnl_flow_type flow_type;
enum virtchnl_action action;
u32 action_meta;
u8 field_flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
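/* Illustrative sketch, not part of the original header: a hypothetical cloud
 * filter redirecting TCP/IPv4 traffic on one destination port to traffic
 * class 1. The exact mask semantics are PF-specific; this assumes set mask
 * bits select the fields to match, and that memset() is available.
 */
static inline void
virtchnl_filter_example(struct virtchnl_filter *f, __be16 dport)
{
	memset(f, 0, sizeof(*f));
	f->flow_type = VIRTCHNL_TCP_V4_FLOW;
	f->action = VIRTCHNL_ACTION_TC_REDIRECT;
	f->action_meta = 1;			/* target traffic class */
	f->data.tcp_spec.dst_port = dport;
	f->mask.tcp_spec.dst_port = (__be16)0xffff; /* match dst_port only */
}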
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
* messages in response to this one.
*/
enum virtchnl_event_codes {
VIRTCHNL_EVENT_UNKNOWN = 0,
VIRTCHNL_EVENT_LINK_CHANGE,
VIRTCHNL_EVENT_RESET_IMPENDING,
VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define PF_EVENT_SEVERITY_INFO 0
#define PF_EVENT_SEVERITY_ATTENTION 1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
/* If the PF driver does not support the new speed reporting
* capabilities then use link_event else use link_event_adv to
* get the speed and link information. The ability to understand
* new speeds is indicated by setting the capability flag
* VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
* in virtchnl_vf_resource struct and can be used to determine
* which link event struct to use below.
*/
struct {
enum virtchnl_link_speed link_speed;
u8 link_status;
} link_event;
struct {
/* link_speed provided in Mbps */
u32 link_speed;
u8 link_status;
} link_event_adv;
} event_data;
int severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
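/* Illustrative sketch, not part of the original header: a hypothetical
 * VF-side helper extracting the link speed in Mbps, where vf_cap_flags is
 * the value returned in struct virtchnl_vf_resource. Translating the legacy
 * enum virtchnl_link_speed values is left out here.
 */
static inline u32
virtchnl_link_speed_mbps_example(const struct virtchnl_pf_event *pfe,
    u32 vf_cap_flags)
{
	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return pfe->event_data.link_event_adv.link_speed; /* Mbps */
	return 0;	/* legacy link_event reporting not handled here */
}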
/* Since VF messages are limited to a u16 size, precalculate the maximum
 * number of nested elements in each virtchnl structure that the virtual
 * channel can handle in a single message.
 */
enum virtchnl_vector_limits {
VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
sizeof(struct virtchnl_queue_pair_info),
VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
sizeof(struct virtchnl_vector_map),
VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr),
VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16),
VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
sizeof(struct virtchnl_channel_info),
};
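/* For example, given VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list)
 * above, VIRTCHNL_OP_ADD_DEL_VLAN_MAX works out to
 * (65535 - 6) / 2 = 32764 VLAN IDs in a single message.
 */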
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked,
 * will result in 3.
*/
enum virtchnl_vfr_states {
VIRTCHNL_VFR_INPROGRESS = 0,
VIRTCHNL_VFR_COMPLETED,
VIRTCHNL_VFR_VFACTIVE,
};
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
* @v_opcode: Opcode for the message
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* validate msg format against struct for each opcode
*/
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
int valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct virtchnl_version_info);
break;
case VIRTCHNL_OP_RESET_VF:
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(ver))
valid_len = sizeof(u32);
break;
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct virtchnl_txq_info);
break;
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
err_msg_format = true;
break;
}
valid_len += (vqc->num_queue_pairs *
sizeof(struct
virtchnl_queue_pair_info));
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
if (vimi->num_vectors == 0 || vimi->num_vectors >
VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
err_msg_format = true;
break;
}
valid_len += (vimi->num_vectors *
sizeof(struct virtchnl_vector_map));
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
valid_len = sizeof(struct virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
if (veal->num_elements == 0 || veal->num_elements >
VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
err_msg_format = true;
break;
}
valid_len += veal->num_elements *
sizeof(struct virtchnl_ether_addr);
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
if (vfl->num_elements == 0 || vfl->num_elements >
VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
err_msg_format = true;
break;
}
valid_len += vfl->num_elements * sizeof(u16);
}
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct virtchnl_promisc_info);
break;
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
valid_len = sizeof(struct virtchnl_rss_key);
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
if (vrk->key_len == 0) {
/* zero length is allowed as input */
break;
}
valid_len += vrk->key_len - 1;
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
valid_len = sizeof(struct virtchnl_rss_lut);
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
if (vrl->lut_entries == 0) {
/* zero entries is allowed as input */
break;
}
valid_len += vrl->lut_entries - 1;
}
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
valid_len = sizeof(struct virtchnl_tc_info);
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
if (vti->num_tc == 0 || vti->num_tc >
VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
err_msg_format = true;
break;
}
valid_len += (vti->num_tc - 1) *
sizeof(struct virtchnl_channel_info);
}
break;
case VIRTCHNL_OP_DISABLE_CHANNELS:
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER:
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
return VIRTCHNL_STATUS_ERR_PARAM;
}
/* few more checks */
if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
}
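/* Illustrative usage sketch, not part of the original header: a PF receiving
 * a VF message would typically validate it before dispatching, e.g.
 *
 *	err = virtchnl_vc_validate_vf_msg(&vf_ver, v_opcode, msg, msglen);
 *	if (err != 0)
 *		reply to the VF with the error status and drop the message;
 *
 * where vf_ver is the virtchnl_version_info previously negotiated with the
 * VF (the names here are hypothetical).
 */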
#endif /* _VIRTCHNL_H_ */


@ -0,0 +1,449 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _VIRTCHNL_INLINE_IPSEC_H_
#define _VIRTCHNL_INLINE_IPSEC_H_
#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3
#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16
#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128
#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
#define VIRTCHNL_IPSEC_SELECTED_SA_DESTROY 0
#define VIRTCHNL_IPSEC_ALL_SA_DESTROY 1
/* crypto type */
#define VIRTCHNL_AUTH 1
#define VIRTCHNL_CIPHER 2
#define VIRTCHNL_AEAD 3
/* algorithm type */
/* Hash Algorithm */
#define VIRTCHNL_NO_ALG 0 /* NULL algorithm */
#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */
#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */
#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 160 bit SHA1 algorithm */
#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */
#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */
#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */
#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */
#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
/* Cipher Algorithm */
#define VIRTCHNL_3DES_CBC 15 /* Triple DES algorithm in CBC mode */
#define VIRTCHNL_AES_CBC 16 /* AES algorithm in CBC mode */
#define VIRTCHNL_AES_CTR 17 /* AES algorithm in Counter mode */
/* AEAD Algorithm */
#define VIRTCHNL_AES_CCM 18 /* AES algorithm in CCM mode */
#define VIRTCHNL_AES_GCM 19 /* AES algorithm in GCM mode */
#define VIRTCHNL_CHACHA20_POLY1305 20 /* algorithm of ChaCha20-Poly1305 */
/* protocol type */
#define VIRTCHNL_PROTO_ESP 1
#define VIRTCHNL_PROTO_AH 2
#define VIRTCHNL_PROTO_RSVD1 3
/* sa mode */
#define VIRTCHNL_SA_MODE_TRANSPORT 1
#define VIRTCHNL_SA_MODE_TUNNEL 2
#define VIRTCHNL_SA_MODE_TRAN_TUN 3
#define VIRTCHNL_SA_MODE_UNKNOWN 4
/* sa direction */
#define VIRTCHNL_DIR_INGRESS 1
#define VIRTCHNL_DIR_EGRESS 2
#define VIRTCHNL_DIR_INGRESS_EGRESS 3
/* sa termination */
#define VIRTCHNL_TERM_SOFTWARE 1
#define VIRTCHNL_TERM_HARDWARE 2
/* sa ip type */
#define VIRTCHNL_IPV4 1
#define VIRTCHNL_IPV6 2
/* Not all fields are always valid; if a given field is invalid, set all of its bits to 1 */
struct virtchnl_algo_cap {
u32 algo_type;
u16 block_size;
u16 min_key_size;
u16 max_key_size;
u16 inc_key_size;
u16 min_iv_size;
u16 max_iv_size;
u16 inc_iv_size;
u16 min_digest_size;
u16 max_digest_size;
u16 inc_digest_size;
u16 min_aad_size;
u16 max_aad_size;
u16 inc_aad_size;
};
/* VF records the crypto capabilities received from the PF over virtchnl */
struct virtchnl_sym_crypto_cap {
u8 crypto_type;
u8 algo_cap_num;
struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
};
/* VIRTCHNL_OP_GET_IPSEC_CAP
 * VF passes virtchnl_ipsec_cap to the PF,
 * and the PF returns its IPsec capabilities over virtchnl.
 */
struct virtchnl_ipsec_cap {
/* max number of SA per VF */
u16 max_sa_num;
/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
u8 virtchnl_protocol_type;
/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
u8 virtchnl_sa_mode;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* type of esn - !0:enable/0:disable */
u8 esn_enabled;
/* type of udp_encap - !0:enable/0:disable */
u8 udp_encap_enabled;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 termination_mode;
/* SA index mode - !0:enable/0:disable */
u8 sa_index_sw_enabled;
/* auditing mode - !0:enable/0:disable */
u8 audit_enabled;
/* lifetime byte limit - !0:enable/0:disable */
u8 byte_limit_enabled;
/* drop on authentication failure - !0:enable/0:disable */
u8 drop_on_auth_fail_enabled;
/* anti-replay window check - !0:enable/0:disable */
u8 arw_check_enabled;
/* number of supported crypto capability */
u8 crypto_cap_num;
/* descriptor ID */
u16 desc_id;
/* crypto capabilities */
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
};
/* desc_id records the format of the RX descriptor */
struct virtchnl_rx_desc_fmt {
u16 desc_id;
};
/* desc_ids record the formats of the TX descriptors */
struct virtchnl_tx_desc_fmt {
u8 desc_num;
u16 desc_ids[VIRTCHNL_IPSEC_MAX_TX_DESC_NUM];
};
/* configuration of crypto function */
struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;
u32 algo_type;
/* Length of valid IV data. */
u16 iv_len;
/* Length of digest */
u16 digest_len;
/* The length of the symmetric key */
u16 key_len;
/* key data buffer */
u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
};
struct virtchnl_ipsec_sym_crypto_cfg {
struct virtchnl_ipsec_crypto_cfg_item
items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
};
/* VIRTCHNL_OP_IPSEC_SA_CREATE
 * VF sends this SA configuration to the PF using virtchnl;
 * the PF creates the SA as configured, and the PF driver returns
 * a unique index (sa_idx) for the created SA.
 */
struct virtchnl_ipsec_sa_cfg {
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* type of outer IP - IPv4/IPv6 */
u8 virtchnl_ip_type;
/* type of esn - !0:enable/0:disable */
u8 esn_enabled;
/* udp encap - !0:enable/0:disable */
u8 udp_encap_enabled;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* reserved */
u8 reserved1;
/* SA security parameter index */
u32 spi;
/* outer src ip address */
u8 src_addr[16];
/* outer dst ip address */
u8 dst_addr[16];
/* SA salt */
u32 salt;
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
/* When enabled, sa_index must be valid */
u8 sa_index_en;
/* SA index when sa_index_en is true */
u32 sa_index;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When enabled, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
	/* anti-replay window check - enable/disable
	 * When enabled, arw_size must be valid.
	 */
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* no ip offload mode - enable/disable
* When enabled, ip type and address must not be valid.
*/
u8 no_ip_offload_en;
	/* SA Domain. Used to logically separate an SADB into groups.
	 * PF drivers supporting a single group ignore this field.
	 */
u16 sa_domain;
/* crypto configuration */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
/* VIRTCHNL_OP_IPSEC_SA_UPDATE
 * VF sends the index and new configuration of an SA to the PF;
 * the PF updates the SA according to this configuration.
 */
struct virtchnl_ipsec_sa_update {
u32 sa_index; /* SA to update */
u32 esn_hi; /* high 32 bits of esn */
u32 esn_low; /* low 32 bits of esn */
};
/* VIRTCHNL_OP_IPSEC_SA_DESTROY
 * VF sends the index configuration of the SA(s) to destroy to the PF;
 * the PF destroys the SA(s) accordingly.
 * The flag indicates whether all SAs or just the selected SAs
 * will be destroyed.
 */
struct virtchnl_ipsec_sa_destroy {
	/* VIRTCHNL_IPSEC_SELECTED_SA_DESTROY: the selected SAs are destroyed.
	 * VIRTCHNL_IPSEC_ALL_SA_DESTROY: all SAs are destroyed.
	 */
u8 flag;
u8 pad1; /* padding */
u16 pad2; /* padding */
/* selected SA index */
u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
};
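/* Illustrative sketch, not part of the original header: destroying a single
 * SA by index (the helper name is hypothetical; assumes memset() is
 * available).
 */
static inline void
virtchnl_ipsec_sa_destroy_example(struct virtchnl_ipsec_sa_destroy *d,
    u32 sa_idx)
{
	memset(d, 0, sizeof(*d));
	d->flag = VIRTCHNL_IPSEC_SELECTED_SA_DESTROY;
	d->sa_index[0] = sa_idx;
}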
/* VIRTCHNL_OP_IPSEC_SA_READ
 * VF sends a read request to the PF using virtchnl;
 * the PF reads the SA and returns its configuration in this struct.
 */
struct virtchnl_ipsec_sa_read {
/* SA valid - invalid/valid */
u8 valid;
/* SA active - inactive/active */
u8 active;
/* SA SN rollover - not_rollover/rollover */
u8 sn_rollover;
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When set to limit, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
/* anti-replay window check - enable/disable
* When set to check, arw_size, arw_top, and arw must be valid
*/
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* reserved */
u8 reserved1;
/* top of anti-replay-window */
u64 arw_top;
/* anti-replay-window */
u8 arw[16];
/* packets processed */
u64 packets_processed;
/* bytes processed */
u64 bytes_processed;
/* packets dropped */
u32 packets_dropped;
/* authentication failures */
u32 auth_fails;
/* ARW check failures */
u32 arw_fails;
/* type of esn - enable/disable */
u8 esn;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* SA security parameter index */
u32 spi;
/* SA salt */
u32 salt;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
	/* SA Domain. Used to logically separate an SADB into groups.
	 * PF drivers supporting a single group ignore this field.
	 */
u16 sa_domain;
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* crypto configuration. Salt and keys are set to 0 */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */


@ -142,6 +142,8 @@ SUBDIR= \
${_iavf} \
${_ibcore} \
${_ichwd} \
${_ice} \
${_ice_ddp} \
${_ida} \
if_bridge \
if_disc \
@ -532,6 +534,13 @@ _rtwnfw= rtwnfw
_cxgbe= cxgbe
.endif
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "arm64"
_ice= ice
.if ${MK_SOURCELESS_UCODE} != "no"
_ice_ddp= ice_ddp
.endif
.endif
# These rely on 64bit atomics
.if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \
${MACHINE_CPUARCH} != "mips"

sys/modules/ice/Makefile

@ -0,0 +1,15 @@
#$FreeBSD$
.PATH: ${SRCTOP}/sys/dev/ice
KMOD = if_ice
SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h
SRCS += ice_lib.c ice_osdep.c ice_resmgr.c ice_strings.c
SRCS += ice_iflib_recovery_txrx.c ice_iflib_txrx.c if_ice_iflib.c
# Shared source
SRCS += ice_common.c ice_controlq.c ice_dcb.c ice_flex_pipe.c ice_flow.c
SRCS += ice_nvm.c ice_sched.c ice_sriov.c ice_switch.c
.include <bsd.kmod.mk>


@ -0,0 +1,24 @@
# $FreeBSD$
# Find the highest version DDP package file and build a .ko for it
PKG_FILE != find ${SRCTOP}/sys/contrib/dev/ice -name 'ice-*.pkg' | sort -V | tail -1
.if empty(PKG_FILE)
.error Unable to locate the DDP package binary file
.endif
.info Found ${PKG_FILE}
PKG_NAME != basename ${PKG_FILE}
PKG_VER_STR != basename -s .pkg ${PKG_NAME}
PKG_VER_STR := ${PKG_VER_STR:S/^ice-//}
PKG_VER_STR := ${PKG_VER_STR:S/-signed$//}
PKG_VER_MAJ != echo ${PKG_VER_STR} | cut -d. -f1
PKG_VER_MIN != echo ${PKG_VER_STR} | cut -d. -f2
PKG_VER_UPD != echo ${PKG_VER_STR} | cut -d. -f3
PKG_VER_DFT != echo ${PKG_VER_STR} | cut -d. -f4
PKG_VERSION != printf "0x%02x%02x%02x%02x" "${PKG_VER_MAJ}" "${PKG_VER_MIN}" "${PKG_VER_UPD}" "${PKG_VER_DFT}"
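# For example, a package file named ice-1.3.4.1.pkg (hypothetical version)
# would yield PKG_VERSION 0x01030401.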
KMOD := ice_ddp
FIRMWS := ${PKG_FILE}:ice_ddp:${PKG_VERSION}
.include <bsd.kmod.mk>


@ -0,0 +1,21 @@
# Doxyfile 1.5.2
# $FreeBSD$
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
PROJECT_NAME = "FreeBSD kernel ice device code"
OUTPUT_DIRECTORY = $(DOXYGEN_DEST_PATH)/dev_ice/
EXTRACT_ALL = YES # TODO: ice has @file comments.. disable this?
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
INPUT = $(DOXYGEN_SRC_PATH)/dev/ice/ \
$(NOTREVIEWED)
GENERATE_TAGFILE = dev_ice/dev_ice.tag
@INCLUDE_PATH = $(DOXYGEN_INCLUDE_PATH)
@INCLUDE = common-Doxyfile