ixl/iavf(4): Change ixlv to iavf and update it to use iflib(9)

Finishes the conversion of the 40Gb Intel Ethernet drivers to iflib(9) for
FreeBSD 12.0, and fixes numerous bugs in both ixl(4) and iavf(4).

This commit also re-adds the VF driver to GENERIC since it now compiles and
functions.

The VF driver name was changed from ixlv(4) to iavf(4) because the VF driver is
now intended to be used with future products, not just with Fortville/Fort Park
VFs.

A man page update that documents these drivers is forthcoming in a separate
commit.

Reviewed by:    sbruno@, kbowling@
Tested by:      jeffrey.e.pieper@intel.com
Approved by:	re (gjb@)
Relnotes:       yes
Sponsored by:   Intel Corporation
Differential Revision: https://reviews.freebsd.org/D16429
commit 77c1fcec91 (parent 3cf1291d2e)
Author: Eric Joyner
Date:   2018-10-12 22:40:54 +00:00
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=339338
25 changed files with 2188 additions and 3565 deletions


@ -240,9 +240,8 @@ device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel PRO/1000 Gigabit Ethernet Family
device ix # Intel PRO/10GbE PCIE PF Ethernet
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel XL710 40Gbe PCIE Ethernet
#options IXL_IW # Enable iWARP Client Interface in ixl(4)
#device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device ti # Alteon Networks Tigon I/II gigabit Ethernet
device txp # 3Com 3cR990 (``Typhoon'')


@ -313,8 +313,6 @@ options DRM_DEBUG # Include debug printfs (slow)
# iwn: Intel Wireless WiFi Link 1000/105/135/2000/4965/5000/6000/6050 abgn
# 802.11 network adapters
# Requires the iwn firmware module
# ixl: Intel XL710 40Gbe PCIE Ethernet
# ixlv: Intel XL710 40Gbe VF PCIE Ethernet
# mthca: Mellanox HCA InfiniBand
# mlx4ib: Mellanox ConnectX HCA InfiniBand
# mlx4en: Mellanox ConnectX HCA Ethernet
@ -332,9 +330,8 @@ options ED_SIC
device ipw # Intel 2100 wireless NICs.
device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs.
device iwn # Intel 4965/1000/5000/6000 wireless NICs.
device ixl # Intel XL710 40Gbe PCIE Ethernet
#options IXL_IW # Enable iWARP Client Interface in ixl(4)
#device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
device mthca # Mellanox HCA InfiniBand
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand


@ -271,25 +271,23 @@ dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_i2c.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
#dev/ixl/ixl_iw.c optional ixl pci \
# compile-with "${NORMAL_C} -I$S/dev/ixl"
#dev/ixl/if_ixlv.c optional ixlv pci \
# compile-with "${NORMAL_C} -I$S/dev/ixl"
#dev/ixl/ixlvc.c optional ixlv pci \
# compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_txrx.c optional ixl pci | ixlv pci \
dev/ixl/if_ixlv.c optional iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_osdep.c optional ixl pci | ixlv pci \
dev/ixl/ixlvc.c optional iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_lan_hmc.c optional ixl pci | ixlv pci \
dev/ixl/ixl_txrx.c optional ixl pci | iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_hmc.c optional ixl pci | ixlv pci \
dev/ixl/i40e_osdep.c optional ixl pci | iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_common.c optional ixl pci | ixlv pci \
dev/ixl/i40e_lan_hmc.c optional ixl pci | iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_nvm.c optional ixl pci | ixlv pci \
dev/ixl/i40e_hmc.c optional ixl pci | iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_adminq.c optional ixl pci | ixlv pci \
dev/ixl/i40e_common.c optional ixl pci | iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_nvm.c optional ixl pci | iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_adminq.c optional ixl pci | iavf pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_dcb.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"


@ -1,410 +0,0 @@
ixl FreeBSD* Base Driver and ixlv VF Driver for the
Intel XL710 Ethernet Controller Family
/*$FreeBSD$*/
================================================================
August 26, 2014
Contents
========
- Overview
- Supported Adapters
- The VF Driver
- Building and Installation
- Additional Configurations
- Known Limitations
Overview
========
This file describes the IXL FreeBSD* Base driver and the IXLV VF Driver
for the XL710 Ethernet Family of Adapters. The driver has been developed
for use with FreeBSD 10.0 or later, but should be compatible with any
supported release.
For questions related to hardware requirements, refer to the documentation
supplied with your Intel XL710 adapter. All hardware requirements listed
apply for use with FreeBSD.
Supported Adapters
==================
The drivers in this release are compatible with XL710 and X710-based
Intel Ethernet Network Connections.
SFP+ Devices with Pluggable Optics
----------------------------------
SR Modules
----------
Intel DUAL RATE 1G/10G SFP+ SR (bailed) FTLX8571D3BCV-IT
Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDZ-IN2
LR Modules
----------
Intel DUAL RATE 1G/10G SFP+ LR (bailed) FTLX1471D3BCV-IT
Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDZ-IN2
QSFP+ Modules
-------------
Intel TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed) E40GQSFPSR
Intel TRIPLE RATE 1G/10G/40G QSFP+ LR (bailed) E40GQSFPLR
QSFP+ 1G speed is not supported on XL710 based devices.
X710/XL710 Based SFP+ adapters support all passive and active limiting direct
attach cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
The VF Driver
=============
The VF driver is normally used in a virtualized environment where a host
driver manages SR-IOV and provides a VF device to the guest. In this
first release, the only host environment tested was Linux QEMU/KVM;
support for Xen and VMware hosts is planned for a later time.
In the FreeBSD guest, the IXLV driver is loaded and operates on the VF
device assigned to it.
The VF driver provides most of the same functionality as the core driver,
but it is subordinate to the host: access to many controls is accomplished
by sending a request to the host over what is called the "Admin queue".
These requests occur mostly during startup and initialization; once in
operation the device is self-contained and should achieve near-native
performance.
Some notable limitations of the VF environment: for security reasons,
the driver is never permitted to enter promiscuous mode, so tcpdump will
not behave the same way on a VF interface. Also, media information is not
available from the PF, so the media type will always appear as auto.
Tarball Building and Installation
=================================
NOTE: You must have kernel sources installed to compile the driver tarball.
These instructions assume a standalone driver tarball. Building the driver
already in the kernel source is simply a matter of adding the device entry
to the kernel config file, or of building in the ixl or ixlv module
directory.
In the instructions below, x.x.x is the driver version as indicated in the
name of the driver tarball. The example is for ixl; the same procedure
applies to ixlv.
1. Move the base driver tar file to the directory of your choice.
For example, use /home/username/ixl or /usr/local/src/ixl.
2. Untar/unzip the archive:
tar xfz ixl-x.x.x.tar.gz
3. To install the man page:
cd ixl-x.x.x
gzip -c ixl.4 > /usr/share/man/man4/ixl.4.gz
4. To load the driver onto a running system:
cd ixl-x.x.x/src
make load
5. To assign an IP address to the interface, enter the following:
ifconfig ixl<interface_num> <IP_address>
6. Verify that the interface works. Enter the following, where <IP_address>
is the IP address for another machine on the same subnet as the interface
that is being tested:
ping <IP_address>
7. If you want the driver to load automatically when the system is booted:
cd ixl-x.x.x/src
make
make install
Edit /boot/loader.conf, and add the following line:
if_ixl_load="YES"
Edit /etc/rc.conf, and create the appropriate
ifconfig_ixl<interface_num> entry:
ifconfig_ixl<interface_num>="<ifconfig_settings>"
Example usage:
ifconfig_ixl0="inet 192.168.10.1 netmask 255.255.255.0"
NOTE: For assistance, see the ifconfig man page.
Configuration and Tuning
========================
Both drivers support Transmit/Receive Checksum Offload for IPv4 and IPv6,
TSO for IPv4 and IPv6, LRO, and Jumbo Frames on all 40 Gigabit adapters.
Jumbo Frames
------------
To enable Jumbo Frames, use the ifconfig utility to increase
the MTU beyond 1500 bytes.
- The Jumbo Frames setting on the switch must be set at least 22 bytes
larger than the MTU of the adapter (22 bytes = 14-byte Ethernet header +
4-byte VLAN tag + 4-byte CRC).
- The maximum MTU setting for Jumbo Frames is 9706. This value plus the
22 bytes of framing overhead coincides with the maximum jumbo frame size
of 9728 bytes.
To modify the setting, enter the following:
ifconfig ixl<interface_num> <hostname or IP address> mtu 9000
- To confirm an interface's MTU value, use the ifconfig command.
To confirm the MTU used between two specific devices, use:
route get <destination_IP_address>
VLANs
-----
To create a new VLAN pseudo-interface:
ifconfig <vlan_name> create
To associate the VLAN pseudo-interface with a physical interface
and assign a VLAN ID, IP address, and netmask:
ifconfig <vlan_name> <ip_address> netmask <subnet_mask> vlan
<vlan_id> vlandev <physical_interface>
Example:
ifconfig vlan10 10.0.0.1 netmask 255.255.255.0 vlan 10 vlandev ixl0
In this example, all packets will be marked on egress with
802.1Q VLAN tags, specifying a VLAN ID of 10.
To remove a VLAN pseudo-interface:
ifconfig <vlan_name> destroy
Checksum Offload
----------------
Checksum offloading supports TCP and UDP packets over IPv4 and IPv6,
and is supported for both transmit and receive. Checksum offloading
for transmit and receive is enabled by default for both IPv4 and IPv6.
Checksum offloading can be enabled or disabled using ifconfig;
transmit and receive offloading for IPv4 and IPv6 are enabled
and disabled separately.
NOTE: TSO requires Tx checksum, so when Tx checksum
is disabled, TSO will also be disabled.
To enable Tx checksum offloading for IPv4:
ifconfig ixl<interface_num> txcsum4
To disable Tx checksum offloading for IPv4:
ifconfig ixl<interface_num> -txcsum4
(NOTE: This will also disable TSO4.)
To enable Rx checksum offloading for IPv6:
ifconfig ixl<interface_num> rxcsum6
To disable Rx checksum offloading for IPv6:
ifconfig ixl<interface_num> -rxcsum6
(NOTE: Disabling Tx checksum offloading for IPv6 with -txcsum6 will also
disable TSO6.)
To confirm the current settings:
ifconfig ixl<interface_num>
TSO
---
TSO supports both IPv4 and IPv6 and is enabled by default. TSO can
be disabled and enabled using the ifconfig utility.
NOTE: TSO requires Tx checksum, so when Tx checksum is
disabled, TSO will also be disabled.
To disable TSO IPv4:
ifconfig ixl<interface_num> -tso4
To enable TSO IPv4:
ifconfig ixl<interface_num> tso4
To disable TSO IPv6:
ifconfig ixl<interface_num> -tso6
To enable TSO IPv6:
ifconfig ixl<interface_num> tso6
To disable BOTH TSO IPv4 and IPv6:
ifconfig ixl<interface_num> -tso
To enable BOTH TSO IPv4 and IPv6:
ifconfig ixl<interface_num> tso
LRO
---
Large Receive Offload is enabled by default. It can be enabled
or disabled by using the ifconfig utility.
NOTE: LRO should be disabled when forwarding packets.
To disable LRO:
ifconfig ixl<interface_num> -lro
To enable LRO:
ifconfig ixl<interface_num> lro
Flow Control (IXL only)
-----------------------
Flow control is disabled by default. To change flow control settings, use
sysctl.
To enable flow control for receiving pause frames:
sysctl dev.ixl.<interface_num>.fc=1
To enable flow control for transmitting pause frames:
sysctl dev.ixl.<interface_num>.fc=2
To enable flow control for both receiving and transmitting pause frames:
sysctl dev.ixl.<interface_num>.fc=3
To disable flow control:
sysctl dev.ixl.<interface_num>.fc=0
NOTE: You must have a flow control capable link partner.
NOTE: The VF driver does not have access to flow control; it must be
managed from the host side.
Important system configuration changes:
=======================================
-Edit the file /etc/sysctl.conf, and add the line:
 hw.intr_storm_threshold=0   (the default is 1000)
-Best throughput results are seen with a large MTU; use 9706 if possible.
-The default number of descriptors per ring is 1024; increasing this may
improve performance, depending on the use case.
-The VF driver uses a relatively large buf ring; this was found to
eliminate UDP transmit errors. It is a tunable, and if no UDP traffic is
used it can be reduced, since it consumes memory on a per-queue basis.
Known Limitations
=================
Network Memory Buffer allocation
--------------------------------
FreeBSD may have a low number of network memory buffers (mbufs) by default.
If the mbuf limits are too low, the driver may fail to initialize and/or
the system may become unresponsive. You can check whether the system is
mbuf-starved by running 'netstat -m'. Increase the number of mbufs by
setting the following values in /etc/sysctl.conf:
kern.ipc.nmbclusters
kern.ipc.nmbjumbop
kern.ipc.nmbjumbo9
kern.ipc.nmbjumbo16
kern.ipc.nmbufs
The amount of memory that you allocate is system specific, and may
require some trial and error.
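As a purely illustrative starting point (these values are placeholders,
not recommendations from this document; scale them to your system's
memory), an /etc/sysctl.conf entry might look like:
kern.ipc.nmbclusters=262144
kern.ipc.nmbjumbop=131072
kern.ipc.nmbjumbo9=65536
kern.ipc.nmbjumbo16=32768
kern.ipc.nmbufs=1048576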
Also, increasing the following values in /etc/sysctl.conf could help
improve network performance:
kern.ipc.maxsockbuf
net.inet.tcp.sendspace
net.inet.tcp.recvspace
net.inet.udp.maxdgram
net.inet.udp.recvspace
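For example (again, illustrative values only; appropriate settings depend
on your workload and available memory):
kern.ipc.maxsockbuf=16777216
net.inet.tcp.sendspace=262144
net.inet.tcp.recvspace=262144
net.inet.udp.maxdgram=57344
net.inet.udp.recvspace=262144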
UDP Stress Test Dropped Packet Issue
------------------------------------
Under a small-packet UDP stress test with the ixl driver, the FreeBSD
system may drop UDP packets because the socket buffers fill up. You may
want to change the driver's flow control setting to the minimum value to
help control packet reception.
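A minimal sketch of that change, using the flow control sysctl documented
in the Flow Control section above (fc=0 is the minimum value, i.e. flow
control disabled; whether that is appropriate depends on your link
partner):
sysctl dev.ixl.<interface_num>.fc=0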
Disable LRO when routing/bridging
---------------------------------
LRO must be turned off when forwarding traffic.
Lower than expected performance
-------------------------------
Some PCIe x8 slots are actually configured as x4 slots. These slots have
insufficient bandwidth for full line rate with dual port and quad port
devices.
In addition, if you put a PCIe Generation 3-capable adapter into a PCIe
Generation 2 slot, you cannot get full bandwidth. The driver detects this
situation and writes the following message in the system log:
"PCI-Express bandwidth available for this card is not sufficient for
optimal performance. For optimal performance a x8 PCI-Express slot
is required."
If this error occurs, moving your adapter to a true PCIe Generation 3 x8
slot will resolve the issue.
Support
=======
For general information and support, go to the Intel support website at:
http://support.intel.com
If an issue is identified with the released source code on the supported kernel
with a supported adapter, email the specific information related to the issue
to freebsdnic@mailbox.intel.com.
License
=======
This software program is released under the terms of a license agreement
between you ('Licensee') and Intel. Do not use or load this software or any
associated materials (collectively, the 'Software') until you have carefully
read the full terms and conditions of the LICENSE located in this software
package. By loading or using the Software, you agree to the terms of this
Agreement. If you do not agree with the terms of this Agreement, do not
install or use the Software.
* Other names and brands may be claimed as the property of others.


@ -161,27 +161,25 @@ i40e_destroy_spinlock(struct i40e_spinlock *lock)
mtx_destroy(&lock->mutex);
}
static inline int
ixl_ms_scale(int x)
{
if (hz == 1000)
return (x);
else if (hz > 1000)
return (x*(hz/1000));
else
return (max(1, x/(1000/hz)));
}
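(A quick worked check of the scaling above; illustrative arithmetic, not
code from this commit: with hz=1000, ixl_ms_scale(5) returns 5 ticks; with
hz=4000, it returns 5*(4000/1000) = 20 ticks; with hz=100, integer
division gives 5/(1000/100) = 0, so max(1, 0) = 1 tick, ensuring short
delays never round down to zero ticks.)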
void
i40e_msec_pause(int msecs)
{
int ticks_to_pause = (msecs * hz) / 1000;
int start_ticks = ticks;
if (cold || SCHEDULER_STOPPED()) {
if (cold || SCHEDULER_STOPPED())
i40e_msec_delay(msecs);
return;
}
while (1) {
kern_yield(PRI_USER);
int yielded_ticks = ticks - start_ticks;
if (yielded_ticks > ticks_to_pause)
break;
else if (yielded_ticks < 0
&& (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
break;
}
}
else
// ERJ: (msecs * hz) could overflow
pause("ixl", ixl_ms_scale(msecs));
}
/*
@ -272,7 +270,5 @@ i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value)
{
pci_write_config(((struct i40e_osdep *)hw->back)->dev,
reg, value, 2);
return;
}


@ -48,7 +48,7 @@
* Driver version
*********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR 2
#define IXL_DRIVER_VERSION_MINOR 0
#define IXL_DRIVER_VERSION_MINOR 1
#define IXL_DRIVER_VERSION_BUILD 0
#define IXL_DRIVER_VERSION_STRING \
@ -115,10 +115,11 @@ static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void ixl_if_vflr_handle(if_ctx_t ctx);
// static void ixl_if_link_intr_enable(if_ctx_t ctx);
static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
#ifdef PCI_IOV
static void ixl_if_vflr_handle(if_ctx_t ctx);
#endif
/*** Other ***/
static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int);
@ -137,9 +138,9 @@ static device_method_t ixl_methods[] = {
DEVMETHOD(device_detach, iflib_device_detach),
DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
DEVMETHOD(pci_iov_init, ixl_iov_init),
DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
DEVMETHOD(pci_iov_init, iflib_device_iov_init),
DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
DEVMETHOD_END
};
@ -169,7 +170,6 @@ static device_method_t ixl_if_methods[] = {
DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
//DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable),
DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
@ -185,9 +185,14 @@ static device_method_t ixl_if_methods[] = {
DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
#ifdef PCI_IOV
DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
// ifdi_led_func
// ifdi_debug
DEVMETHOD_END
@ -202,7 +207,7 @@ static driver_t ixl_if_driver = {
*/
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
"IXL driver parameters");
"ixl driver parameters");
/*
* Leave this on unless you need to send flow control
@ -222,6 +227,13 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
&ixl_i2c_access_method, 0,
IXL_SYSCTL_HELP_I2C_METHOD);
static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
&ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
&ixl_enable_vf_loopback, 0,
IXL_SYSCTL_HELP_VF_LOOPBACK);
/*
* Different method for processing TX descriptor
* completion.
@ -333,9 +345,9 @@ ixl_register(device_t dev)
static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
int rid;
struct i40e_hw *hw = &pf->hw;
device_t dev = iflib_get_dev(pf->vsi.ctx);
struct i40e_hw *hw = &pf->hw;
int rid;
/* Map BAR0 */
rid = PCIR_BAR(0);
@ -386,21 +398,17 @@ ixl_if_attach_pre(if_ctx_t ctx)
enum i40e_status_code status;
int error = 0;
INIT_DEBUGOUT("ixl_if_attach_pre: begin");
INIT_DBG_DEV(dev, "begin");
/* Allocate, clear, and link in our primary soft structure */
dev = iflib_get_dev(ctx);
pf = iflib_get_softc(ctx);
vsi = &pf->vsi;
vsi->back = pf;
pf->dev = dev;
hw = &pf->hw;
/*
** Note this assumes we have a single embedded VSI,
** this could be enhanced later to allocate multiple
*/
//vsi->dev = pf->dev;
vsi->dev = dev;
vsi->hw = &pf->hw;
vsi->id = 0;
vsi->num_vlans = 0;
@ -545,6 +553,7 @@ ixl_if_attach_pre(if_ctx_t ctx)
* sizeof(struct i40e_tx_desc), DBA_ALIGN);
scctx->isc_txrx = &ixl_txrx_dwb;
}
scctx->isc_txrx->ift_legacy_intr = ixl_intr;
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
* sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
@ -556,7 +565,7 @@ ixl_if_attach_pre(if_ctx_t ctx)
scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
INIT_DEBUGOUT("ixl_if_attach_pre: end");
INIT_DBG_DEV(dev, "end");
return (0);
err_mac_hmc:
@ -579,7 +588,7 @@ ixl_if_attach_post(if_ctx_t ctx)
int error = 0;
enum i40e_status_code status;
INIT_DEBUGOUT("ixl_if_attach_post: begin");
INIT_DBG_DEV(dev, "begin");
dev = iflib_get_dev(ctx);
pf = iflib_get_softc(ctx);
@ -587,6 +596,10 @@ ixl_if_attach_post(if_ctx_t ctx)
vsi->ifp = iflib_get_ifp(ctx);
hw = &pf->hw;
/* Save off determined number of queues for interface */
vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
/* Setup OS network interface / ifnet */
if (ixl_setup_interface(dev, pf)) {
device_printf(dev, "interface setup failed!\n");
@ -694,6 +707,10 @@ ixl_if_attach_post(if_ctx_t ctx)
return (error);
}
/**
* XXX: iflib always ignores the return value of detach()
* -> This means that this isn't allowed to fail
*/
static int
ixl_if_detach(if_ctx_t ctx)
{
@ -702,7 +719,7 @@ ixl_if_detach(if_ctx_t ctx)
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
enum i40e_status_code status;
#if defined(PCI_IOV) || defined(IXL_IW)
#ifdef IXL_IW
int error;
#endif
@ -713,16 +730,9 @@ ixl_if_detach(if_ctx_t ctx)
error = ixl_iw_pf_detach(pf);
if (error == EBUSY) {
device_printf(dev, "iwarp in use; stop it first.\n");
return (error);
//return (error);
}
}
#endif
#ifdef PCI_IOV
error = pci_iov_detach(dev);
if (error != 0) {
device_printf(dev, "SR-IOV in use; detach first.\n");
return (error);
}
#endif
/* Remove all previously allocated media types */
ifmedia_removeall(vsi->media);
@ -751,7 +761,6 @@ ixl_if_detach(if_ctx_t ctx)
return (0);
}
/* TODO: Do shutdown-specific stuff here */
static int
ixl_if_shutdown(if_ctx_t ctx)
{
@ -796,43 +805,13 @@ ixl_if_resume(if_ctx_t ctx)
return (0);
}
/* Set Report Status queue fields to 0 */
static void
ixl_init_tx_rsqs(struct ixl_vsi *vsi)
{
if_softc_ctx_t scctx = vsi->shared;
struct ixl_tx_queue *tx_que;
int i, j;
for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
}
}
static void
ixl_init_tx_cidx(struct ixl_vsi *vsi)
{
struct ixl_tx_queue *tx_que;
int i;
for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txr->tx_cidx_processed = 0;
}
}
void
ixl_if_init(if_ctx_t ctx)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = &pf->hw;
struct ifnet *ifp = iflib_get_ifp(ctx);
device_t dev = iflib_get_dev(ctx);
u8 tmpaddr[ETHER_ADDR_LEN];
int ret;
@ -840,12 +819,12 @@ ixl_if_init(if_ctx_t ctx)
/*
* If the aq is dead here, it probably means something outside of the driver
* did something to the adapter, like a PF reset.
* So rebuild the driver's state here if that occurs.
* So, rebuild the driver's state here if that occurs.
*/
if (!i40e_check_asq_alive(&pf->hw)) {
device_printf(dev, "Admin Queue is down; resetting...\n");
ixl_teardown_hw_structs(pf);
ixl_reset(pf);
ixl_rebuild_hw_structs_after_reset(pf);
}
/* Get the latest mac address... User might use a LAA */
@ -872,8 +851,7 @@ ixl_if_init(if_ctx_t ctx)
return;
}
// TODO: Call iflib setup multicast filters here?
// It's called in ixgbe in D5213
/* Reconfigure multicast filters in HW */
ixl_if_multi_set(ctx);
/* Set up RSS */
@ -895,8 +873,12 @@ ixl_if_init(if_ctx_t ctx)
i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
/* Re-add configure filters to HW */
ixl_reconfigure_filters(vsi);
/* Configure promiscuous mode */
ixl_if_promisc_set(ctx, if_getflags(ifp));
#ifdef IXL_IW
if (ixl_enable_iwarp && pf->iw_enabled) {
ret = ixl_iw_pf_init(pf);
@ -923,7 +905,7 @@ ixl_if_stop(if_ctx_t ctx)
#endif
ixl_disable_rings_intr(vsi);
ixl_disable_rings(vsi);
ixl_disable_rings(pf, vsi, &pf->qtag);
}
static int
@ -936,6 +918,9 @@ ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
int err, i, rid, vector = 0;
char buf[16];
MPASS(vsi->shared->isc_nrxqsets > 0);
MPASS(vsi->shared->isc_ntxqsets > 0);
/* Admin Que must use vector 0*/
rid = vector + 1;
err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
@ -943,14 +928,14 @@ ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
if (err) {
iflib_irq_free(ctx, &vsi->irq);
device_printf(iflib_get_dev(ctx),
"Failed to register Admin que handler");
"Failed to register Admin Que handler");
return (err);
}
// TODO: Re-enable this at some point
// iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov");
/* Create soft IRQ for handling VFLRs */
iflib_softirq_alloc_generic(ctx, &pf->iov_irq, IFLIB_INTR_IOV, pf, 0, "iov");
/* Now set up the stations */
for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) {
for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
@ -960,7 +945,7 @@ ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
* what's expected in the iflib context? */
if (err) {
device_printf(iflib_get_dev(ctx),
"Failed to allocate q int %d err: %d", i, err);
"Failed to allocate queue RX int vector %d, err: %d\n", i, err);
vsi->num_rx_queues = i + 1;
goto fail;
}
@ -969,16 +954,16 @@ ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
bzero(buf, sizeof(buf));
for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
snprintf(buf, sizeof(buf), "txq%d", i);
iflib_softirq_alloc_generic(ctx,
&vsi->rx_queues[i % vsi->num_rx_queues].que_irq,
&vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* TODO: Maybe call a strategy function for this to figure out which
* interrupts to map Tx queues to. I don't know if there's an immediately
* better way than this other than a user-supplied map, though. */
tx_que->msix = (i % vsi->num_rx_queues) + 1;
tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
}
return (0);
@ -1051,11 +1036,10 @@ ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
struct i40e_hw *hw = vsi->hw;
struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
ixl_enable_queue(hw, tx_que->msix - 1);
return (0);
}
@ -1066,12 +1050,11 @@ ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxq
struct ixl_vsi *vsi = &pf->vsi;
if_softc_ctx_t scctx = vsi->shared;
struct ixl_tx_queue *que;
// int i;
int i, j, error = 0;
MPASS(vsi->num_tx_queues > 0);
MPASS(scctx->isc_ntxqsets > 0);
MPASS(ntxqs == 1);
MPASS(vsi->num_tx_queues == ntxqsets);
MPASS(scctx->isc_ntxqsets == ntxqsets);
/* Allocate queue structure memory */
if (!(vsi->tx_queues =
@ -1118,9 +1101,12 @@ ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxq
struct ixl_rx_queue *que;
int i, error = 0;
MPASS(vsi->num_rx_queues > 0);
#ifdef INVARIANTS
if_softc_ctx_t scctx = vsi->shared;
MPASS(scctx->isc_nrxqsets > 0);
MPASS(nrxqs == 1);
MPASS(vsi->num_rx_queues == nrxqsets);
MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif
/* Allocate queue structure memory */
if (!(vsi->rx_queues =
@ -1156,7 +1142,7 @@ ixl_if_queues_free(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
if (vsi->enable_head_writeback) {
if (!vsi->enable_head_writeback) {
struct ixl_tx_queue *que;
int i = 0;
@ -1208,6 +1194,20 @@ ixl_update_link_status(struct ixl_pf *pf)
}
}
static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
device_t dev = pf->dev;
u32 rxq_idx, qtx_ctl;
rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
qtx_ctl = e->desc.params.external.param1;
device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
@ -1248,7 +1248,7 @@ ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
* aren't currently configured.
*/
case i40e_aqc_opc_event_lan_overflow:
device_printf(dev, "LAN overflow event\n");
ixl_handle_lan_overflow_event(pf, &event);
break;
default:
break;
@ -1278,13 +1278,9 @@ ixl_if_update_admin_status(if_ctx_t ctx)
if (pf->state & IXL_PF_STATE_MDD_PENDING)
ixl_handle_mdd_event(pf);
#ifdef PCI_IOV
if (pf->state & IXL_PF_STATE_VF_RESET_REQ)
iflib_iov_intr_deferred(ctx);
#endif
ixl_process_adminq(pf, &pending);
ixl_update_link_status(pf);
ixl_update_stats_counters(pf);
/*
* If there are still messages to process, reschedule ourselves.
@ -1301,14 +1297,16 @@ ixl_if_multi_set(if_ctx_t ctx)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
int mcnt = 0, flags;
struct i40e_hw *hw = vsi->hw;
int mcnt = 0, flags;
int del_mcnt;
IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
/* delete existing MC filters */
ixl_del_multi(vsi);
/* Delete filters for removed multicast addresses */
del_mcnt = ixl_del_multi(vsi);
vsi->num_macs -= del_mcnt;
if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
i40e_aq_set_vsi_multicast_promiscuous(hw,
@ -1316,13 +1314,17 @@ ixl_if_multi_set(if_ctx_t ctx)
return;
}
/* (re-)install filters for all mcast addresses */
/* XXX: This bypasses filter count tracking code! */
mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
if (mcnt > 0) {
vsi->num_macs += mcnt;
flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
ixl_add_hw_filters(vsi, flags, mcnt);
}
ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
__func__, vsi->num_macs);
IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}
@ -1518,32 +1520,11 @@ ixl_if_promisc_set(if_ctx_t ctx, int flags)
static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
//struct i40e_hw *hw = &pf->hw;
//struct ixl_tx_queue *que = &vsi->tx_queues[qid];
#if 0
u32 mask;
/*
** Check status of the queues
*/
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
/* If queue param has outstanding work, trigger sw irq */
// TODO: TX queues in iflib don't use HW interrupts; does this do anything?
if (que->busy)
wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask);
#endif
if (qid != 0)
return;
/* Fire off the adminq task */
iflib_admin_intr_deferred(ctx);
/* Update stats */
ixl_update_stats_counters(pf);
}
static void
@ -1612,13 +1593,15 @@ ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
}
}
#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
IXL_DEV_ERR(iflib_get_dev(ctx), "");
struct ixl_pf *pf = iflib_get_softc(ctx);
// TODO: call ixl_handle_vflr()
ixl_handle_vflr(pf);
}
#endif
static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
@ -1676,6 +1659,7 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
pf->dbg_mask = ixl_core_debug_mask;
pf->hw.debug_mask = ixl_shared_debug_mask;
pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
pf->dynamic_tx_itr = ixl_dynamic_tx_itr;

(File diff suppressed because it is too large.)


@ -32,7 +32,6 @@
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXL_H_
#define _IXL_H_
@ -136,8 +135,6 @@
#define IXL_MSIX_BAR 3
#define IXL_ADM_LIMIT 2
// TODO: Find out which TSO_SIZE to use
//#define IXL_TSO_SIZE 65535
#define IXL_TSO_SIZE ((255*1024)-1)
#define IXL_TX_BUF_SZ ((u32) 1514)
#define IXL_AQ_BUF_SZ ((u32) 4096)
@ -145,12 +142,14 @@
#define IXL_TX_ITR 1
#define IXL_ITR_NONE 3
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MIN_FRAME 17
#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_RX_SEGS 5
#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 7
#define IXL_MIN_TSO_MSS 64
#define IXL_MAX_TSO_MSS 9668
#define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
#define IXL_RSS_KEY_SIZE_REG 13
@ -210,16 +209,6 @@
#define IXL_RX_CTX_BASE_UNITS 128
#define IXL_TX_CTX_BASE_UNITS 128
#if 0
#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \
I40E_VPINT_LNKLSTN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \
I40E_VFINT_DYN_CTLN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
#endif
#define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA
#define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20
@ -299,6 +288,9 @@
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
/* For stats sysctl naming */
#define QUEUE_NAME_LEN 32
#define IXL_DEV_ERR(_dev, _format, ...) \
device_printf(_dev, "%s: " _format " (%s:%d)\n", __func__, ##__VA_ARGS__, __FILE__, __LINE__)
@ -415,16 +407,15 @@ struct ixl_vsi {
if_ctx_t ctx;
if_softc_ctx_t shared;
struct ifnet *ifp;
//device_t dev;
device_t dev;
struct i40e_hw *hw;
struct ifmedia *media;
#define num_rx_queues shared->isc_nrxqsets
#define num_tx_queues shared->isc_ntxqsets
int num_rx_queues;
int num_tx_queues;
void *back;
enum i40e_vsi_type type;
// TODO: Remove?
u64 que_mask;
int id;
u32 rx_itr_setting;
u32 tx_itr_setting;
@ -541,9 +532,18 @@ struct ixl_sysctl_info {
extern const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN];
/* Common function prototypes between PF/VF driver */
void ixl_debug_core(device_t dev, u32 enabled_mask, u32 mask, char *fmt, ...);
void ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que);
void ixl_get_default_rss_key(u32 *);
const char * i40e_vc_stat_str(struct i40e_hw *hw,
enum virtchnl_status_code stat_err);
u64 ixl_max_aq_speed_to_value(u8);
void ixl_init_tx_rsqs(struct ixl_vsi *vsi);
void ixl_init_tx_cidx(struct ixl_vsi *vsi);
u64 ixl_max_vc_speed_to_value(u8 link_speeds);
void ixl_add_vsi_sysctls(device_t dev, struct ixl_vsi *vsi,
struct sysctl_ctx_list *ctx, const char *sysctl_name);
void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_eth_stats *eth_stats);
void ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi);
#endif /* _IXL_H_ */


@ -91,12 +91,9 @@ enum ixl_dbg_mask {
IXL_DBG_EN_DIS = 0x00000002,
IXL_DBG_AQ = 0x00000004,
IXL_DBG_NVMUPD = 0x00000008,
IXL_DBG_FILTER = 0x00000010,
IXL_DBG_IOCTL_KNOWN = 0x00000010,
IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
IXL_DBG_IOCTL_ALL = 0x00000030,
I40E_DEBUG_RSS = 0x00000100,
IXL_DEBUG_RSS = 0x00000100,
IXL_DBG_IOV = 0x00001000,
IXL_DBG_IOV_VC = 0x00002000,
@ -107,4 +104,20 @@ enum ixl_dbg_mask {
IXL_DBG_ALL = 0xFFFFFFFF
};
enum ixlv_dbg_mask {
IXLV_DBG_INFO = 0x00000001,
IXLV_DBG_EN_DIS = 0x00000002,
IXLV_DBG_AQ = 0x00000004,
IXLV_DBG_INIT = 0x00000008,
IXLV_DBG_FILTER = 0x00000010,
IXLV_DEBUG_RSS = 0x00000100,
IXLV_DBG_VC = 0x00001000,
IXLV_DBG_SWITCH_INFO = 0x00010000,
IXLV_DBG_ALL = 0xFFFFFFFF
};
#endif /* _IXL_DEBUG_H_ */


@ -87,10 +87,6 @@ struct ixl_vf {
/* Physical controller structure */
struct ixl_pf {
/*
* This is first so that iflib_get_softc can return
* either the VSI or the PF structures.
*/
struct ixl_vsi vsi;
struct i40e_hw hw;
@ -103,7 +99,6 @@ struct ixl_pf {
int iw_msix;
bool iw_enabled;
#endif
int if_flags;
u32 state;
u8 supported_speeds;
@ -111,13 +106,12 @@ struct ixl_pf {
struct ixl_pf_qtag qtag;
/* Tunable values */
bool enable_msix;
int max_queues;
bool enable_tx_fc_filter;
int dynamic_rx_itr;
int dynamic_tx_itr;
int tx_itr;
int rx_itr;
int enable_vf_loopback;
bool link_up;
int advertised_speed;
@ -126,7 +120,6 @@ struct ixl_pf {
bool has_i2c;
/* Misc stats maintained by the driver */
u64 watchdog_events;
u64 admin_irq;
/* Statistics from hw */
@ -145,8 +138,7 @@ struct ixl_pf {
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
struct task vflr_task;
int vc_debug_lvl;
struct if_irq iov_irq;
};
/*
@ -226,6 +218,12 @@ struct ixl_pf {
"\t3 - Use Admin Queue command (best)\n" \
"Using the Admin Queue is only supported on 710 devices with FW version 1.7 or higher"
#define IXL_SYSCTL_HELP_VF_LOOPBACK \
"\nDetermines mode that embedded device switch will use when SR-IOV is initialized:\n" \
"\t0 - Disable (VEPA)\n" \
"\t1 - Enable (VEB)\n" \
"Enabling this will allow VFs in separate VMs to communicate over the hardware bridge."
extern const char * const ixl_fc_string[6];
MALLOC_DECLARE(M_IXL);
@ -242,14 +240,10 @@ MALLOC_DECLARE(M_IXL);
ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
/* Debug printing */
#define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__)
void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
/* For stats sysctl naming */
#define QUEUE_NAME_LEN 32
/* For netmap(4) compatibility */
#define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi)
#define ixl_dbg(pf, m, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, m, s, ##__VA_ARGS__)
#define ixl_dbg_info(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_INFO, s, ##__VA_ARGS__)
#define ixl_dbg_filter(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_FILTER, s, ##__VA_ARGS__)
#define ixl_dbg_iov(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_IOV, s, ##__VA_ARGS__)
/* PF-only function declarations */
int ixl_setup_interface(device_t, struct ixl_pf *);
@ -292,7 +286,6 @@ void ixl_stat_update32(struct i40e_hw *, u32, bool,
u64 *, u64 *);
void ixl_stop(struct ixl_pf *);
void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
int ixl_get_hw_capabilities(struct ixl_pf *);
void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
@ -333,7 +326,7 @@ int ixl_aq_get_link_status(struct ixl_pf *,
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
void ixl_set_queue_rx_itr(struct ixl_rx_queue *);
void ixl_set_queue_tx_itr(struct ixl_tx_queue *);
@ -342,7 +335,7 @@ void ixl_add_filter(struct ixl_vsi *, const u8 *, s16 vlan);
void ixl_del_filter(struct ixl_vsi *, const u8 *, s16 vlan);
void ixl_reconfigure_filters(struct ixl_vsi *vsi);
int ixl_disable_rings(struct ixl_vsi *);
int ixl_disable_rings(struct ixl_pf *, struct ixl_vsi *, struct ixl_pf_qtag *);
int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
@ -364,11 +357,12 @@ void ixl_enable_intr(struct ixl_vsi *);
void ixl_disable_rings_intr(struct ixl_vsi *);
void ixl_set_promisc(struct ixl_vsi *);
void ixl_add_multi(struct ixl_vsi *);
void ixl_del_multi(struct ixl_vsi *);
int ixl_del_multi(struct ixl_vsi *);
void ixl_setup_vlan_filters(struct ixl_vsi *);
void ixl_init_filters(struct ixl_vsi *);
void ixl_add_hw_filters(struct ixl_vsi *, int, int);
void ixl_del_hw_filters(struct ixl_vsi *, int);
void ixl_del_default_hw_filters(struct ixl_vsi *);
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *, const u8 *, s16);
void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
@ -400,5 +394,6 @@ s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
int ixl_get_fw_lldp_status(struct ixl_pf *pf);
int ixl_attach_get_link_status(struct ixl_pf *);
u64 ixl_max_aq_speed_to_value(u8);
#endif /* _IXL_PF_H_ */


@ -77,14 +77,21 @@ static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
static int ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);
static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
/*
* TODO: Move pieces of this into iflib and call the rest in a handler?
*
* e.g. ixl_if_iov_set_schema
*
* It's odd to do pci_iov_detach() there while doing pci_iov_attach()
* in the driver.
*/
void
ixl_initialize_sriov(struct ixl_pf *pf)
{
return;
#if 0
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
nvlist_t *pf_schema, *vf_schema;
@ -101,7 +108,7 @@ ixl_initialize_sriov(struct ixl_pf *pf)
IOV_SCHEMA_HASDEFAULT, FALSE);
pci_iov_schema_add_uint16(vf_schema, "num-queues",
IOV_SCHEMA_HASDEFAULT,
max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);
max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES)));
iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
if (iov_error != 0) {
@ -110,9 +117,6 @@ ixl_initialize_sriov(struct ixl_pf *pf)
iov_error);
} else
device_printf(dev, "SR-IOV ready\n");
pf->vc_debug_lvl = 1;
#endif
}
@ -142,7 +146,9 @@ ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
vsi_ctx.info.switch_id = htole16(0);
if (pf->enable_vf_loopback)
vsi_ctx.info.switch_id =
htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
vsi_ctx.info.sec_flags = 0;
@ -157,7 +163,7 @@ ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
/* ERJ: Only scattered allocation is supported for VFs right now */
/* XXX: Only scattered allocation is supported for VFs right now */
for (i = 0; i < vf->qtag.num_active; i++)
vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
@ -172,8 +178,6 @@ ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
vf->vsi.seid = vsi_ctx.seid;
vf->vsi.vsi_num = vsi_ctx.vsi_number;
// TODO: How to deal with num tx queues / num rx queues split?
// I don't think just assigning this variable is going to work
vf->vsi.num_rx_queues = vf->qtag.num_active;
vf->vsi.num_tx_queues = vf->qtag.num_active;
@ -204,10 +208,15 @@ ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
if (error != 0)
return (error);
/* Let VF receive broadcast Ethernet frames */
error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
if (error)
device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
/* Re-add VF's MAC/VLAN filters to its VSI */
ixl_reconfigure_filters(&vf->vsi);
/* Reset stats? */
vf->vsi.hw_filters_add = 0;
vf->vsi.hw_filters_del = 0;
// ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
ixl_reconfigure_filters(&vf->vsi);
return (0);
}
@ -372,12 +381,16 @@ ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
hw = &pf->hw;
ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);
vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
ixl_flush(hw);
ixl_reinit_vf(pf, vf);
ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
}
static void
@ -413,7 +426,8 @@ ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
if (vf->vsi.seid != 0)
ixl_disable_rings(&vf->vsi);
ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
ixl_pf_qmgr_clear_queue_flags(&vf->qtag);
ixl_vf_release_resources(pf, vf);
ixl_vf_setup_vsi(pf, vf);
@ -649,7 +663,7 @@ ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
rxq.tphwdesc_ena = 1;
rxq.tphdata_ena = 1;
rxq.tphhead_ena = 1;
rxq.lrxqthresh = 2;
rxq.lrxqthresh = 1;
rxq.prefena = 1;
status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
@ -924,7 +938,7 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
continue;
/* Warn if this queue is already marked as enabled */
if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",
vf->vf_num, i);
error = ixl_enable_tx_ring(pf, &vf->qtag, i);
@ -949,7 +963,7 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
continue;
/* Warn if this queue is already marked as enabled */
if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
vf->vf_num, i);
error = ixl_enable_rx_ring(pf, &vf->qtag, i);
if (error)
@ -1003,7 +1017,7 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
continue;
/* Warn if this queue is already marked as disabled */
if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",
vf->vf_num, i);
continue;
}
@ -1029,7 +1043,7 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
continue;
/* Warn if this queue is already marked as disabled */
if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",
vf->vf_num, i);
continue;
}
@ -1292,6 +1306,7 @@ ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
void *msg, uint16_t msg_size)
{
struct virtchnl_promisc_info *info;
struct i40e_hw *hw = &pf->hw;
enum i40e_status_code code;
if (msg_size != sizeof(*info)) {
@ -1301,8 +1316,11 @@ ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
}
if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
/*
* Do the same thing as the Linux PF driver -- lie to the VF
*/
ixl_send_vf_ack(pf, vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
return;
}
@ -1313,19 +1331,25 @@ ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
return;
}
code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
if (code != I40E_SUCCESS) {
device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
" error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
i40e_aq_str(hw, hw->aq.asq_last_status));
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
if (code != I40E_SUCCESS) {
device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
" error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
i40e_aq_str(hw, hw->aq.asq_last_status));
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
@ -1627,19 +1651,25 @@ ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
void
ixl_handle_vflr(void *arg, int pending)
ixl_handle_vflr(struct ixl_pf *pf)
{
struct ixl_pf *pf;
struct ixl_vf *vf;
struct i40e_hw *hw;
uint16_t global_vf_num;
uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
int i;
pf = arg;
hw = &pf->hw;
/* TODO: May need to lock this */
ixl_dbg_iov(pf, "%s: begin\n", __func__);
/* Re-enable VFLR interrupt cause so driver doesn't miss a
* reset interrupt for another VF */
icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
ixl_flush(hw);
for (i = 0; i < pf->num_vfs; i++) {
global_vf_num = hw->func_caps.vf_base_id + i;
@ -1654,17 +1684,12 @@ ixl_handle_vflr(void *arg, int pending)
wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
vflrstat_mask);
ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
ixl_reinit_vf(pf, vf);
ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
}
}
atomic_clear_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
ixl_flush(hw);
// IXL_PF_UNLOCK()
}
static int
@ -1721,23 +1746,52 @@ ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
}
}
int
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
static int
ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
{
struct ixl_pf *pf;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_vsi_context ctxt;
int error;
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = vsi->seid;
if (pf->veb_seid != 0)
ctxt.uplink_seid = pf->veb_seid;
ctxt.pf_num = hw->pf_id;
ctxt.connection_type = IXL_VSI_DATA_PORT;
ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id = (enable) ?
htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;
/* error is set to 0 on success */
error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (error) {
device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
" aq_error %d\n", error, hw->aq.asq_last_status);
}
return (error);
}
int
ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
struct i40e_hw *hw;
struct ixl_vsi *pf_vsi;
enum i40e_status_code ret;
int i, error;
pf = device_get_softc(dev);
hw = &pf->hw;
pf_vsi = &pf->vsi;
//IXL_PF_LOCK(pf);
pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
M_ZERO);
if (pf->vfs == NULL) {
error = ENOMEM;
goto fail;
@ -1746,65 +1800,77 @@ ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
for (i = 0; i < num_vfs; i++)
sysctl_ctx_init(&pf->vfs[i].ctx);
/*
* Add the VEB and ...
* - do nothing: VEPA mode
* - enable loopback mode on connected VSIs: VEB mode
*/
ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1, FALSE, &pf->veb_seid, FALSE, NULL);
if (ret != I40E_SUCCESS) {
error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
device_printf(dev, "add_veb failed; code=%d error=%d", ret,
error);
error = hw->aq.asq_last_status;
device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
i40e_stat_str(hw, ret), i40e_aq_str(hw, error));
goto fail;
}
if (pf->enable_vf_loopback)
ixl_config_pf_vsi_loopback(pf, true);
/*
* Adding a VEB brings back the default MAC filter(s). Remove them,
* and let the driver add the proper filters back.
*/
ixl_del_default_hw_filters(pf_vsi);
ixl_reconfigure_filters(pf_vsi);
pf->num_vfs = num_vfs;
//IXL_PF_UNLOCK(pf);
return (0);
fail:
free(pf->vfs, M_IXL);
pf->vfs = NULL;
//IXL_PF_UNLOCK(pf);
return (error);
}
void
ixl_iov_uninit(device_t dev)
ixl_if_iov_uninit(if_ctx_t ctx)
{
struct ixl_pf *pf;
struct ixl_pf *pf = iflib_get_softc(ctx);
struct i40e_hw *hw;
struct ixl_vsi *vsi;
struct ifnet *ifp;
struct ixl_vf *vfs;
int i, num_vfs;
pf = device_get_softc(dev);
hw = &pf->hw;
vsi = &pf->vsi;
ifp = vsi->ifp;
//IXL_PF_LOCK(pf);
for (i = 0; i < pf->num_vfs; i++) {
if (pf->vfs[i].vsi.seid != 0)
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
ixl_free_mac_filters(&pf->vfs[i].vsi);
DDPRINTF(dev, "VF %d: %d released\n",
ixl_dbg_iov(pf, "VF %d: %d released\n",
i, pf->vfs[i].qtag.num_allocated);
DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
}
if (pf->veb_seid != 0) {
i40e_aq_delete_element(hw, pf->veb_seid, NULL);
pf->veb_seid = 0;
}
/* Reset PF VSI loopback mode */
if (pf->enable_vf_loopback)
ixl_config_pf_vsi_loopback(pf, false);
vfs = pf->vfs;
num_vfs = pf->num_vfs;
pf->vfs = NULL;
pf->num_vfs = 0;
//IXL_PF_UNLOCK(pf);
/* Do this after the unlock as sysctl_ctx_free might sleep. */
/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
for (i = 0; i < num_vfs; i++)
sysctl_ctx_free(&vfs[i].ctx);
free(vfs, M_IXL);
@ -1823,9 +1889,9 @@ ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
if (num_queues < 1) {
device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
num_queues = 1;
} else if (num_queues > 16) {
device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
num_queues = 16;
} else if (num_queues > IXLV_MAX_QUEUES) {
device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IXLV_MAX_QUEUES);
num_queues = IXLV_MAX_QUEUES;
}
error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
if (error) {
@ -1834,30 +1900,27 @@ ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
return (ENOSPC);
}
DDPRINTF(dev, "VF %d: %d allocated, %d active",
ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
return (0);
}
int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
device_t dev = pf->dev;
char sysctl_name[QUEUE_NAME_LEN];
struct ixl_pf *pf;
struct ixl_vf *vf;
const void *mac;
size_t size;
int error;
int vf_num_queues;
pf = device_get_softc(dev);
vf = &pf->vfs[vfnum];
//IXL_PF_LOCK(pf);
vf->vf_num = vfnum;
vf->vsi.back = pf;
vf->vf_flags = VF_FLAG_ENABLED;
SLIST_INIT(&vf->vsi.ftl);
@ -1893,12 +1956,12 @@ ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
vf->vf_flags |= VF_FLAG_VLAN_CAP;
/* VF needs to be reset before it can be used */
ixl_reset_vf(pf, vf);
out:
//IXL_PF_UNLOCK(pf);
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
ixl_add_vsi_sysctls(dev, &vf->vsi, &vf->ctx, sysctl_name);
}
return (error);


@ -45,19 +45,19 @@
/* Public functions */
/*
* These three are DEVMETHODs required for SR-IOV PF support.
* These three are DEVMETHODs required for SR-IOV PF support in iflib.
*/
int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
void ixl_iov_uninit(device_t dev);
int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
int ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params);
void ixl_if_iov_uninit(if_ctx_t ctx);
int ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params);
/*
* The standard PF driver needs to call these during normal execution when
* The base PF driver needs to call these during normal execution when
* SR-IOV mode is active.
*/
void ixl_initialize_sriov(struct ixl_pf *pf);
void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event);
void ixl_handle_vflr(void *arg, int pending);
void ixl_handle_vflr(struct ixl_pf *pf);
void ixl_broadcast_link_state(struct ixl_pf *pf);
#endif /* _IXL_PF_IOV_H_ */


@ -46,7 +46,6 @@
static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static void ixl_del_default_hw_filters(struct ixl_vsi *);
/* Sysctls */
static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
@ -113,21 +112,6 @@ static char *ixl_fec_string[3] = {
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
va_list args;
if (!(mask & pf->dbg_mask))
return;
/* Re-implement device_printf() */
device_print_prettyname(pf->dev);
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
@ -354,6 +338,7 @@ ixl_teardown_hw_structs(struct ixl_pf *pf)
"init: Admin Queue shutdown failure; status %s\n",
i40e_stat_str(hw, status));
ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
return (status);
}
@ -458,8 +443,7 @@ ixl_reset(struct ixl_pf *pf)
err_out:
return (error);
#endif
// TODO: Fix second parameter
ixl_rebuild_hw_structs_after_reset(pf, false);
ixl_rebuild_hw_structs_after_reset(pf);
/* The PF reset should have cleared any critical errors */
atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
@ -527,11 +511,11 @@ ixl_intr(void *arg)
int
ixl_msix_que(void *arg)
{
struct ixl_rx_queue *que = arg;
struct ixl_rx_queue *rx_que = arg;
++que->irqs;
++rx_que->irqs;
ixl_set_queue_rx_itr(que);
ixl_set_queue_rx_itr(rx_que);
// ixl_set_queue_tx_itr(que);
return (FILTER_SCHEDULE_THREAD);
@ -557,8 +541,10 @@ ixl_msix_adminq(void *arg)
++pf->admin_irq;
reg = rd32(hw, I40E_PFINT_ICR0);
// For masking off interrupt causes that need to be handled before
// they can be re-enabled
/*
* For masking off interrupt causes that need to be handled before
* they can be re-enabled
*/
mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* Check on the cause */
@ -637,11 +623,12 @@ ixl_msix_adminq(void *arg)
#ifdef PCI_IOV
if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
do_task = TRUE;
iflib_iov_intr_deferred(pf->vsi.ctx);
}
#endif
wr32(hw, I40E_PFINT_ICR0_ENA, mask);
ixl_enable_intr0(hw);
if (do_task)
return (FILTER_SCHEDULE_THREAD);
@ -703,7 +690,7 @@ ixl_add_multi(struct ixl_vsi *vsi)
IOCTL_DEBUGOUT("ixl_add_multi: end");
}
void
int
ixl_del_multi(struct ixl_vsi *vsi)
{
struct ifnet *ifp = vsi->ifp;
@ -738,6 +725,8 @@ ixl_del_multi(struct ixl_vsi *vsi)
if (mcnt > 0)
ixl_del_hw_filters(vsi, mcnt);
return (mcnt);
}
void
@ -1028,7 +1017,6 @@ ixl_setup_interface(device_t dev, struct ixl_pf *pf)
INIT_DBG_DEV(dev, "begin");
/* TODO: Remove VLAN_ENCAP_LEN? */
vsi->shared->isc_max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
@ -1067,6 +1055,29 @@ ixl_setup_interface(device_t dev, struct ixl_pf *pf)
return (0);
}
/*
* Input: bitmap of enum i40e_aq_link_speed
*/
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
if (link_speeds & I40E_LINK_SPEED_40GB)
return IF_Gbps(40);
if (link_speeds & I40E_LINK_SPEED_25GB)
return IF_Gbps(25);
if (link_speeds & I40E_LINK_SPEED_20GB)
return IF_Gbps(20);
if (link_speeds & I40E_LINK_SPEED_10GB)
return IF_Gbps(10);
if (link_speeds & I40E_LINK_SPEED_1GB)
return IF_Gbps(1);
if (link_speeds & I40E_LINK_SPEED_100MB)
return IF_Mbps(100);
else
/* Minimum supported link speed */
return IF_Mbps(100);
}
/*
** Run when the Admin Queue gets a link state change interrupt.
*/
@ -1194,7 +1205,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
* the driver may not use all of them).
*/
tc_queues = fls(pf->qtag.num_allocated) - 1;
ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
& I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
& I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
@ -1493,23 +1504,6 @@ ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
return;
}
void
ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
struct sysctl_ctx_list *ctx, const char *sysctl_name)
{
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
struct sysctl_oid_list *vsi_list;
tree = device_get_sysctl_tree(pf->dev);
child = SYSCTL_CHILDREN(tree);
vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
CTLFLAG_RD, NULL, "VSI Number");
vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
}
#ifdef IXL_DEBUG
/**
* ixl_sysctl_qtx_tail_handler
@ -1634,133 +1628,19 @@ ixl_add_hw_stats(struct ixl_pf *pf)
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct sysctl_oid_list *vsi_list, *queue_list;
struct sysctl_oid *queue_node;
char queue_namebuf[32];
struct ixl_rx_queue *rx_que;
struct ixl_tx_queue *tx_que;
struct tx_ring *txr;
struct rx_ring *rxr;
/* Driver statistics */
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &pf->watchdog_events,
"Watchdog timeouts");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &pf->admin_irq,
"Admin Queue IRQ Handled");
"Admin Queue IRQs received");
ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
/* Queue statistics */
for (int q = 0; q < vsi->num_rx_queues; q++) {
snprintf(queue_namebuf, QUEUE_NAME_LEN, "rxq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
queue_list = SYSCTL_CHILDREN(queue_node);
ixl_add_queues_sysctls(dev, vsi);
rx_que = &(vsi->rx_queues[q]);
rxr = &(rx_que->rxr);
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(rx_que->irqs),
"irqs on this queue (both Tx and Rx)");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
CTLFLAG_RD, &(rxr->rx_packets),
"Queue Packets Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
CTLFLAG_RD, &(rxr->desc_errs),
"Queue Rx Descriptor Errors");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
CTLFLAG_RD, &(rxr->itr), 0,
"Queue Rx ITR Interval");
#ifdef IXL_DEBUG
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
CTLTYPE_UINT | CTLFLAG_RD, rx_que,
sizeof(struct ixl_rx_queue),
ixl_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
#endif
}
for (int q = 0; q < vsi->num_tx_queues; q++) {
snprintf(queue_namebuf, QUEUE_NAME_LEN, "txq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
queue_list = SYSCTL_CHILDREN(queue_node);
tx_que = &(vsi->tx_queues[q]);
txr = &(tx_que->txr);
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
CTLFLAG_RD, &(tx_que->tso),
"TSO");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
CTLFLAG_RD, &(txr->mss_too_small),
"TSO sends with an MSS less than 64");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
CTLFLAG_RD, &(txr->tx_packets),
"Queue Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
CTLFLAG_RD, &(txr->tx_bytes),
"Queue Bytes Transmitted");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
#ifdef IXL_DEBUG
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
CTLTYPE_UINT | CTLFLAG_RD, tx_que,
sizeof(struct ixl_tx_queue),
ixl_sysctl_qtx_tail_handler, "IU",
"Queue Transmit Descriptor Tail");
#endif
}
/* MAC stats */
ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}
void
ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_eth_stats *eth_stats)
{
struct ixl_sysctl_info ctls[] =
{
{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
"Unicast Packets Received"},
{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
"Multicast Packets Received"},
{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
"Broadcast Packets Received"},
{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
{&eth_stats->tx_multicast, "mcast_pkts_txd",
"Multicast Packets Transmitted"},
{&eth_stats->tx_broadcast, "bcast_pkts_txd",
"Broadcast Packets Transmitted"},
// end
{0,0,0}
};
struct ixl_sysctl_info *entry = ctls;
while (entry->stat != 0)
{
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
CTLFLAG_RD, entry->stat,
entry->description);
entry++;
}
}
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
@ -1986,7 +1866,7 @@ ixl_setup_vlan_filters(struct ixl_vsi *vsi)
* configured which interferes with filters managed by driver.
* Make sure it's removed.
*/
static void
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
struct i40e_aqc_remove_macvlan_element_data e;
@ -2052,8 +1932,6 @@ ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
f->flags |= IXL_FILTER_MC;
else
printf("WARNING: no filter available!!\n");
return;
}
void
@ -2063,8 +1941,10 @@ ixl_reconfigure_filters(struct ixl_vsi *vsi)
}
/*
** This routine adds macvlan filters
*/
* This routine adds a MAC/VLAN filter to the software filter
* list, then adds that new filter to the HW if it doesn't already
* exist in the SW filter list.
*/
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
@ -2104,8 +1984,8 @@ ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
else
vsi->num_macs++;
f->flags |= IXL_FILTER_USED;
ixl_add_hw_filters(vsi, f->flags, 1);
return;
}
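As a hedged usage sketch, a caller installs an address across all VLANs by
passing the IXL_VLAN_ANY wildcard (the call site below is illustrative):

	/* Illustrative: add the interface's link-layer address on all VLANs. */
	ixl_add_filter(vsi, IF_LLADDR(vsi->ifp), IXL_VLAN_ANY);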
void
@ -2165,12 +2045,15 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
enum i40e_status_code status;
int j = 0;
MPASS(cnt > 0);
pf = vsi->back;
dev = iflib_get_dev(vsi->ctx);
dev = vsi->dev;
hw = &pf->hw;
if (cnt < 1) {
ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
return;
}
a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (a == NULL) {
@ -2197,6 +2080,9 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
f->flags &= ~IXL_FILTER_ADD;
j++;
ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
MAC_FORMAT_ARGS(f->macaddr));
}
if (j == cnt)
break;
@ -2232,7 +2118,7 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
pf = vsi->back;
hw = &pf->hw;
dev = iflib_get_dev(vsi->ctx);
dev = vsi->dev;
d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
@ -2252,6 +2138,10 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
} else {
e->vlan_tag = f->vlan;
}
ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
MAC_FORMAT_ARGS(f->macaddr));
/* delete entry from vsi list */
SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
free(f, M_DEVBUF);
@ -2456,22 +2346,167 @@ ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
return (error);
}
/* For PF VSI only */
int
ixl_disable_rings(struct ixl_vsi *vsi)
ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
{
struct ixl_pf *pf = vsi->back;
int error = 0;
int error = 0;
for (int i = 0; i < vsi->num_tx_queues; i++)
error = ixl_disable_tx_ring(pf, &pf->qtag, i);
error = ixl_disable_tx_ring(pf, qtag, i);
for (int i = 0; i < vsi->num_rx_queues; i++)
error = ixl_disable_rx_ring(pf, &pf->qtag, i);
error = ixl_disable_rx_ring(pf, qtag, i);
return (error);
}
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct ixl_vf *vf;
bool mdd_detected = false;
bool pf_mdd_detected = false;
bool vf_mdd_detected = false;
u16 vf_num, queue;
u8 pf_num, event;
u8 pf_mdet_num, vp_mdet_num;
u32 reg;
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
I40E_GL_MDET_TX_PF_NUM_SHIFT;
vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
I40E_GL_MDET_TX_VF_NUM_SHIFT;
event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
I40E_GL_MDET_TX_EVENT_SHIFT;
queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT;
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
}
if (!mdd_detected)
return;
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
pf_mdet_num = hw->pf_id;
pf_mdd_detected = true;
}
/* Check if MDD was caused by a VF */
for (int i = 0; i < pf->num_vfs; i++) {
vf = &(pf->vfs[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
vp_mdet_num = i;
vf->num_mdd_events++;
vf_mdd_detected = true;
}
}
/* Print out an error message */
if (vf_mdd_detected && pf_mdd_detected)
device_printf(dev,
"Malicious Driver Detection event %d"
" on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
else if (vf_mdd_detected && !pf_mdd_detected)
device_printf(dev,
"Malicious Driver Detection event %d"
" on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
event, queue, pf_num, vf_num, vp_mdet_num);
else if (!vf_mdd_detected && pf_mdd_detected)
device_printf(dev,
"Malicious Driver Detection event %d"
" on TX queue %d, pf number %d (PF-%d)\n",
event, queue, pf_num, pf_mdet_num);
/* Theoretically shouldn't happen */
else
device_printf(dev,
"TX Malicious Driver Detection event (unknown)\n");
}
static void
ixl_handle_rx_mdd_event(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct ixl_vf *vf;
bool mdd_detected = false;
bool pf_mdd_detected = false;
bool vf_mdd_detected = false;
u16 queue;
u8 pf_num, event;
u8 pf_mdet_num, vp_mdet_num;
u32 reg;
/*
* GL_MDET_RX doesn't contain VF number information, unlike
* GL_MDET_TX.
*/
reg = rd32(hw, I40E_GL_MDET_RX);
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
I40E_GL_MDET_RX_FUNCTION_SHIFT;
event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
I40E_GL_MDET_RX_EVENT_SHIFT;
queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT;
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
}
if (!mdd_detected)
return;
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
pf_mdet_num = hw->pf_id;
pf_mdd_detected = true;
}
/* Check if MDD was caused by a VF */
for (int i = 0; i < pf->num_vfs; i++) {
vf = &(pf->vfs[i]);
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
vp_mdet_num = i;
vf->num_mdd_events++;
vf_mdd_detected = true;
}
}
/* Print out an error message */
if (vf_mdd_detected && pf_mdd_detected)
device_printf(dev,
"Malicious Driver Detection event %d"
" on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
event, queue, pf_num, pf_mdet_num, vp_mdet_num);
else if (vf_mdd_detected && !pf_mdd_detected)
device_printf(dev,
"Malicious Driver Detection event %d"
" on RX queue %d, pf number %d, (VF-%d)\n",
event, queue, pf_num, vp_mdet_num);
else if (!vf_mdd_detected && pf_mdd_detected)
device_printf(dev,
"Malicious Driver Detection event %d"
" on RX queue %d, pf number %d (PF-%d)\n",
event, queue, pf_num, pf_mdet_num);
/* Theoretically shouldn't happen */
else
device_printf(dev,
"RX Malicious Driver Detection event (unknown)\n");
}
/**
* ixl_handle_mdd_event
*
@ -2482,93 +2517,15 @@ void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct ixl_vf *vf;
bool mdd_detected = false;
bool pf_mdd_detected = false;
bool vf_mdd_detected = false;
u32 reg;
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
I40E_GL_MDET_TX_PF_NUM_SHIFT;
u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
I40E_GL_MDET_TX_EVENT_SHIFT;
u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT;
device_printf(dev,
"Malicious Driver Detection event %d"
" on TX queue %d, pf number %d\n",
event, queue, pf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
}
reg = rd32(hw, I40E_GL_MDET_RX);
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
I40E_GL_MDET_RX_FUNCTION_SHIFT;
u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
I40E_GL_MDET_RX_EVENT_SHIFT;
u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT;
device_printf(dev,
"Malicious Driver Detection event %d"
" on RX queue %d, pf number %d\n",
event, queue, pf_num);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
}
/*
* Handle both TX/RX because it's possible they could
* both trigger in the same interrupt.
*/
ixl_handle_tx_mdd_event(pf);
ixl_handle_rx_mdd_event(pf);
if (mdd_detected) {
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
device_printf(dev,
"MDD TX event is for this function!\n");
pf_mdd_detected = true;
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
device_printf(dev,
"MDD RX event is for this function!\n");
pf_mdd_detected = true;
}
}
if (pf_mdd_detected) {
atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
goto end;
}
// Handle VF detection
for (int i = 0; i < pf->num_vfs && mdd_detected; i++) {
vf = &(pf->vfs[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
vf->num_mdd_events++;
device_printf(dev, "MDD TX event is for VF %d\n", i);
vf_mdd_detected = true;
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
vf->num_mdd_events++;
device_printf(dev, "MDD RX event is for VF %d\n", i);
vf_mdd_detected = true;
}
// TODO: Disable VF if there are too many MDD events from it
}
if (vf_mdd_detected)
atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
end:
atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
/* re-enable mdd interrupt cause */
@ -2578,14 +2535,12 @@ ixl_handle_mdd_event(struct ixl_pf *pf)
ixl_flush(hw);
}
/* This only enables HW interrupts for the RX queues */
void
ixl_enable_intr(struct ixl_vsi *vsi)
{
struct i40e_hw *hw = vsi->hw;
struct ixl_rx_queue *que = vsi->rx_queues;
// TODO: Check iflib interrupt mode instead?
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
for (int i = 0; i < vsi->num_rx_queues; i++, que++)
ixl_enable_queue(hw, que->rxr.me);
@ -2845,7 +2800,7 @@ ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
}
int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
@ -2902,6 +2857,25 @@ ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
if (error) {
device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
error);
error = EIO;
goto ixl_rebuild_hw_structs_after_reset_err;
}
error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
NULL);
if (error) {
device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
" aq_err %d\n", error, hw->aq.asq_last_status);
error = EIO;
goto ixl_rebuild_hw_structs_after_reset_err;
}
u8 set_fc_err_mask;
error = i40e_set_fc(hw, &set_fc_err_mask, true);
if (error) {
device_printf(dev, "init: setting link flow control failed; retcode %d,"
" fc_err_mask 0x%02x\n", error, set_fc_err_mask);
error = EIO;
goto ixl_rebuild_hw_structs_after_reset_err;
}
@ -2954,7 +2928,7 @@ ixl_handle_empr_reset(struct ixl_pf *pf)
ixl_dbg(pf, IXL_DBG_INFO,
"Reset wait count: %d\n", count);
ixl_rebuild_hw_structs_after_reset(pf, is_up);
ixl_rebuild_hw_structs_after_reset(pf);
atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
}
@ -3314,12 +3288,6 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
}
#ifdef PCI_IOV
SYSCTL_ADD_UINT(ctx, debug_list,
OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
0, "PF/VF Virtual Channel debug level");
#endif
}
/*
@ -3332,9 +3300,7 @@ ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
struct ixl_pf *pf = (struct ixl_pf *)arg1;
int queues;
//IXL_PF_LOCK(pf);
queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
//IXL_PF_UNLOCK(pf);
return sysctl_handle_int(oidp, NULL, queues, req);
}
@ -3998,44 +3964,72 @@ ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_mac_filter *f;
char *buf, *buf_i;
device_t dev = pf->dev;
int error = 0, ftl_len = 0, ftl_counter = 0;
int error = 0;
int ftl_len = 0;
int ftl_counter = 0;
int buf_len = 0;
int entry_len = 42;
struct sbuf *buf;
SLIST_FOREACH(f, &vsi->ftl, next) {
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
device_printf(dev, "Could not allocate sbuf for output.\n");
return (ENOMEM);
}
sbuf_printf(buf, "\n");
/* Print MAC filters */
sbuf_printf(buf, "PF Filters:\n");
SLIST_FOREACH(f, &vsi->ftl, next)
ftl_len++;
}
if (ftl_len < 1) {
sysctl_handle_string(oidp, "(none)", 6, req);
return (0);
}
buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
buf = buf_i = malloc(buf_len, M_DEVBUF, M_WAITOK);
sprintf(buf_i++, "\n");
SLIST_FOREACH(f, &vsi->ftl, next) {
sprintf(buf_i,
MAC_FORMAT ", vlan %4d, flags %#06x",
MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
buf_i += entry_len;
/* don't print '\n' for last entry */
if (++ftl_counter != ftl_len) {
sprintf(buf_i, "\n");
buf_i++;
if (ftl_len < 1)
sbuf_printf(buf, "(none)\n");
else {
SLIST_FOREACH(f, &vsi->ftl, next) {
sbuf_printf(buf,
MAC_FORMAT ", vlan %4d, flags %#06x",
MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
/* don't print '\n' for last entry */
if (++ftl_counter != ftl_len)
sbuf_printf(buf, "\n");
}
}
error = sysctl_handle_string(oidp, buf, strlen(buf), req);
#ifdef PCI_IOV
/* TODO: Give each VF its own filter list sysctl */
struct ixl_vf *vf;
if (pf->num_vfs > 0) {
sbuf_printf(buf, "\n\n");
for (int i = 0; i < pf->num_vfs; i++) {
vf = &pf->vfs[i];
if (!(vf->vf_flags & VF_FLAG_ENABLED))
continue;
vsi = &vf->vsi;
ftl_len = 0, ftl_counter = 0;
sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
SLIST_FOREACH(f, &vsi->ftl, next)
ftl_len++;
if (ftl_len < 1)
sbuf_printf(buf, "(none)\n");
else {
SLIST_FOREACH(f, &vsi->ftl, next) {
sbuf_printf(buf,
MAC_FORMAT ", vlan %4d, flags %#06x\n",
MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
}
}
}
}
#endif
error = sbuf_finish(buf);
if (error)
printf("sysctl error: %d\n", error);
free(buf, M_DEVBUF);
return error;
device_printf(dev, "Error finishing sbuf: %d\n", error);
sbuf_delete(buf);
return (error);
}
#define IXL_SW_RES_SIZE 0x14

View File

@ -45,7 +45,7 @@ ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues)
qmgr->num_queues = num_queues;
qmgr->qinfo = malloc(num_queues * sizeof(struct ixl_pf_qmgr_qinfo),
M_IXL, M_ZERO | M_WAITOK);
M_IXL, M_ZERO | M_NOWAIT);
if (qmgr->qinfo == NULL)
return ENOMEM;
@ -266,13 +266,29 @@ ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
return (qmgr->qinfo[pf_qidx].rx_configured);
}
void
ixl_pf_qmgr_clear_queue_flags(struct ixl_pf_qtag *qtag)
{
MPASS(qtag != NULL);
struct ixl_pf_qmgr *qmgr = qtag->qmgr;
for (u16 i = 0; i < qtag->num_allocated; i++) {
u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, i);
qmgr->qinfo[pf_qidx].tx_configured = 0;
qmgr->qinfo[pf_qidx].rx_configured = 0;
qmgr->qinfo[pf_qidx].rx_enabled = 0;
qmgr->qinfo[pf_qidx].tx_enabled = 0;
}
}
u16
ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index)
{
MPASS(index < qtag->num_allocated);
if (qtag->type == IXL_PF_QALLOC_CONTIGUOUS)
return qtag->qidx[0] + index;
return qtag->first_qidx + index;
else
return qtag->qidx[index];
}
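A brief usage sketch of the translation above (values hypothetical): a
contiguous allocation adds the index to first_qidx, while a scattered one
does a table lookup:

	/* Hypothetical: translate VSI-relative queue 2 to its PF queue. */
	u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, 2);
	/* contiguous with first_qidx == 8 yields 10; scattered yields qidx[2] */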

View File

@ -53,11 +53,11 @@
/* Manager */
struct ixl_pf_qmgr_qinfo {
bool allocated;
bool tx_enabled;
bool rx_enabled;
bool tx_configured;
bool rx_configured;
u8 allocated;
u8 tx_enabled;
u8 rx_enabled;
u8 tx_configured;
u8 rx_configured;
};
struct ixl_pf_qmgr {
@ -74,7 +74,10 @@ enum ixl_pf_qmgr_qalloc_type {
struct ixl_pf_qtag {
struct ixl_pf_qmgr *qmgr;
enum ixl_pf_qmgr_qalloc_type type;
u16 qidx[IXL_MAX_SCATTERED_QUEUES];
union {
u16 qidx[IXL_MAX_SCATTERED_QUEUES];
u16 first_qidx;
};
u16 num_allocated;
u16 num_active;
};
@ -101,6 +104,7 @@ void ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, boo
void ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
bool ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
bool ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
void ixl_pf_qmgr_clear_queue_flags(struct ixl_pf_qtag *qtag);
/* Public tag functions */
u16 ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index);

View File

@ -65,8 +65,6 @@ static int ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
qidx_t budget);
static int ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
extern int ixl_intr(void *arg);
struct if_txrx ixl_txrx_hwb = {
ixl_isc_txd_encap,
ixl_isc_txd_flush,
@ -75,7 +73,7 @@ struct if_txrx ixl_txrx_hwb = {
ixl_isc_rxd_pkt_get,
ixl_isc_rxd_refill,
ixl_isc_rxd_flush,
ixl_intr
NULL
};
struct if_txrx ixl_txrx_dwb = {
@ -86,7 +84,7 @@ struct if_txrx ixl_txrx_dwb = {
ixl_isc_rxd_pkt_get,
ixl_isc_rxd_refill,
ixl_isc_rxd_flush,
ixl_intr
NULL
};
/*
@ -133,6 +131,21 @@ i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err)
return hw->err_str;
}
void
ixl_debug_core(device_t dev, u32 enabled_mask, u32 mask, char *fmt, ...)
{
va_list args;
if (!(mask & enabled_mask))
return;
/* Re-implement device_printf() */
device_print_prettyname(dev);
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
static bool
ixl_is_tx_desc_done(struct tx_ring *txr, int idx)
{
@ -236,6 +249,8 @@ ixl_tx_setup_offload(struct ixl_tx_queue *que,
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
*off |= (pi->ipi_tcp_hlen >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
/* Check for NO_HEAD MDD event */
MPASS(pi->ipi_tcp_hlen != 0);
}
break;
case IPPROTO_UDP:
@ -268,23 +283,37 @@ ixl_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
if_softc_ctx_t scctx;
struct i40e_tx_context_desc *TXD;
u32 cmd, mss, type, tsolen;
int idx;
int idx, total_hdr_len;
u64 type_cmd_tso_mss;
idx = pi->ipi_pidx;
TXD = (struct i40e_tx_context_desc *) &txr->tx_base[idx];
tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
total_hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
tsolen = pi->ipi_len - total_hdr_len;
scctx = txr->que->vsi->shared;
type = I40E_TX_DESC_DTYPE_CONTEXT;
cmd = I40E_TX_CTX_DESC_TSO;
/* TSO MSS must not be less than 64 */
/*
* TSO MSS must not be less than 64; this prevents a
* BAD_LSO_MSS MDD event when the MSS is too small.
*/
if (pi->ipi_tso_segsz < IXL_MIN_TSO_MSS) {
txr->mss_too_small++;
pi->ipi_tso_segsz = IXL_MIN_TSO_MSS;
}
mss = pi->ipi_tso_segsz;
/* Check for BAD_LSO_MSS MDD event (mss too large) */
MPASS(mss <= IXL_MAX_TSO_MSS);
/* Check for NO_HEAD MDD event (header lengths are 0) */
MPASS(pi->ipi_ehdrlen != 0);
MPASS(pi->ipi_ip_hlen != 0);
/* Partial check for BAD_LSO_LEN MDD event */
MPASS(tsolen != 0);
/* Partial check for WRONG_SIZE MDD event (during TSO) */
MPASS(total_hdr_len + mss <= IXL_MAX_FRAME);
type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
@ -319,20 +348,16 @@ ixl_isc_txd_encap(void *arg, if_pkt_info_t pi)
int i, j, mask, pidx_last;
u32 cmd, off, tx_intr;
// device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__);
cmd = off = 0;
i = pi->ipi_pidx;
tx_intr = (pi->ipi_flags & IPI_TX_INTR);
#if 0
device_printf(iflib_get_dev(vsi->ctx), "%s: tx_intr %d\n", __func__, tx_intr);
#endif
/* Set up the TSO/CSUM offload */
if (pi->ipi_csum_flags & CSUM_OFFLOAD) {
/* Set up the TSO context descriptor if required */
if (pi->ipi_csum_flags & CSUM_TSO) {
/* Prevent MAX_BUFF MDD event (for TSO) */
if (ixl_tso_detect_sparse(segs, nsegs, pi))
return (EFBIG);
i = ixl_tso_setup(txr, pi);
@ -344,12 +369,21 @@ ixl_isc_txd_encap(void *arg, if_pkt_info_t pi)
cmd |= I40E_TX_DESC_CMD_ICRC;
mask = scctx->isc_ntxd[0] - 1;
/* Check for WRONG_SIZE MDD event */
MPASS(pi->ipi_len >= IXL_MIN_FRAME);
#ifdef INVARIANTS
if (!(pi->ipi_csum_flags & CSUM_TSO))
MPASS(pi->ipi_len <= IXL_MAX_FRAME);
#endif
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
txd = &txr->tx_base[i];
seglen = segs[j].ds_len;
/* Check for ZERO_BSIZE MDD event */
MPASS(seglen != 0);
txd->buffer_addr = htole64(segs[j].ds_addr);
txd->cmd_type_offset_bsz =
htole64(I40E_TX_DESC_DTYPE_DATA
@ -387,6 +421,8 @@ ixl_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
* Advance the Transmit Descriptor Tail (Tdt); this tells the
* hardware that this frame is available to transmit.
*/
/* Check for ENDLESS_TX MDD event */
MPASS(pidx < vsi->shared->isc_ntxd[0]);
wr32(vsi->hw, txr->tail, pidx);
}
@ -406,9 +442,7 @@ ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que)
(sizeof(struct i40e_tx_desc)) *
(vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0)));
// TODO: Write max descriptor index instead of 0?
wr32(vsi->hw, txr->tail, 0);
wr32(vsi->hw, I40E_QTX_HEAD(txr->me), 0);
}
/*
@ -470,9 +504,14 @@ ixl_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear)
MPASS(cur != QIDX_INVALID);
is_done = ixl_is_tx_desc_done(txr, cur);
if (clear == false || !is_done)
if (!is_done)
return (0);
/* If clear is false, just let the caller know that there
* are descriptors to reclaim */
if (!clear)
return (1);
prev = txr->tx_cidx_processed;
ntxd = scctx->isc_ntxd[0];
do {
@ -547,14 +586,6 @@ ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
nrxd = vsi->shared->isc_nrxd[0];
if (budget == 1) {
rxd = &rxr->rx_base[idx];
qword = le64toh(rxd->wb.qword1.status_error_len);
status = (qword & I40E_RXD_QW1_STATUS_MASK)
>> I40E_RXD_QW1_STATUS_SHIFT;
return !!(status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
}
for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) {
rxd = &rxr->rx_base[i];
qword = le64toh(rxd->wb.qword1.status_error_len);
@ -657,7 +688,7 @@ ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
MPASS((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0);
ri->iri_len += plen;
rxr->bytes += plen;
rxr->rx_bytes += plen;
cur->wb.qword1.status_error_len = 0;
eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
@ -745,25 +776,179 @@ ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype)
ri->iri_csum_data |= htons(0xffff);
}
/* Set Report Status queue fields to 0 */
void
ixl_init_tx_rsqs(struct ixl_vsi *vsi)
{
if_softc_ctx_t scctx = vsi->shared;
struct ixl_tx_queue *tx_que;
int i, j;
for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
}
}
void
ixl_init_tx_cidx(struct ixl_vsi *vsi)
{
struct ixl_tx_queue *tx_que;
int i;
for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txr->tx_cidx_processed = 0;
}
}
/*
* Input: bitmap of enum i40e_aq_link_speed
* Input: bitmap of enum virtchnl_link_speed
*/
u64
ixl_max_aq_speed_to_value(u8 link_speeds)
ixl_max_vc_speed_to_value(u8 link_speeds)
{
if (link_speeds & I40E_LINK_SPEED_40GB)
if (link_speeds & VIRTCHNL_LINK_SPEED_40GB)
return IF_Gbps(40);
if (link_speeds & I40E_LINK_SPEED_25GB)
if (link_speeds & VIRTCHNL_LINK_SPEED_25GB)
return IF_Gbps(25);
if (link_speeds & I40E_LINK_SPEED_20GB)
if (link_speeds & VIRTCHNL_LINK_SPEED_20GB)
return IF_Gbps(20);
if (link_speeds & I40E_LINK_SPEED_10GB)
if (link_speeds & VIRTCHNL_LINK_SPEED_10GB)
return IF_Gbps(10);
if (link_speeds & I40E_LINK_SPEED_1GB)
if (link_speeds & VIRTCHNL_LINK_SPEED_1GB)
return IF_Gbps(1);
if (link_speeds & I40E_LINK_SPEED_100MB)
if (link_speeds & VIRTCHNL_LINK_SPEED_100MB)
return IF_Mbps(100);
else
/* Minimum supported link speed */
return IF_Mbps(100);
}
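The VF driver can hand this value to iflib when reporting link state; a
hedged sketch (the call site is illustrative; iflib_link_state_change()
takes the baudrate as its third argument):

	/* Sketch: report link up at the speed negotiated over virtchnl. */
	iflib_link_state_change(vsi->ctx, LINK_STATE_UP,
	    ixl_max_vc_speed_to_value(sc->link_speed));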
void
ixl_add_vsi_sysctls(device_t dev, struct ixl_vsi *vsi,
struct sysctl_ctx_list *ctx, const char *sysctl_name)
{
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
struct sysctl_oid_list *vsi_list;
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
CTLFLAG_RD, NULL, "VSI Number");
vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
}
void
ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_eth_stats *eth_stats)
{
struct ixl_sysctl_info ctls[] =
{
{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
"Unicast Packets Received"},
{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
"Multicast Packets Received"},
{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
"Broadcast Packets Received"},
{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
{&eth_stats->tx_multicast, "mcast_pkts_txd",
"Multicast Packets Transmitted"},
{&eth_stats->tx_broadcast, "bcast_pkts_txd",
"Broadcast Packets Transmitted"},
// end
{0,0,0}
};
struct ixl_sysctl_info *entry = ctls;
while (entry->stat != 0)
{
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
CTLFLAG_RD, entry->stat,
entry->description);
entry++;
}
}
void
ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid_list *vsi_list, *queue_list;
struct sysctl_oid *queue_node;
char queue_namebuf[32];
struct ixl_rx_queue *rx_que;
struct ixl_tx_queue *tx_que;
struct tx_ring *txr;
struct rx_ring *rxr;
vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
/* Queue statistics */
for (int q = 0; q < vsi->num_rx_queues; q++) {
bzero(queue_namebuf, sizeof(queue_namebuf));
snprintf(queue_namebuf, QUEUE_NAME_LEN, "rxq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
queue_list = SYSCTL_CHILDREN(queue_node);
rx_que = &(vsi->rx_queues[q]);
rxr = &(rx_que->rxr);
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(rx_que->irqs),
"irqs on this queue (both Tx and Rx)");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
CTLFLAG_RD, &(rxr->rx_packets),
"Queue Packets Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
CTLFLAG_RD, &(rxr->desc_errs),
"Queue Rx Descriptor Errors");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
CTLFLAG_RD, &(rxr->itr), 0,
"Queue Rx ITR Interval");
}
for (int q = 0; q < vsi->num_tx_queues; q++) {
bzero(queue_namebuf, sizeof(queue_namebuf));
snprintf(queue_namebuf, QUEUE_NAME_LEN, "txq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
queue_list = SYSCTL_CHILDREN(queue_node);
tx_que = &(vsi->tx_queues[q]);
txr = &(tx_que->txr);
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
CTLFLAG_RD, &(tx_que->tso),
"TSO");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
CTLFLAG_RD, &(txr->mss_too_small),
"TSO sends with an MSS less than 64");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
CTLFLAG_RD, &(txr->tx_packets),
"Queue Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
CTLFLAG_RD, &(txr->tx_bytes),
"Queue Bytes Transmitted");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
}
}

View File

@ -36,13 +36,12 @@
#ifndef _IXLV_H_
#define _IXLV_H_
#include "ixlv_vc_mgr.h"
#include "ixl.h"
#define IXLV_AQ_MAX_ERR 200
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
#define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */
#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0)
#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
@ -58,42 +57,38 @@
#define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11)
#define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12)
#define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13)
#define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
#define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
/* printf %b arg */
/* printf %b flag args */
#define IXLV_FLAGS \
"\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \
"\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS"
"\12CONFIGURE_PROMISC\13GET_STATS\14CONFIG_RSS_KEY" \
"\15SET_RSS_HENA\16GET_RSS_HENA_CAPS\17CONFIG_RSS_LUT"
#define IXLV_PRINTF_VF_OFFLOAD_FLAGS \
"\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \
"\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \
"\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \
"\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \
"\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \
"\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \
"\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \
"\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \
"\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \
"\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF"
"\20\1L2" \
"\2IWARP" \
"\3RSVD" \
"\4RSS_AQ" \
"\5RSS_REG" \
"\6WB_ON_ITR" \
"\7REQ_QUEUES" \
"\21VLAN" \
"\22RX_POLLING" \
"\23RSS_PCTYPE_V2" \
"\24RSS_PF" \
"\25ENCAP" \
"\26ENCAP_CSUM" \
"\27RX_ENCAP_CSUM"
static MALLOC_DEFINE(M_IXLV, "ixlv", "ixlv driver allocations");
MALLOC_DECLARE(M_IXLV);
/* Driver state */
enum ixlv_state_t {
IXLV_START,
IXLV_FAILED,
IXLV_RESET_REQUIRED,
IXLV_RESET_PENDING,
IXLV_VERSION_CHECK,
IXLV_GET_RESOURCES,
IXLV_INIT_READY,
IXLV_INIT_START,
IXLV_INIT_CONFIG,
IXLV_INIT_MAPPING,
IXLV_INIT_ENABLE,
IXLV_INIT_COMPLETE,
IXLV_RUNNING,
};
@ -115,77 +110,48 @@ SLIST_HEAD(vlan_list, ixlv_vlan_filter);
/* Software controller structure */
struct ixlv_sc {
struct ixl_vsi vsi;
struct i40e_hw hw;
struct i40e_osdep osdep;
device_t dev;
struct resource *pci_mem;
struct resource *msix_mem;
enum ixlv_state_t init_state;
int init_in_progress;
/*
* Interrupt resources
*/
void *tag;
struct resource *res; /* For the AQ */
struct ifmedia media;
struct callout timer;
int msix;
int pf_version;
int if_flags;
struct virtchnl_version_info version;
enum ixl_dbg_mask dbg_mask;
u16 promisc_flags;
bool link_up;
enum virtchnl_link_speed link_speed;
struct mtx mtx;
u32 qbase;
u32 admvec;
struct timeout_task timeout;
#ifdef notyet
struct task aq_irq;
struct task aq_sched;
#endif
struct ixl_vsi vsi;
/* Tunable settings */
int tx_itr;
int rx_itr;
int dynamic_tx_itr;
int dynamic_rx_itr;
/* Filter lists */
struct mac_list *mac_filters;
struct vlan_list *vlan_filters;
/* Promiscuous mode */
u32 promiscuous_flags;
/* Admin queue task flags */
u32 aq_wait_count;
struct ixl_vc_mgr vc_mgr;
struct ixl_vc_cmd add_mac_cmd;
struct ixl_vc_cmd del_mac_cmd;
struct ixl_vc_cmd config_queues_cmd;
struct ixl_vc_cmd map_vectors_cmd;
struct ixl_vc_cmd enable_queues_cmd;
struct ixl_vc_cmd add_vlan_cmd;
struct ixl_vc_cmd del_vlan_cmd;
struct ixl_vc_cmd add_multi_cmd;
struct ixl_vc_cmd del_multi_cmd;
struct ixl_vc_cmd config_rss_key_cmd;
struct ixl_vc_cmd get_rss_hena_caps_cmd;
struct ixl_vc_cmd set_rss_hena_cmd;
struct ixl_vc_cmd config_rss_lut_cmd;
/* Virtual comm channel */
struct virtchnl_vf_resource *vf_res;
struct virtchnl_vsi_resource *vsi_res;
/* Misc stats maintained by the driver */
u64 watchdog_events;
u64 admin_irq;
/* Buffer used for reading AQ responses */
u8 aq_buffer[IXL_AQ_BUF_SZ];
/* State flag used in init/stop */
u32 queues_enabled;
u8 enable_queues_chan;
u8 disable_queues_chan;
};
/*
@ -203,6 +169,13 @@ ixlv_check_ether_addr(u8 *addr)
return (status);
}
/* Debug printing */
#define ixlv_dbg(sc, m, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, m, s, ##__VA_ARGS__)
#define ixlv_dbg_init(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_INIT, s, ##__VA_ARGS__)
#define ixlv_dbg_info(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_INFO, s, ##__VA_ARGS__)
#define ixlv_dbg_vc(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_VC, s, ##__VA_ARGS__)
#define ixlv_dbg_filter(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_FILTER, s, ##__VA_ARGS__)
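A hedged usage sketch: each wrapper prints only when its mask bit is set in
sc->dbg_mask (the mask value below is illustrative):

	/* Illustrative: enable virtchnl tracing, then log an opcode. */
	sc->dbg_mask = IXLV_DBG_VC;
	ixlv_dbg_vc(sc, "sending opcode %d\n", VIRTCHNL_OP_ENABLE_QUEUES);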
/*
** VF Common function prototypes
*/
@ -214,28 +187,32 @@ int ixlv_send_vf_config_msg(struct ixlv_sc *);
int ixlv_get_vf_config(struct ixlv_sc *);
void ixlv_init(void *);
int ixlv_reinit_locked(struct ixlv_sc *);
void ixlv_configure_queues(struct ixlv_sc *);
void ixlv_enable_queues(struct ixlv_sc *);
void ixlv_disable_queues(struct ixlv_sc *);
void ixlv_map_queues(struct ixlv_sc *);
int ixlv_configure_queues(struct ixlv_sc *);
int ixlv_enable_queues(struct ixlv_sc *);
int ixlv_disable_queues(struct ixlv_sc *);
int ixlv_map_queues(struct ixlv_sc *);
void ixlv_enable_intr(struct ixl_vsi *);
void ixlv_disable_intr(struct ixl_vsi *);
void ixlv_add_ether_filters(struct ixlv_sc *);
void ixlv_del_ether_filters(struct ixlv_sc *);
void ixlv_request_stats(struct ixlv_sc *);
void ixlv_request_reset(struct ixlv_sc *);
int ixlv_add_ether_filters(struct ixlv_sc *);
int ixlv_del_ether_filters(struct ixlv_sc *);
int ixlv_request_stats(struct ixlv_sc *);
int ixlv_request_reset(struct ixlv_sc *);
void ixlv_vc_completion(struct ixlv_sc *,
enum virtchnl_ops, enum virtchnl_status_code,
u8 *, u16);
void ixlv_add_ether_filter(struct ixlv_sc *);
void ixlv_add_vlans(struct ixlv_sc *);
void ixlv_del_vlans(struct ixlv_sc *);
int ixlv_add_ether_filter(struct ixlv_sc *);
int ixlv_add_vlans(struct ixlv_sc *);
int ixlv_del_vlans(struct ixlv_sc *);
void ixlv_update_stats_counters(struct ixlv_sc *,
struct i40e_eth_stats *);
void ixlv_update_link_status(struct ixlv_sc *);
void ixlv_get_default_rss_key(u32 *, bool);
void ixlv_config_rss_key(struct ixlv_sc *);
void ixlv_set_rss_hena(struct ixlv_sc *);
void ixlv_config_rss_lut(struct ixlv_sc *);
int ixlv_get_default_rss_key(u32 *, bool);
int ixlv_config_rss_key(struct ixlv_sc *);
int ixlv_set_rss_hena(struct ixlv_sc *);
int ixlv_config_rss_lut(struct ixlv_sc *);
int ixlv_config_promisc_mode(struct ixlv_sc *);
int ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request);
char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
void *ixl_vc_get_op_chan(struct ixlv_sc *sc, uint32_t request);
#endif /* _IXLV_H_ */

View File

@ -1,76 +0,0 @@
/******************************************************************************
Copyright (c) 2013-2018, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXLV_VC_MGR_H_
#define _IXLV_VC_MGR_H_
#include <sys/queue.h>
struct ixl_vc_cmd;
typedef void ixl_vc_callback_t(struct ixl_vc_cmd *, void *,
enum i40e_status_code);
#define IXLV_VC_CMD_FLAG_BUSY 0x0001
struct ixl_vc_cmd
{
uint32_t request;
uint32_t flags;
ixl_vc_callback_t *callback;
void *arg;
TAILQ_ENTRY(ixl_vc_cmd) next;
};
struct ixl_vc_mgr
{
struct ixlv_sc *sc;
struct ixl_vc_cmd *current;
struct callout callout;
TAILQ_HEAD(, ixl_vc_cmd) pending;
};
#define IXLV_VC_TIMEOUT (2 * hz)
void ixl_vc_init_mgr(struct ixlv_sc *, struct ixl_vc_mgr *);
void ixl_vc_enqueue(struct ixl_vc_mgr *, struct ixl_vc_cmd *,
uint32_t, ixl_vc_callback_t *, void *);
void ixl_vc_flush(struct ixl_vc_mgr *mgr);
#endif

File diff suppressed because it is too large

View File

@ -153,6 +153,7 @@ SUBDIR= \
${_hwpmc_mips74k} \
${_hyperv} \
i2c \
${_iavf} \
${_ibcore} \
${_ibcs2} \
${_ichwd} \
@ -717,9 +718,9 @@ _x86bios= x86bios
.if ${MACHINE_CPUARCH} == "amd64"
_ccp= ccp
_efirt= efirt
_iavf= iavf
_ioat= ioat
_ixl= ixl
_ixlv= ixlv
_linux64= linux64
_linux_common= linux_common
_ntb= ntb

View File

@ -2,9 +2,9 @@
.PATH: ${SRCTOP}/sys/dev/ixl
KMOD = if_ixlv
KMOD = if_iavf
SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h opt_global.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source
@ -12,5 +12,7 @@ SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c
# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG
# Enable asserts and other debugging facilities
# CFLAGS += -DINVARIANTS -DINVARIANT_SUPPORT -DWITNESS
.include <bsd.kmod.mk>

View File

@ -6,7 +6,7 @@ KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
SRCS.PCI_IOV = pci_iov_if.h ixl_pf_iov.c
SRCS.PCI_IOV += pci_iov_if.h ixl_pf_iov.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c i40e_dcb.c
@ -14,7 +14,11 @@ SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c i40e
# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG
#CFLAGS += -DIXL_IW
#SRCS += ixl_iw.c
# Enable asserts and other debugging facilities
# CFLAGS += -DINVARIANTS -DINVARIANT_SUPPORT -DWITNESS
# Enable iWARP client interface
# CFLAGS += -DIXL_IW
# SRCS += ixl_iw.c
.include <bsd.kmod.mk>

View File

@ -101,6 +101,10 @@ __FBSDID("$FreeBSD$");
#include <x86/iommu/busdma_dmar.h>
#endif
#ifdef PCI_IOV
#include <dev/pci/pci_iov.h>
#endif
#include <sys/bitstring.h>
/*
* enable accounting of every mbuf as it comes in to and goes out of
@ -157,9 +161,9 @@ typedef struct iflib_filter_info {
struct iflib_ctx {
KOBJ_FIELDS;
/*
* Pointer to hardware driver's softc
*/
/*
* Pointer to hardware driver's softc
*/
void *ifc_softc;
device_t ifc_dev;
if_t ifc_ifp;
@ -178,7 +182,6 @@ struct iflib_ctx {
uint32_t ifc_if_flags;
uint32_t ifc_flags;
uint32_t ifc_max_fl_buf_size;
int ifc_in_detach;
int ifc_link_state;
int ifc_link_irq;
@ -253,12 +256,6 @@ iflib_get_flags(if_ctx_t ctx)
return (ctx->ifc_flags);
}
void
iflib_set_detach(if_ctx_t ctx)
{
ctx->ifc_in_detach = 1;
}
void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{
@ -571,6 +568,13 @@ rxd_info_zero(if_rxd_info_t ri)
#define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
void
iflib_set_detach(if_ctx_t ctx)
{
STATE_LOCK(ctx);
ctx->ifc_flags |= IFC_IN_DETACH;
STATE_UNLOCK(ctx);
}
/* Our boot-time initialization hook */
static int iflib_module_event_handler(module_t, int, void *);
@ -738,6 +742,7 @@ static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif
@ -2072,6 +2077,16 @@ __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
_iflib_fl_refill(ctx, fl, min(max, reclaimable));
}
uint8_t
iflib_in_detach(if_ctx_t ctx)
{
bool in_detach;
STATE_LOCK(ctx);
in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
STATE_UNLOCK(ctx);
return (in_detach);
}
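Drivers can use this accessor to bail out of deferred work once teardown
has begun; a hedged sketch:

	/* Sketch: skip deferred admin work while iflib is detaching. */
	if (iflib_in_detach(vsi->ctx))
		return;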
static void
iflib_fl_bufs_free(iflib_fl_t fl)
{
@ -2087,7 +2102,8 @@ iflib_fl_bufs_free(iflib_fl_t fl)
if (fl->ifl_sds.ifsd_map != NULL) {
bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
if (fl->ifl_rxq->ifr_ctx->ifc_in_detach)
// XXX: Should this get moved out?
if (iflib_in_detach(fl->ifl_rxq->ifr_ctx))
bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
}
if (*sd_m != NULL) {
@ -3842,7 +3858,7 @@ _task_fn_admin(void *context)
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
iflib_txq_t txq;
int i;
bool oactive, running, do_reset, do_watchdog;
bool oactive, running, do_reset, do_watchdog, in_detach;
uint32_t reset_on = hz / 2;
STATE_LOCK(ctx);
@ -3850,11 +3866,13 @@ _task_fn_admin(void *context)
oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
do_reset = (ctx->ifc_flags & IFC_DO_RESET);
do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
STATE_UNLOCK(ctx);
if ((!running & !oactive) &&
!(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
return;
if (in_detach)
return;
CTX_LOCK(ctx);
@ -3893,7 +3911,8 @@ _task_fn_iov(void *context)
{
if_ctx_t ctx = context;
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
!(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
return;
CTX_LOCK(ctx);
@ -4680,17 +4699,18 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
ctx->ifc_flags |= IFC_INIT_DONE;
CTX_UNLOCK(ctx);
return (0);
fail_detach:
ether_ifdetach(ctx->ifc_ifp);
fail_intr_free:
if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
pci_release_msi(ctx->ifc_dev);
fail_queues:
iflib_tx_structures_free(ctx);
iflib_rx_structures_free(ctx);
fail:
iflib_free_intr_mem(ctx);
IFDI_DETACH(ctx);
CTX_UNLOCK(ctx);
return (err);
}
@ -4975,12 +4995,21 @@ iflib_device_deregister(if_ctx_t ctx)
/* Make sure VLANS are not using driver */
if (if_vlantrunkinuse(ifp)) {
device_printf(dev,"Vlan in use, detach first\n");
device_printf(dev, "Vlan in use, detach first\n");
return (EBUSY);
}
#ifdef PCI_IOV
if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
device_printf(dev, "SR-IOV in use; detach first.\n");
return (EBUSY);
}
#endif
STATE_LOCK(ctx);
ctx->ifc_flags |= IFC_IN_DETACH;
STATE_UNLOCK(ctx);
CTX_LOCK(ctx);
ctx->ifc_in_detach = 1;
iflib_stop(ctx);
CTX_UNLOCK(ctx);
@ -5021,8 +5050,26 @@ iflib_device_deregister(if_ctx_t ctx)
/* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/
CTX_LOCK_DESTROY(ctx);
device_set_softc(ctx->ifc_dev, NULL);
iflib_free_intr_mem(ctx);
bus_generic_detach(dev);
if_free(ifp);
iflib_tx_structures_free(ctx);
iflib_rx_structures_free(ctx);
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
free(ctx->ifc_softc, M_IFLIB);
STATE_LOCK_DESTROY(ctx);
free(ctx, M_IFLIB);
return (0);
}
static void
iflib_free_intr_mem(if_ctx_t ctx)
{
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
pci_release_msi(dev);
pci_release_msi(ctx->ifc_dev);
}
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
@ -5032,19 +5079,8 @@ iflib_device_deregister(if_ctx_t ctx)
ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
ctx->ifc_msix_mem = NULL;
}
bus_generic_detach(dev);
if_free(ifp);
iflib_tx_structures_free(ctx);
iflib_rx_structures_free(ctx);
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
free(ctx->ifc_softc, M_IFLIB);
free(ctx, M_IFLIB);
return (0);
}
int
iflib_device_detach(device_t dev)
{
@ -5215,7 +5251,7 @@ iflib_register(if_ctx_t ctx)
CTX_LOCK_INIT(ctx);
STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "can not allocate ifnet structure\n");
return (ENOMEM);
@ -5399,7 +5435,7 @@ iflib_queues_alloc(if_ctx_t ctx)
fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
}
/* Allocate receive buffers for the ring*/
/* Allocate receive buffers for the ring */
if (iflib_rxsd_alloc(rxq)) {
device_printf(dev,
"Critical Failure setting up receive buffers\n");
@ -5554,6 +5590,8 @@ iflib_rx_structures_free(if_ctx_t ctx)
for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
iflib_rx_sds_free(rxq);
}
free(ctx->ifc_rxqs, M_IFLIB);
ctx->ifc_rxqs = NULL;
}
static int
@ -5814,7 +5852,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
}
void
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
{
struct grouptask *gtask;
struct taskqgroup *tqg;
@ -6132,8 +6170,9 @@ iflib_msix_init(if_ctx_t ctx)
if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
if (tx_queues != rx_queues)
device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
min(rx_queues, tx_queues), min(rx_queues, tx_queues));
device_printf(dev,
"queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
tx_queues = min(rx_queues, tx_queues);
rx_queues = min(rx_queues, tx_queues);
@ -6143,8 +6182,7 @@ iflib_msix_init(if_ctx_t ctx)
vectors = rx_queues + admincnt;
if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
device_printf(dev,
"Using MSIX interrupts with %d vectors\n", vectors);
device_printf(dev, "Using MSIX interrupts with %d vectors\n", vectors);
scctx->isc_vectors = vectors;
scctx->isc_nrxqsets = rx_queues;
scctx->isc_ntxqsets = tx_queues;
@ -6152,7 +6190,8 @@ iflib_msix_init(if_ctx_t ctx)
return (vectors);
} else {
device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
device_printf(dev,
"failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
bus_release_resource(dev, SYS_RES_MEMORY, bar,
ctx->ifc_msix_mem);
ctx->ifc_msix_mem = NULL;
@ -6463,6 +6502,15 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
}
void
iflib_request_reset(if_ctx_t ctx)
{
STATE_LOCK(ctx);
ctx->ifc_flags |= IFC_DO_RESET;
STATE_UNLOCK(ctx);
}
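A driver typically pairs this with a deferred admin interrupt so that
_task_fn_admin() performs the reinit; a hedged sketch (call site
illustrative):

	/* Sketch: request a reinit and kick the admin task. */
	iflib_request_reset(vsi->ctx);
	iflib_admin_intr_deferred(vsi->ctx);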
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)

View File

@ -246,7 +246,7 @@ struct if_shared_ctx {
/* fields necessary for probe */
pci_vendor_info_t *isc_vendor_info;
char *isc_driver_version;
/* optional function to transform the read values to match the table*/
/* optional function to transform the read values to match the table*/
void (*isc_parse_devinfo) (uint16_t *device_id, uint16_t *subvendor_id,
uint16_t *subdevice_id, uint16_t *rev_id);
int isc_nrxd_min[8];
@ -375,6 +375,8 @@ if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx);
if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx);
void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]);
void iflib_request_reset(if_ctx_t ctx);
uint8_t iflib_in_detach(if_ctx_t ctx);
/*
* If the driver can plug cleanly in to newbus use these

View File

@ -42,6 +42,7 @@
#define IFC_DO_WATCHDOG 0x100
#define IFC_CHECK_HUNG 0x200
#define IFC_PSEUDO 0x400
#define IFC_IN_DETACH 0x800
#define IFC_NETMAP_TX_IRQ 0x80000000