2015-03-17 18:32:28 +00:00
|
|
|
/******************************************************************************
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
Copyright (c) 2001-2017, Intel Corporation
|
2015-03-17 18:32:28 +00:00
|
|
|
All rights reserved.
|
2017-07-05 17:27:03 +00:00
|
|
|
|
|
|
|
Redistribution and use in source and binary forms, with or without
|
2015-03-17 18:32:28 +00:00
|
|
|
modification, are permitted provided that the following conditions are met:
|
2017-07-05 17:27:03 +00:00
|
|
|
|
|
|
|
1. Redistributions of source code must retain the above copyright notice,
|
2015-03-17 18:32:28 +00:00
|
|
|
this list of conditions and the following disclaimer.
|
2017-07-05 17:27:03 +00:00
|
|
|
|
|
|
|
2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer in the
|
2015-03-17 18:32:28 +00:00
|
|
|
documentation and/or other materials provided with the distribution.
|
2017-07-05 17:27:03 +00:00
|
|
|
|
|
|
|
3. Neither the name of the Intel Corporation nor the names of its
|
|
|
|
contributors may be used to endorse or promote products derived from
|
2015-03-17 18:32:28 +00:00
|
|
|
this software without specific prior written permission.
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2015-03-17 18:32:28 +00:00
|
|
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
2017-07-05 17:27:03 +00:00
|
|
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
|
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
2015-03-17 18:32:28 +00:00
|
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
|
|
|
******************************************************************************/
|
|
|
|
/*$FreeBSD$*/
|
|
|
|
|
|
|
|
|
|
|
|
#include "opt_inet.h"
|
|
|
|
#include "opt_inet6.h"
|
2019-11-05 06:34:20 +00:00
|
|
|
#include "opt_rss.h"
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
#include "ixgbe.h"
|
2017-12-20 18:15:06 +00:00
|
|
|
#include "ifdi_if.h"
|
|
|
|
|
|
|
|
#include <net/netmap.h>
|
|
|
|
#include <dev/netmap/netmap_kern.h>
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
 * Driver version
 ************************************************************************/
/* Reported to iflib via ixv_sctx_init.isc_driver_version below. */
char ixv_driver_version[] = "2.0.1-k";
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* PCI Device ID Table
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Used by probe to select devices to load on
|
|
|
|
* Last field stores an index into ixv_strings
|
|
|
|
* Last entry must be all 0s
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
|
|
|
|
************************************************************************/
|
2017-12-20 18:15:06 +00:00
|
|
|
static pci_vendor_info_t ixv_vendor_info_array[] =
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
|
|
|
|
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
|
|
|
|
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
|
|
|
|
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
|
|
|
|
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
|
2015-03-17 18:32:28 +00:00
|
|
|
/* required last entry */
|
2017-12-20 18:15:06 +00:00
|
|
|
PVID_END
|
2015-03-17 18:32:28 +00:00
|
|
|
};
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* iflib device registration */
static void *ixv_register(device_t dev);

/* iflib device interface (ifdi) entry points */
static int  ixv_if_attach_pre(if_ctx_t ctx);
static int  ixv_if_attach_post(if_ctx_t ctx);
static int  ixv_if_detach(if_ctx_t ctx);

static int  ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static int  ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static int  ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void ixv_if_queues_free(if_ctx_t ctx);

/* Hardware identification / feature setup */
static void ixv_identify_hardware(if_ctx_t ctx);
static void ixv_init_device_features(struct adapter *);
static int  ixv_allocate_pci_resources(if_ctx_t ctx);
static void ixv_free_pci_resources(if_ctx_t ctx);
static int  ixv_setup_interface(if_ctx_t ctx);

/* Media / admin state */
static void ixv_if_media_status(if_ctx_t , struct ifmediareq *);
static int  ixv_if_media_change(if_ctx_t ctx);
static void ixv_if_update_admin_status(if_ctx_t ctx);
static int  ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);

/* Init / stop / timer */
static int  ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixv_if_init(if_ctx_t ctx);
static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
static void ixv_if_stop(if_ctx_t ctx);

/* PF/VF mailbox API negotiation */
static int  ixv_negotiate_api(struct adapter *);

/* Hardware ring initialization */
static void ixv_initialize_transmit_units(if_ctx_t ctx);
static void ixv_initialize_receive_units(if_ctx_t ctx);
static void ixv_initialize_rss_mapping(struct adapter *);

/* VLAN and interrupt vector configuration */
static void ixv_setup_vlan_support(if_ctx_t ctx);
static void ixv_configure_ivars(struct adapter *);
static void ixv_if_enable_intr(if_ctx_t ctx);
static void ixv_if_disable_intr(if_ctx_t ctx);
static void ixv_if_multi_set(if_ctx_t ctx);

static void ixv_if_register_vlan(if_ctx_t, u16);
static void ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);

/* Statistics handling */
static void ixv_save_stats(struct adapter *);
static void ixv_init_stats(struct adapter *);
static void ixv_update_stats(struct adapter *);
static void ixv_add_stats_sysctls(struct adapter *adapter);

/* Sysctl and interrupt-vector helpers */
static int  ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct adapter *, u8, u8, s8);

/* Multicast address iterator used by the shared code */
static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int ixv_msix_que(void *);
static int ixv_msix_mbx(void *);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
 * FreeBSD Device Interface Entry Points
 *
 *   Only device_register is driver-specific; probe/attach/detach/
 *   shutdown are delegated to the generic iflib implementations,
 *   which call back into the ifdi methods in ixv_if_methods below.
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};
|
|
|
|
|
|
|
|
/* newbus driver glue: name, method table, and softc size for ixv(4) */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};
|
|
|
|
|
2015-03-18 05:05:30 +00:00
|
|
|
devclass_t ixv_devclass;
/* Attach ixv to the PCI bus and export PNP info for devmatch(8) autoloading. */
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
/* Module load-order dependencies. */
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/*
 * iflib device interface (ifdi) method table: iflib calls these for
 * attach/detach, queue setup, interrupt control, and media handling.
 */
static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	/*
	 * NOTE(review): tx_queue_intr_enable is mapped to the RX handler;
	 * presumably TX and RX share a vector on the VF — confirm intended.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD_END
};
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/* Pseudo-driver handed to iflib (via isc_driver) carrying the ifdi methods. */
static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct adapter)
};
|
2015-06-01 17:43:34 +00:00
|
|
|
|
2015-03-17 18:32:28 +00:00
|
|
|
/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
/* NOTE(review): nothing in this chunk reads ixv_flow_control — confirm
 * it is still consumed after the iflib conversion. */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
/* NOTE(review): likewise, no reader of ixv_header_split is visible here. */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
|
2017-12-20 18:15:06 +00:00
|
|
|
/* TX/RX fast-path methods shared with the PF driver (defined in ix_txrx.c). */
extern struct if_txrx ixgbe_txrx;

/*
 * Shared context template handed to iflib from ixv_register().
 * Describes DMA constraints, queue counts, and descriptor-ring sizing
 * for the VF; iflib copies and owns this after attach.
 */
static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	/* Max TX DMA size: TSO payload plus room for a software VLAN header. */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	/* Single-segment RX up to a 16KB jumbo cluster. */
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,		/* one free list per RX queue */
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,	/* one vector reserved for the mailbox */
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

	/* Descriptor ring sizing limits and defaults (one qset each). */
	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/*
 * device_register method: hand iflib our shared context template.
 * (dev is unused; iflib only needs the if_shared_ctx.)
 */
static void *
ixv_register(device_t dev)
{
	return (ixv_sctx);
}
|
2017-07-05 17:27:03 +00:00
|
|
|
|
|
|
|
/************************************************************************
 * ixv_if_tx_queues_alloc
 *
 *   Allocate the software TX queue state (ix_tx_queue array plus the
 *   per-ring report-status array) and wire each software ring to the
 *   hardware descriptor memory iflib has already DMA-allocated.
 *
 *   ctx      - iflib context
 *   vaddrs   - virtual addresses of the hardware descriptor rings
 *   paddrs   - physical addresses of the hardware descriptor rings
 *   ntxqs    - rings per queue set (must be 1 for ixv)
 *   ntxqsets - number of queue sets
 *
 *   return 0 on success, ENOMEM on allocation failure
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(adapter->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	adapter->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!adapter->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->adapter = que->adapter = adapter;

		/* Allocate report status array */
		/* tx_rsq maps descriptor slots back to iflib pidx values. */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		/* Reset per-ring statistics. */
		txr->bytes = 0;
		txr->total_packets = 0;

	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    adapter->num_tx_queues);

	return (0);

fail:
	/* Frees any queues (and tx_rsq arrays) allocated so far. */
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_rx_queues_alloc
|
|
|
|
************************************************************************/
|
|
|
|
static int
|
|
|
|
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
|
|
|
|
int nrxqs, int nrxqsets)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
struct ix_rx_queue *que;
|
|
|
|
int i, error;
|
|
|
|
|
|
|
|
MPASS(adapter->num_rx_queues == nrxqsets);
|
|
|
|
MPASS(nrxqs == 1);
|
|
|
|
|
|
|
|
/* Allocate queue structure memory */
|
|
|
|
adapter->rx_queues =
|
|
|
|
(struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
|
|
|
|
M_DEVBUF, M_NOWAIT | M_ZERO);
|
|
|
|
if (!adapter->rx_queues) {
|
|
|
|
device_printf(iflib_get_dev(ctx),
|
|
|
|
"Unable to allocate TX ring memory\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
|
|
|
|
struct rx_ring *rxr = &que->rxr;
|
|
|
|
rxr->me = i;
|
|
|
|
rxr->adapter = que->adapter = adapter;
|
|
|
|
|
|
|
|
|
|
|
|
/* get the virtual and physical address of the hw queues */
|
|
|
|
rxr->tail = IXGBE_VFRDT(rxr->me);
|
|
|
|
rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
|
|
|
|
rxr->rx_paddr = paddrs[i*nrxqs];
|
|
|
|
rxr->bytes = 0;
|
|
|
|
rxr->que = que;
|
|
|
|
}
|
|
|
|
|
|
|
|
device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
|
|
|
|
adapter->num_rx_queues);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
ixv_if_queues_free(ctx);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
} /* ixv_if_rx_queues_alloc */
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
* ixv_if_queues_free
|
|
|
|
************************************************************************/
|
|
|
|
static void
|
|
|
|
ixv_if_queues_free(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
struct ix_tx_queue *que = adapter->tx_queues;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (que == NULL)
|
|
|
|
goto free;
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_tx_queues; i++, que++) {
|
|
|
|
struct tx_ring *txr = &que->txr;
|
|
|
|
if (txr->tx_rsq == NULL)
|
|
|
|
break;
|
|
|
|
|
|
|
|
free(txr->tx_rsq, M_DEVBUF);
|
|
|
|
txr->tx_rsq = NULL;
|
|
|
|
}
|
|
|
|
if (adapter->tx_queues != NULL)
|
|
|
|
free(adapter->tx_queues, M_DEVBUF);
|
|
|
|
free:
|
|
|
|
if (adapter->rx_queues != NULL)
|
|
|
|
free(adapter->rx_queues, M_DEVBUF);
|
|
|
|
adapter->tx_queues = NULL;
|
|
|
|
adapter->rx_queues = NULL;
|
|
|
|
} /* ixv_if_queues_free */
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
* ixv_if_attach_pre - Device initialization routine
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Called when the driver is being loaded.
|
|
|
|
* Identifies the type of hardware, allocates all resources
|
|
|
|
* and initializes the hardware.
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* return 0 on success, positive on failure
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct adapter *adapter;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	adapter->dev = dev;
	adapter->ctx = ctx;
	adapter->hw.back = adapter;
	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
	adapter->media = iflib_get_media(ctx);
	hw = &adapter->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	    "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset the VF; the PF must be up for this to succeed. */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		/* Clear the multicast bit, set the locally-administered bit. */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	/* X550-class VFs support two queue pairs; older VFs only one. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	/* TX ring size includes space for the head write-back (u32). */
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */
|
|
|
|
|
|
|
|
static int
|
|
|
|
ixv_if_attach_post(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
device_t dev = iflib_get_dev(ctx);
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
/* Setup OS specific network interface */
|
|
|
|
error = ixv_setup_interface(ctx);
|
2015-12-23 22:45:17 +00:00
|
|
|
if (error) {
|
2017-12-20 18:15:06 +00:00
|
|
|
device_printf(dev, "Interface setup failed: %d\n", error);
|
|
|
|
goto end;
|
2015-12-23 22:45:17 +00:00
|
|
|
}
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
/* Do the stats setup */
|
|
|
|
ixv_save_stats(adapter);
|
|
|
|
ixv_init_stats(adapter);
|
|
|
|
ixv_add_stats_sysctls(adapter);
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
end:
|
|
|
|
return error;
|
|
|
|
} /* ixv_if_attach_post */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_detach - Device removal routine
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Called when the driver is being removed.
|
|
|
|
* Stops the adapter and deallocates all the resources
|
|
|
|
* that were allocated for driver operation.
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* return 0 on success, positive on failure
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static int
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_detach(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
|
|
|
INIT_DEBUGOUT("ixv_detach: begin");
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_free_pci_resources(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
return (0);
|
|
|
|
} /* ixv_if_detach */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_if_mtu_set
|
|
|
|
************************************************************************/
|
|
|
|
static int
|
|
|
|
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
struct ifnet *ifp = iflib_get_ifp(ctx);
|
|
|
|
int error = 0;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
|
|
|
|
if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
|
|
|
|
error = EINVAL;
|
|
|
|
} else {
|
|
|
|
ifp->if_mtu = mtu;
|
|
|
|
adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
|
|
|
|
}
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
return error;
|
|
|
|
} /* ixv_if_mtu_set */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_init - Init entry point
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Used in two ways: It is used by the stack as an init entry
|
|
|
|
* point in network interface structure. It is also used
|
|
|
|
* by the driver as a hw/sw initialization routine to get
|
|
|
|
* to a consistent state.
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
 *   No return value; negotiation failures are logged via device_printf.
|
|
|
|
************************************************************************/
|
2017-12-20 18:15:06 +00:00
|
|
|
static void
ixv_if_init(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	/* Clear the stopped flag first so stop_adapter() actually runs. */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error) {
		/* Without an agreed mailbox API we cannot talk to the PF. */
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	/* Cache the RX mbuf size iflib chose for this configuration. */
	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_enable_queue
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static inline void
|
|
|
|
ixv_enable_queue(struct adapter *adapter, u32 vector)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
2017-07-05 17:27:03 +00:00
|
|
|
u32 queue = 1 << vector;
|
|
|
|
u32 mask;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_enable_queue */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_disable_queue
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static inline void
|
|
|
|
ixv_disable_queue(struct adapter *adapter, u32 vector)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
2017-07-05 17:27:03 +00:00
|
|
|
u64 queue = (u64)(1 << vector);
|
|
|
|
u32 mask;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_disable_queue */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_msix_que - MSI-X Queue Interrupt Service routine
|
2017-07-05 17:27:03 +00:00
|
|
|
************************************************************************/
|
2017-12-20 18:15:06 +00:00
|
|
|
static int
|
2015-03-17 18:32:28 +00:00
|
|
|
ixv_msix_que(void *arg)
|
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct ix_rx_queue *que = arg;
|
|
|
|
struct adapter *adapter = que->adapter;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
ixv_disable_queue(adapter, que->msix);
|
|
|
|
++que->irqs;
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
return (FILTER_SCHEDULE_THREAD);
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_msix_que */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_msix_mbx
|
|
|
|
************************************************************************/
|
2017-12-20 18:15:06 +00:00
|
|
|
static int
|
2015-03-17 18:32:28 +00:00
|
|
|
ixv_msix_mbx(void *arg)
|
|
|
|
{
|
2017-07-05 17:27:03 +00:00
|
|
|
struct adapter *adapter = arg;
|
2015-03-17 18:32:28 +00:00
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
2017-07-05 17:27:03 +00:00
|
|
|
u32 reg;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2015-04-30 22:53:27 +00:00
|
|
|
++adapter->link_irq;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
/* First get the cause */
|
|
|
|
reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
|
|
|
|
/* Clear interrupt with write */
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
|
|
|
|
|
|
|
|
/* Link status change */
|
|
|
|
if (reg & IXGBE_EICR_LSC)
|
2017-12-20 18:15:06 +00:00
|
|
|
iflib_admin_intr_deferred(adapter->ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
return (FILTER_HANDLED);
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_msix_mbx */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_media_status - Media Ioctl callback
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Called whenever the user queries the status of
|
|
|
|
* the interface using ifconfig.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
INIT_DEBUGOUT("ixv_media_status: begin");
|
2017-12-20 18:15:06 +00:00
|
|
|
|
|
|
|
iflib_admin_intr_deferred(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
ifmr->ifm_status = IFM_AVALID;
|
|
|
|
ifmr->ifm_active = IFM_ETHER;
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
if (!adapter->link_active)
|
2015-03-17 18:32:28 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
ifmr->ifm_status |= IFM_ACTIVE;
|
|
|
|
|
|
|
|
switch (adapter->link_speed) {
|
|
|
|
case IXGBE_LINK_SPEED_1GB_FULL:
|
|
|
|
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
|
|
|
|
break;
|
|
|
|
case IXGBE_LINK_SPEED_10GB_FULL:
|
2017-07-05 17:27:03 +00:00
|
|
|
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
|
|
|
|
break;
|
|
|
|
case IXGBE_LINK_SPEED_100_FULL:
|
|
|
|
ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
|
|
|
|
break;
|
|
|
|
case IXGBE_LINK_SPEED_10_FULL:
|
|
|
|
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
|
2015-03-17 18:32:28 +00:00
|
|
|
break;
|
|
|
|
}
|
2017-12-20 18:15:06 +00:00
|
|
|
} /* ixv_if_media_status */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_media_change - Media Ioctl callback
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Called when the user changes speed/duplex using
|
|
|
|
* media/mediopt option with ifconfig.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static int
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_media_change(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
struct ifmedia *ifm = iflib_get_media(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
INIT_DEBUGOUT("ixv_media_change: begin");
|
|
|
|
|
|
|
|
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
|
|
|
|
return (EINVAL);
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
switch (IFM_SUBTYPE(ifm->ifm_media)) {
|
|
|
|
case IFM_AUTO:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
device_printf(adapter->dev, "Only auto media type\n");
|
2015-03-17 18:32:28 +00:00
|
|
|
return (EINVAL);
|
2017-07-05 17:27:03 +00:00
|
|
|
}
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
return (0);
|
2017-12-20 18:15:06 +00:00
|
|
|
} /* ixv_if_media_change */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
|
2017-08-24 22:56:22 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_negotiate_api
|
|
|
|
*
|
|
|
|
* Negotiate the Mailbox API with the PF;
|
|
|
|
* start with the most featured API first.
|
|
|
|
************************************************************************/
|
|
|
|
static int
|
|
|
|
ixv_negotiate_api(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
int mbx_api[] = { ixgbe_mbox_api_11,
|
|
|
|
ixgbe_mbox_api_10,
|
|
|
|
ixgbe_mbox_api_unknown };
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
while (mbx_api[i] != ixgbe_mbox_api_unknown) {
|
|
|
|
if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
|
|
|
|
return (0);
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (EINVAL);
|
|
|
|
} /* ixv_negotiate_api */
|
|
|
|
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_multi_set - Multicast Update
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Called whenever multicast address list is updated.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_multi_set(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-07-05 17:27:03 +00:00
|
|
|
u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
2017-07-05 17:27:03 +00:00
|
|
|
u8 *update_ptr;
|
|
|
|
struct ifmultiaddr *ifma;
|
2017-12-20 18:15:06 +00:00
|
|
|
if_t ifp = iflib_get_ifp(ctx);
|
2017-07-05 17:27:03 +00:00
|
|
|
int mcnt = 0;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
|
2015-03-17 18:32:28 +00:00
|
|
|
if (ifma->ifma_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
2017-07-05 17:27:03 +00:00
|
|
|
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
|
2015-03-17 18:32:28 +00:00
|
|
|
&mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
|
|
|
|
IXGBE_ETH_LENGTH_OF_ADDRESS);
|
|
|
|
mcnt++;
|
|
|
|
}
|
|
|
|
|
|
|
|
update_ptr = mta;
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
|
|
|
|
ixv_mc_array_itr, TRUE);
|
2017-12-20 18:15:06 +00:00
|
|
|
} /* ixv_if_multi_set */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_mc_array_itr
|
|
|
|
*
|
|
|
|
* An iterator function needed by the multicast shared code.
|
|
|
|
* It feeds the shared code routine the addresses in the
|
|
|
|
* array of ixv_set_multi() one by one.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static u8 *
|
|
|
|
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
|
|
|
|
{
|
|
|
|
u8 *addr = *update_ptr;
|
|
|
|
u8 *newptr;
|
2017-12-20 18:15:06 +00:00
|
|
|
|
2015-03-17 18:32:28 +00:00
|
|
|
*vmdq = 0;
|
|
|
|
|
|
|
|
newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
|
|
|
|
*update_ptr = newptr;
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2015-03-17 18:32:28 +00:00
|
|
|
return addr;
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_mc_array_itr */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_local_timer - Timer routine
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Checks for link status, updates statistics,
|
|
|
|
* and runs the watchdog check.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
if (qid != 0)
|
|
|
|
return;
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/* Fire off the adminq task */
|
|
|
|
iflib_admin_intr_deferred(ctx);
|
|
|
|
} /* ixv_if_local_timer */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_update_admin_status - Update OS on link state
|
2017-07-05 17:27:03 +00:00
|
|
|
*
|
|
|
|
* Note: Only updates the OS on the cached link state.
|
|
|
|
* The real check of the hardware only happens with
|
|
|
|
* a link interrupt.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	s32 status;

	/* Force a fresh query of link state from the PF. */
	adapter->hw.mac.get_link_status = TRUE;

	status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);

	if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		iflib_get_ifp(ctx)->if_init(ctx);
	}

	/* Propagate a cached-state transition (up<->down) to the stack. */
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    /* presumably 128 == IXGBE_LINK_SPEED_10GB_FULL
				     * (0x80) — TODO confirm against ixgbe_type.h */
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			adapter->link_active = FALSE;
		}
	}

	/* Stats Update */
	ixv_update_stats(adapter);
} /* ixv_if_update_admin_status */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_stop - Stop the hardware
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Disables all traffic on the adapter by issuing a
|
|
|
|
* global reset on the MAC and deallocates TX/RX buffers.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_stop(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2015-03-17 18:32:28 +00:00
|
|
|
INIT_DEBUGOUT("ixv_stop: begin\n");
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_disable_intr(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
hw->mac.ops.reset_hw(hw);
|
2015-03-17 18:32:28 +00:00
|
|
|
adapter->hw.adapter_stopped = FALSE;
|
2017-07-05 17:27:03 +00:00
|
|
|
hw->mac.ops.stop_adapter(hw);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/* Update the stack */
|
|
|
|
adapter->link_up = FALSE;
|
|
|
|
ixv_if_update_admin_status(ctx);
|
|
|
|
|
|
|
|
/* reprogram the RAR[0] in case user changed it. */
|
|
|
|
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
|
|
|
|
} /* ixv_if_stop */
|
|
|
|
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
* ixv_identify_hardware - Determine hardware revision.
|
|
|
|
************************************************************************/
|
|
|
|
static void
|
|
|
|
ixv_identify_hardware(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
device_t dev = iflib_get_dev(ctx);
|
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
|
|
|
|
/* Save off the information about this board */
|
|
|
|
hw->vendor_id = pci_get_vendor(dev);
|
|
|
|
hw->device_id = pci_get_device(dev);
|
|
|
|
hw->revision_id = pci_get_revid(dev);
|
|
|
|
hw->subsystem_vendor_id = pci_get_subvendor(dev);
|
|
|
|
hw->subsystem_device_id = pci_get_subdevice(dev);
|
|
|
|
|
|
|
|
/* A subset of set_mac_type */
|
|
|
|
switch (hw->device_id) {
|
|
|
|
case IXGBE_DEV_ID_82599_VF:
|
|
|
|
hw->mac.type = ixgbe_mac_82599_vf;
|
|
|
|
break;
|
|
|
|
case IXGBE_DEV_ID_X540_VF:
|
|
|
|
hw->mac.type = ixgbe_mac_X540_vf;
|
|
|
|
break;
|
|
|
|
case IXGBE_DEV_ID_X550_VF:
|
|
|
|
hw->mac.type = ixgbe_mac_X550_vf;
|
|
|
|
break;
|
|
|
|
case IXGBE_DEV_ID_X550EM_X_VF:
|
|
|
|
hw->mac.type = ixgbe_mac_X550EM_x_vf;
|
|
|
|
break;
|
|
|
|
case IXGBE_DEV_ID_X550EM_A_VF:
|
|
|
|
hw->mac.type = ixgbe_mac_X550EM_a_vf;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
device_printf(dev, "unknown mac type\n");
|
|
|
|
hw->mac.type = ixgbe_mac_unknown;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} /* ixv_identify_hardware */
|
|
|
|
|
|
|
|
/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 *
 *   Allocates one MSI-X vector per RX queue (handler ixv_msix_que),
 *   attaches each TX queue's softirq to an RX vector (round-robin when
 *   there are more TX than RX queues), and allocates the final vector
 *   for the admin/mailbox interrupt (ixv_msix_mbx).
 *
 *   Returns 0 on success or the iflib error code on failure; on RX
 *   vector allocation failure all previously allocated IRQs are freed.
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	device_t           dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	struct ix_tx_queue *tx_que;
	int                error, rid, vector = 0;
	char               buf[16];

	/* One MSI-X vector (rid = vector + 1) per RX queue. */
	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d", i, error);
			/*
			 * NOTE(review): setting num_rx_queues to i + 1 makes
			 * the cleanup loop below also free the irq of the
			 * queue that just FAILED to allocate (index i) —
			 * verify iflib_irq_free tolerates an unallocated irq,
			 * otherwise this should arguably be i.
			 */
			adapter->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}

	/* TX queues share RX vectors: softirq only, no new vector. */
	for (int i = 0; i < adapter->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &adapter->tx_queues[i];
		tx_que->msix = i % adapter->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &adapter->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	/* Last vector is the admin (mailbox/link) interrupt. */
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	adapter->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSIX unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSIX control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		/* Locate the MSI-X capability and re-assert the enable bit. */
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);

fail:
	/*
	 * NOTE(review): adapter->irq was never allocated on this path
	 * (the admin vector is allocated only after all RX vectors
	 * succeed) — presumably iflib_irq_free is a no-op on an
	 * unallocated irq; confirm.
	 */
	iflib_irq_free(ctx, &adapter->irq);
	rx_que = adapter->rx_queues;
	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_allocate_pci_resources
|
|
|
|
************************************************************************/
|
|
|
|
static int
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_allocate_pci_resources(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
device_t dev = iflib_get_dev(ctx);
|
|
|
|
int rid;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
rid = PCIR_BAR(0);
|
|
|
|
adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
|
|
|
|
RF_ACTIVE);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
if (!(adapter->pci_mem)) {
|
|
|
|
device_printf(dev, "Unable to allocate bus resource: memory\n");
|
|
|
|
return (ENXIO);
|
|
|
|
}
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
|
|
|
|
adapter->osdep.mem_bus_space_handle =
|
|
|
|
rman_get_bushandle(adapter->pci_mem);
|
|
|
|
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
return (0);
|
|
|
|
} /* ixv_allocate_pci_resources */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_free_pci_resources
|
|
|
|
************************************************************************/
|
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_free_pci_resources(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
struct ix_rx_queue *que = adapter->rx_queues;
|
|
|
|
device_t dev = iflib_get_dev(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2019-01-30 13:21:26 +00:00
|
|
|
/* Release all MSI-X queue resources */
|
2017-12-20 18:15:06 +00:00
|
|
|
if (adapter->intr_type == IFLIB_INTR_MSIX)
|
|
|
|
iflib_irq_free(ctx, &adapter->irq);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
if (que != NULL) {
|
|
|
|
for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
|
|
|
|
iflib_irq_free(ctx, &que->que_irq);
|
2015-03-17 18:32:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (adapter->pci_mem != NULL)
|
2017-12-20 18:15:06 +00:00
|
|
|
bus_release_resource(dev, SYS_RES_MEMORY,
|
2019-01-30 13:21:26 +00:00
|
|
|
rman_get_rid(adapter->pci_mem), adapter->pci_mem);
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_free_pci_resources */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_setup_interface
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Setup networking device structure and register an interface.
|
|
|
|
************************************************************************/
|
2017-12-20 18:15:06 +00:00
|
|
|
static int
|
|
|
|
ixv_setup_interface(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
if_softc_ctx_t scctx = adapter->shared;
|
|
|
|
struct ifnet *ifp = iflib_get_ifp(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
INIT_DEBUGOUT("ixv_setup_interface: begin");
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
if_setbaudrate(ifp, IF_Gbps(10));
|
|
|
|
ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
|
2017-12-20 18:15:06 +00:00
|
|
|
ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
|
|
|
|
ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
return 0;
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_setup_interface */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_if_get_counter
|
|
|
|
************************************************************************/
|
|
|
|
static uint64_t
|
|
|
|
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
if_t ifp = iflib_get_ifp(ctx);
|
|
|
|
|
|
|
|
switch (cnt) {
|
|
|
|
case IFCOUNTER_IPACKETS:
|
|
|
|
return (adapter->ipackets);
|
|
|
|
case IFCOUNTER_OPACKETS:
|
|
|
|
return (adapter->opackets);
|
|
|
|
case IFCOUNTER_IBYTES:
|
|
|
|
return (adapter->ibytes);
|
|
|
|
case IFCOUNTER_OBYTES:
|
|
|
|
return (adapter->obytes);
|
|
|
|
case IFCOUNTER_IMCASTS:
|
|
|
|
return (adapter->imcasts);
|
|
|
|
default:
|
|
|
|
return (if_get_counter_default(ifp, cnt));
|
|
|
|
}
|
|
|
|
} /* ixv_if_get_counter */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 *   Programs every TX ring's VF registers (writeback threshold, head/
 *   tail, base address, length, WRO disable) and finally sets the
 *   enable bit. Also resets the driver-side descriptor bookkeeping
 *   used by the iflib credits_update path.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &adapter->hw;
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que = adapter->tx_queues;
	int                i;

	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64            tdba = txr->tx_paddr;
		u32            txctrl, txdctl;
		int            j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		/* Reset the report-status bookkeeping to an empty ring. */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters: DMA base (split low/high) and length. */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering for descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 *   Programs the VF RSS key, the 64-entry redirection table (RETA)
 *   and the enabled hash field types. When the RSS kernel option is
 *   active the key/indirection/hash-config come from the stack's rss
 *   subsystem; otherwise a random key and a simple round-robin RETA
 *   are used.
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds (10 x 32-bit key registers). */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 one-byte entries, packed four
	 * at a time into each 32-bit VFRETA register. j round-robins over
	 * the RX queues for the non-RSS-option case.
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_rx_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Shift the accumulator right and insert the new byte at
		 * the top; flush every fourth entry.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the stack's hash types into MRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
|
|
|
|
|
|
|
|
|
|
|
|
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 *
 *   For each RX ring: disables the queue, programs DMA base/length,
 *   resets head/tail, configures SRRCTL buffer sizing, re-enables the
 *   queue and sets the tail pointer (with netmap-aware tail handling).
 *   Also programs packet-split types, informs the PF of our max frame
 *   size, and (on X550-class VFs only) configures RSS.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx;
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que = adapter->rx_queues;
	u32                bufsz, psrtype;

	/* Pick the receive buffer size (in BSIZEPKT units) from the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Header types eligible for packet split. */
	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	/* NOTE(review): bit 29 presumably selects multi-queue RSS hashing
	 * in PSRTYPE — confirm against the datasheet. */
	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
	}
	scctx = adapter->shared;

	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;
		u32            reg, rxdctl;
		int            j = rxr->me;

		/* Disable the queue and poll (up to 10ms) for it to quiesce. */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register: one-buffer advanced descriptors
		 * with the buffer size chosen above. */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last; poll for the enable to latch. */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/*
	 * Do not touch RSS and RETA settings for older hardware
	 * as those are shared among PF and all VF.
	 */
	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
 * ixv_setup_vlan_support
 *
 *   Re-enables VLAN tag stripping on the RX queues and repopulates the
 *   hardware VLAN Filter Table Array (VFTA) from the driver's shadow
 *   copy after a soft reset, using the PF mailbox for each vlan id.
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32             ctrl, vid, vfta, retry;

	/*
	 * We get here thru if_init, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < adapter->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			adapter->rx_queues[i].rxr.vtag_strip = TRUE;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill VLAN Filter Table Array (VFTA).
	 */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine; retry up to
			 * 5 times if the PF mailbox is busy/NACKs. */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_register_vlan
|
2017-07-05 17:27:03 +00:00
|
|
|
*
|
|
|
|
* Run via a vlan config EVENT, it enables us to use the
|
|
|
|
* HW Filter table since we can get the vlan id. This just
|
|
|
|
* creates the entry in the soft version of the VFTA, init
|
|
|
|
* will repopulate the real table.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
2017-07-05 17:27:03 +00:00
|
|
|
u16 index, bit;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
index = (vtag >> 5) & 0x7F;
|
|
|
|
bit = vtag & 0x1F;
|
|
|
|
ixv_shadow_vfta[index] |= (1 << bit);
|
|
|
|
++adapter->num_vlans;
|
2017-12-20 18:15:06 +00:00
|
|
|
} /* ixv_if_register_vlan */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_unregister_vlan
|
2017-07-05 17:27:03 +00:00
|
|
|
*
|
|
|
|
* Run via a vlan unconfig EVENT, remove our entry
|
|
|
|
* in the soft vfta.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
2017-07-05 17:27:03 +00:00
|
|
|
u16 index, bit;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
index = (vtag >> 5) & 0x7F;
|
|
|
|
bit = vtag & 0x1F;
|
|
|
|
ixv_shadow_vfta[index] &= ~(1 << bit);
|
|
|
|
--adapter->num_vlans;
|
2017-12-20 18:15:06 +00:00
|
|
|
} /* ixv_if_unregister_vlan */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_enable_intr
|
2017-07-05 17:27:03 +00:00
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_enable_intr(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
2017-12-20 18:15:06 +00:00
|
|
|
struct ix_rx_queue *que = adapter->rx_queues;
|
2017-07-05 17:27:03 +00:00
|
|
|
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
|
|
|
|
|
|
|
|
mask = IXGBE_EIMS_ENABLE_MASK;
|
|
|
|
mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
|
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
for (int i = 0; i < adapter->num_rx_queues; i++, que++)
|
2015-03-17 18:32:28 +00:00
|
|
|
ixv_enable_queue(adapter, que->msix);
|
|
|
|
|
|
|
|
IXGBE_WRITE_FLUSH(hw);
|
2017-12-20 18:15:06 +00:00
|
|
|
} /* ixv_if_enable_intr */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
2017-12-20 18:15:06 +00:00
|
|
|
* ixv_if_disable_intr
|
2017-07-05 17:27:03 +00:00
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
2017-12-20 18:15:06 +00:00
|
|
|
ixv_if_disable_intr(if_ctx_t ctx)
|
2015-03-17 18:32:28 +00:00
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
2015-03-17 18:32:28 +00:00
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
|
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
|
|
|
|
IXGBE_WRITE_FLUSH(&adapter->hw);
|
2017-12-20 18:15:06 +00:00
|
|
|
} /* ixv_if_disable_intr */
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_if_rx_queue_intr_enable
|
|
|
|
************************************************************************/
|
|
|
|
static int
|
|
|
|
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = iflib_get_softc(ctx);
|
|
|
|
struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
|
|
|
|
|
|
|
|
ixv_enable_queue(adapter, que->rxr.me);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
} /* ixv_if_rx_queue_intr_enable */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_set_ivar
|
|
|
|
*
|
|
|
|
* Setup the correct IVAR register for a particular MSI-X interrupt
|
|
|
|
* - entry is the register array entry
|
|
|
|
* - vector is the MSI-X vector for this queue
|
|
|
|
* - type is RX/TX/MISC
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
|
|
|
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
2017-07-05 17:27:03 +00:00
|
|
|
u32 ivar, index;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
vector |= IXGBE_IVAR_ALLOC_VAL;
|
|
|
|
|
|
|
|
if (type == -1) { /* MISC IVAR */
|
|
|
|
ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
|
|
|
|
ivar &= ~0xFF;
|
|
|
|
ivar |= vector;
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
|
2017-07-05 17:27:03 +00:00
|
|
|
} else { /* RX/TX IVARS */
|
2015-03-17 18:32:28 +00:00
|
|
|
index = (16 * (entry & 1)) + (8 * type);
|
|
|
|
ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
|
|
|
|
ivar &= ~(0xFF << index);
|
|
|
|
ivar |= (vector << index);
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
|
|
|
|
}
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_set_ivar */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_configure_ivars
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
|
|
|
ixv_configure_ivars(struct adapter *adapter)
|
|
|
|
{
|
2017-12-20 18:15:06 +00:00
|
|
|
struct ix_rx_queue *que = adapter->rx_queues;
|
|
|
|
|
|
|
|
MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
|
2015-03-17 18:32:28 +00:00
|
|
|
/* First the RX queue entry */
|
2017-07-05 17:27:03 +00:00
|
|
|
ixv_set_ivar(adapter, i, que->msix, 0);
|
2015-03-17 18:32:28 +00:00
|
|
|
/* ... and the TX */
|
|
|
|
ixv_set_ivar(adapter, i, que->msix, 1);
|
|
|
|
/* Set an initial value in EITR */
|
2017-07-05 17:27:03 +00:00
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
|
|
|
|
IXGBE_EITR_DEFAULT);
|
2015-03-17 18:32:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* For the mailbox interrupt */
|
2017-07-05 17:27:03 +00:00
|
|
|
ixv_set_ivar(adapter, 1, adapter->vector, -1);
|
|
|
|
} /* ixv_configure_ivars */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_save_stats
|
|
|
|
*
|
|
|
|
* The VF stats registers never have a truly virgin
|
|
|
|
* starting point, so this routine tries to make an
|
|
|
|
* artificial one, marking ground zero on attach as
|
|
|
|
* it were.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
|
|
|
ixv_save_stats(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
|
|
|
|
adapter->stats.vf.saved_reset_vfgprc +=
|
|
|
|
adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
|
|
|
|
adapter->stats.vf.saved_reset_vfgptc +=
|
|
|
|
adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
|
|
|
|
adapter->stats.vf.saved_reset_vfgorc +=
|
|
|
|
adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
|
|
|
|
adapter->stats.vf.saved_reset_vfgotc +=
|
|
|
|
adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
|
|
|
|
adapter->stats.vf.saved_reset_vfmprc +=
|
|
|
|
adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
|
|
|
|
}
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_save_stats */
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
* ixv_init_stats
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
|
|
|
ixv_init_stats(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2015-03-17 18:32:28 +00:00
|
|
|
adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
|
|
|
|
adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
|
|
|
|
adapter->stats.vf.last_vfgorc |=
|
|
|
|
(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
|
|
|
|
|
|
|
|
adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
|
|
|
|
adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
|
|
|
|
adapter->stats.vf.last_vfgotc |=
|
|
|
|
(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
|
|
|
|
|
|
|
|
adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
|
|
|
|
|
|
|
|
adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
|
|
|
|
adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
|
|
|
|
adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
|
|
|
|
adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
|
|
|
|
adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_init_stats */
|
|
|
|
|
|
|
|
/*
 * Fold a 32-bit hardware counter into a 64-bit software accumulator,
 * handling one wraparound since the last read.  Wrapped in
 * do { } while (0) so the macro behaves as a single statement in
 * unbraced if/else contexts (CERT PRE10-C).  Expects 'hw' in scope.
 */
#define UPDATE_STAT_32(reg, last, count)                \
do {                                                    \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last) /* counter wrapped */       \
		count += 0x100000000LL;                 \
	last = current;                                 \
	/* keep the wrap epoch, refresh the low word */ \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
} while (0)
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/*
 * Fold a 36-bit hardware counter (split across LSB/MSB registers)
 * into a 64-bit software accumulator, handling one wraparound since
 * the last read.  Wrapped in do { } while (0) so the macro behaves
 * as a single statement in unbraced if/else contexts (CERT PRE10-C).
 * Expects 'hw' in scope.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
do {                                                    \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last) /* counter wrapped */       \
		count += 0x1000000000LL;                \
	last = current;                                 \
	/* keep the wrap epoch, refresh the low 36 */   \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
} while (0)
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_update_stats - Update the board statistics counters.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
void
|
|
|
|
ixv_update_stats(struct adapter *adapter)
|
|
|
|
{
|
2017-07-05 17:27:03 +00:00
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-12-20 18:15:06 +00:00
|
|
|
UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
|
2015-03-17 18:32:28 +00:00
|
|
|
adapter->stats.vf.vfgprc);
|
2017-12-20 18:15:06 +00:00
|
|
|
UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
|
2015-03-17 18:32:28 +00:00
|
|
|
adapter->stats.vf.vfgptc);
|
2017-12-20 18:15:06 +00:00
|
|
|
UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
|
2015-03-17 18:32:28 +00:00
|
|
|
adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
|
2017-12-20 18:15:06 +00:00
|
|
|
UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
|
2015-03-17 18:32:28 +00:00
|
|
|
adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
|
2017-12-20 18:15:06 +00:00
|
|
|
UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
|
2015-03-17 18:32:28 +00:00
|
|
|
adapter->stats.vf.vfmprc);
|
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/* Fill out the OS statistics structure */
|
|
|
|
IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
|
|
|
|
IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
|
|
|
|
IXGBE_SET_IBYTES(adapter, stats->vfgorc);
|
|
|
|
IXGBE_SET_OBYTES(adapter, stats->vfgotc);
|
|
|
|
IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
|
|
|
|
} /* ixv_update_stats */
|
|
|
|
|
|
|
|
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 *   Registers driver-level counters, one sysctl node per TX and RX
 *   queue, and a "mac" node mirroring the VF hardware statistics.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_tx_queue *tx_que = adapter->tx_queues;
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

	/* One "queueN" node per TX queue with its per-ring counters. */
	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
	}

	/*
	 * One "queueN" node per RX queue.  NOTE(review): when TX and RX
	 * queue counts match, these reuse the node name created above —
	 * presumably SYSCTL_ADD_NODE attaches to the existing node;
	 * confirm against sysctl(9) behavior.
	 */
	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
	}

	/* Hardware-register-backed VF statistics under "mac". */
	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */
|
2015-03-17 18:32:28 +00:00
|
|
|
|
2017-07-05 17:27:03 +00:00
|
|
|
/************************************************************************
|
|
|
|
* ixv_print_debug_info
|
2015-03-17 18:32:28 +00:00
|
|
|
*
|
2017-07-05 17:27:03 +00:00
|
|
|
* Called only when em_display_debug_stats is enabled.
|
|
|
|
* Provides a way to take a look at important statistics
|
|
|
|
* maintained by the driver and hardware.
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static void
|
|
|
|
ixv_print_debug_info(struct adapter *adapter)
|
|
|
|
{
|
2017-07-05 17:27:03 +00:00
|
|
|
device_t dev = adapter->dev;
|
|
|
|
struct ixgbe_hw *hw = &adapter->hw;
|
|
|
|
|
|
|
|
device_printf(dev, "Error Byte Count = %u \n",
|
|
|
|
IXGBE_READ_REG(hw, IXGBE_ERRBC));
|
|
|
|
|
|
|
|
device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
|
|
|
|
} /* ixv_print_debug_info */
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
* ixv_sysctl_debug
|
|
|
|
************************************************************************/
|
2015-03-17 18:32:28 +00:00
|
|
|
static int
|
|
|
|
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct adapter *adapter;
|
2017-07-05 17:27:03 +00:00
|
|
|
int error, result;
|
2015-03-17 18:32:28 +00:00
|
|
|
|
|
|
|
result = -1;
|
|
|
|
error = sysctl_handle_int(oidp, &result, 0, req);
|
|
|
|
|
|
|
|
if (error || !req->newptr)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
if (result == 1) {
|
2017-07-05 17:27:03 +00:00
|
|
|
adapter = (struct adapter *)arg1;
|
2015-03-17 18:32:28 +00:00
|
|
|
ixv_print_debug_info(adapter);
|
|
|
|
}
|
2017-07-05 17:27:03 +00:00
|
|
|
|
2015-03-17 18:32:28 +00:00
|
|
|
return error;
|
2017-07-05 17:27:03 +00:00
|
|
|
} /* ixv_sysctl_debug */
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
* ixv_init_device_features
|
|
|
|
************************************************************************/
|
|
|
|
static void
|
|
|
|
ixv_init_device_features(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
adapter->feat_cap = IXGBE_FEATURE_NETMAP
|
|
|
|
| IXGBE_FEATURE_VF
|
|
|
|
| IXGBE_FEATURE_LEGACY_TX;
|
|
|
|
|
|
|
|
/* A tad short on feature flags for VFs, atm. */
|
|
|
|
switch (adapter->hw.mac.type) {
|
|
|
|
case ixgbe_mac_82599_vf:
|
|
|
|
break;
|
|
|
|
case ixgbe_mac_X540_vf:
|
|
|
|
break;
|
|
|
|
case ixgbe_mac_X550_vf:
|
|
|
|
case ixgbe_mac_X550EM_x_vf:
|
|
|
|
case ixgbe_mac_X550EM_a_vf:
|
|
|
|
adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
|
2019-11-05 06:34:20 +00:00
|
|
|
adapter->feat_cap |= IXGBE_FEATURE_RSS;
|
2017-07-05 17:27:03 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enabled by default... */
|
|
|
|
/* Is a virtual function (VF) */
|
|
|
|
if (adapter->feat_cap & IXGBE_FEATURE_VF)
|
|
|
|
adapter->feat_en |= IXGBE_FEATURE_VF;
|
|
|
|
/* Netmap */
|
|
|
|
if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
|
|
|
|
adapter->feat_en |= IXGBE_FEATURE_NETMAP;
|
|
|
|
/* Receive-Side Scaling (RSS) */
|
|
|
|
if (adapter->feat_cap & IXGBE_FEATURE_RSS)
|
|
|
|
adapter->feat_en |= IXGBE_FEATURE_RSS;
|
|
|
|
/* Needs advanced context descriptor regardless of offloads req'd */
|
|
|
|
if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
|
|
|
|
adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
|
|
|
|
} /* ixv_init_device_features */
|
|
|
|
|