/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/md5.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet/netdump/netdump.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>
#include <net/iflib_private.h>

#include "ifdi_if.h"

#if defined(__i386__) || defined(__amd64__)
#include <sys/memdesc.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/busdma_dmar.h>
#endif

#include <sys/bitstring.h>
/*
 * enable accounting of every mbuf as it comes into and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis-a-vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis-a-vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 * - private structures
 * - iflib private utility functions
 * - ifnet functions
 * - vlan registry and other exported functions
 * - iflib public core functions
 *
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	uint16_t ifc_nhwtxqs;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	int ifc_in_detach;

	int ifc_link_state;
	int ifc_link_irq;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia ifc_media;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	uint8_t ifc_mac[ETHER_ADDR_LEN];
	char ifc_mtx_name[16];
};

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (&ctx->ifc_media);
}

uint32_t
iflib_get_flags(if_ctx_t ctx)
{
	return (ctx->ifc_flags);
}

void
iflib_set_detach(if_ctx_t ctx)
{
	ctx->ifc_in_detach = 1;
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
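/*
 * Worked example: with m_data at an address that is 2 modulo 4
 * (e.g. 0x...42), the 14-byte Ethernet header ends at 0x...50, so the
 * IP header that follows it lands on a 4-byte boundary -- hence
 * "IP aligned".
 */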
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
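/*
 * Note: the mask must be the complement of (CACHE_LINE_SIZE-1) so the
 * macro rounds the pointer up to the next cache-line boundary, e.g.
 * with 64-byte lines 0x1008 -> 0x1040, while an already aligned
 * pointer such as 0x1040 is unchanged.
 */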

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define M_TOOBIG		M_PROTO1

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	uint8_t		*ifsd_flags;
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	uint8_t		*ifsd_flags;
} if_txsd_vec_t;


/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
/* bnxt supports 64 with hardware LRO enabled */
#define IFLIB_MAX_RX_SEGS		64
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8


#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t	ift_desc_tag;
	bus_dma_tag_t	ift_tso_desc_tag;
	iflib_dma_info_t	ift_ifdi;
#define MTX_NAME_LEN 16
	char		ift_mtx_name[MTX_NAME_LEN];
	char		ift_db_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */

	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_desc_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}
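/*
 * Worked example: for a 1024-entry ring with cidx == pidx, the
 * generation bit disambiguates the two possible states -- gen == 0
 * means the ring is empty (0 in use), gen == 1 means the producer has
 * wrapped all the way around and the ring is full (1024 in use).
 * With cidx = 1000 and pidx = 8 the count wraps: 1024 - 1000 + 8 = 32.
 */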

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
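/*
 * Example: IDXDIFF(3, 1020, 1024) == 1024 - 1020 + 3 == 7, the number
 * of slots the head index has advanced past the tail across the wrap.
 */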

struct iflib_rxq {
	/* If there is a separate completion queue -
	 * these are the cq cidx and pidx. Otherwise
	 * these are unused.
	 */
	qidx_t		ifr_size;
	qidx_t		ifr_cq_cidx;
	qidx_t		ifr_cq_pidx;
	uint8_t		ifr_cq_gen;
	uint8_t		ifr_fl_offset;

	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	uint16_t	ifr_id;
	uint8_t		ifr_lro_enabled;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	struct lro_ctrl		ifr_lc;
	struct grouptask	ifr_task;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	struct mbuf **ifsd_m;
	iflib_fl_t ifsd_fl;
	qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
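/*
 * The loop bounds round the struct sizes down to a multiple of the
 * unroll factor: on LP64, RXD_LOOP_BOUND == (5/4)*4 == 4, so
 * rxd_info_zero() below clears four words in its unrolled loop and the
 * remaining fifth word explicitly.
 */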

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));


static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}
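/*
 * The word-wise stores above are a hand-unrolled equivalent of
 * memset(pi, 0, sizeof(*pi)): the CTASSERTs guarantee the pad struct
 * covers the real struct exactly, so the fixed-size zeroing can be
 * done as straight-line word stores on this hot path.
 */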

static device_method_t iflib_pseudo_methods[] = {
	DEVMETHOD(device_attach, noop_attach),
	DEVMETHOD(device_detach, iflib_pseudo_detach),
	DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
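/*
 * Example: with MAX_SINGLE_PACKET_FRACTION == 12, a 1024-descriptor
 * tx ring limits any single packet to about 1024/12 == 85 descriptors;
 * longer mbuf chains must be defragmented before they can be enqueued.
 */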
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)


#define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)


#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)


/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "disable transmit batching at the possible expense of throughput");

#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");


static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;
static int iflib_txq_drain_encapfail;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
    &iflib_txq_drain_encapfail, 0, "# drain encap fails");


static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_intr_link;
static int iflib_intr_msix;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_zero_len;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
    &iflib_intr_link, 0, "# intr link calls");
SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
    &iflib_intr_msix, 0, "# intr msix calls");
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
    &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
    &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
    &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
    &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
    &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
    &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
    &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
		iflib_txq_drain_notready = iflib_txq_drain_encapfail =
		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
		iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
		iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

NETDUMP_DEFINE(iflib);

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames whose length is not a multiple of
 *	64 is slower, so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	int status;

	CTX_LOCK(ctx);
	IFDI_INTR_DISABLE(ctx);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	iflib_stop(ctx);
	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int head = kring->rhead;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	uint32_t refill_pidx, nic_i;

	if (nm_i == head && __predict_true(!init))
		return 0;
	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	refill_pidx = netmap_idx_k2n(kring, nm_i);
	/*
	 * IMPORTANT: we must leave one free slot in the ring,
	 * so move head back by one unit
	 */
	head = nm_prev(head, lim);
	nic_i = UINT_MAX;
	while (nm_i != head) {
		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
			uint32_t nic_i_dma = refill_pidx;
			nic_i = netmap_idx_k2n(kring, nm_i);

			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return netmap_ring_reinit(kring);

			fl->ifl_vm_addrs[tmp_pidx] = addr;
			if (__predict_false(init) && map) {
				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			}
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
				continue;

			iru.iru_pidx = refill_pidx;
			iru.iru_count = tmp_pidx+1;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);

			refill_pidx = nic_i;
			if (map == NULL)
				continue;

			for (int n = 0; n < iru.iru_count; n++) {
				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
				    BUS_DMASYNC_PREREAD);
				/* XXX - change this to not use the netmap func*/
				nic_i_dma = nm_next(nic_i_dma, lim);
			}
		}
	}
	kring->nr_hwcur = head;

	if (map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (__predict_true(nic_i != UINT_MAX))
		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
	return (0);
}

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	if (txq->ift_sds.ifsd_map)
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
				    BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		if (txq->ift_sds.ifsd_map)
			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If none are reclaimed, and TX IRQs are not in use, do an initial
	 * minimal delay, then trigger the tx handler which will spin in the
	 * group task queue.
	 */
	if (kring->nr_hwtail != nm_prev(head, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
		else {
			if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
				DELAY(1);
				GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txq->ift_id].ift_task);
			}
		}
	}
	return (0);
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = netmap_idx_n2k(kring, kring->rhead);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = rxq->ifr_fl;
	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		if (fl->ifl_sds.ifsd_map == NULL)
			continue;
		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			nic_i = fl->ifl_cidx;
			nm_i = netmap_idx_n2k(kring, nic_i);
			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
			for (n = 0; avail > 0; n++, avail--) {
				rxd_info_zero(&ri);
				ri.iri_frags = rxq->ifr_frags;
				ri.iri_qsidx = kring->ring_id;
				ri.iri_ifp = ctx->ifc_ifp;
				ri.iri_cidx = nic_i;

				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
				ring->slot[nm_i].flags = 0;
				if (fl->ifl_sds.ifsd_map)
					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
					    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				nic_i = nm_next(nic_i, lim);
			}
			if (n) { /* update the state variables */
				if (netmap_no_pendintr && !force_update) {
					/* diagnostics */
					iflib_rx_miss ++;
					iflib_rx_miss_bufs += n;
				}
				fl->ifl_cidx = nic_i;
				kring->nr_hwtail = netmap_idx_k2n(kring, nm_i);
			}
			kring->nr_kflags &= ~NKR_PENDINTR;
		}
	}
	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	/* XXX not sure how this will work with multiple free lists */
	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);

	return (netmap_fl_refill(rxq, kring, nm_i, false));
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}


static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = scctx->isc_ntxd[0];
	na.num_rx_desc = scctx->isc_nrxd[0];
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return;
	if (txq->ift_sds.ifsd_map == NULL)
		return;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {

		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
	}
}

static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
	struct netmap_slot *slot;
	uint32_t nm_i;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return;
	nm_i = netmap_idx_n2k(kring, 0);
	netmap_fl_refill(rxq, kring, nm_i, true);
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)

#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
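/*
 * prefetch2cachelines() pulls in the cache line containing x and, when
 * cache lines are smaller than 128 bytes, the line CACHE_LINE_SIZE
 * bytes beyond it as well, so a structure spanning up to 128 bytes is
 * fully prefetched with one call.
 */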
|
2016-05-18 04:35:58 +00:00
|
|
|
#else
|
|
|
|
#define prefetch(x)
|
2017-10-23 20:50:08 +00:00
|
|
|
#define prefetch2cachelines(x)
|
2016-05-18 04:35:58 +00:00
|
|
|
#endif

static void
iflib_gen_mac(if_ctx_t ctx)
{
	struct thread *td;
	MD5_CTX mdctx;
	char uuid[HOSTUUIDLEN+1];
	char buf[HOSTUUIDLEN+16];
	uint8_t *mac;
	unsigned char digest[16];

	td = curthread;
	mac = ctx->ifc_mac;
	uuid[HOSTUUIDLEN] = 0;
	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
	/*
	 * Generate a pseudo-random, deterministic MAC
	 * address based on the UUID and unit number.
	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
	 */
	MD5Init(&mdctx);
	MD5Update(&mdctx, buf, strlen(buf));
	MD5Final(digest, &mdctx);

	mac[0] = 0x58;
	mac[1] = 0x9C;
	mac[2] = 0xFC;
	mac[3] = digest[0];
	mac[4] = digest[1];
	mac[5] = digest[2];
}
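
/*
 * Example (sketch, not part of the driver): because the derivation above is
 * deterministic, the same MAC can be reproduced from the same inputs.  The
 * UUID and nameunit strings below are made up for illustration.
 *
 *	MD5_CTX c;
 *	unsigned char d[16];
 *	const char *buf = "ae5e4b2c-0000-0000-0000-000000000000-em0";
 *
 *	MD5Init(&c);
 *	MD5Update(&c, buf, strlen(buf));
 *	MD5Final(d, &c);
 *	// resulting MAC is 58:9c:fc:d[0]:d[1]:d[2]
 */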

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	int err;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				sctx->isc_q_align, 0,	/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				size,			/* maxsize */
				1,			/* nsegments */
				size,			/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}
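
/*
 * Typical pairing (sketch; "ring" and the 4096-byte size are hypothetical):
 * a driver allocates DMA-able descriptor memory once at attach and frees it
 * at detach.
 *
 *	struct iflib_dma_info ring;
 *
 *	if (iflib_dma_alloc(ctx, 4096, &ring, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	// ring.idi_vaddr / ring.idi_paddr are now valid and zeroed
 *	...
 *	iflib_dma_free(&ring);
 */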

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;	/* initialize err: the loop body never runs when count == 0 */
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}
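
/*
 * Example (sketch; the two-entry list is hypothetical): the *_multi variants
 * operate on an array of iflib_dma_info pointers and unwind partial
 * allocations on failure, so the pairing below is safe even if the second
 * allocation fails.
 *
 *	int sizes[2] = { 4096, 8192 };
 *	iflib_dma_info_t dmas[2] = { &txring, &rxring };
 *
 *	if (iflib_dma_alloc_multi(ctx, sizes, dmas, BUS_DMA_NOWAIT, 2) != 0)
 *		return (ENOMEM);
 *	...
 *	iflib_dma_free_multi(dmas, 2);
 */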

#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
 * We used to abuse the smp_started flag to decide if the queues have been
 * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
 * That gave bad races, since the SYSINIT() runs strictly after smp_started
 * is set.  Run a SYSINIT() strictly after that to just set a usable
 * completion flag.
 */
static int iflib_started;

static void
iflib_record_started(void *arg)
{
	iflib_started = 1;
}

SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
	iflib_record_started, NULL);
#endif

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	if_ctx_t ctx = NULL;
	int i, cidx;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		qidx_t txqid = rxq->ifr_txqid[i];

		ctx = rxq->ifr_ctx;

		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    const char *name)
{
	int rc, flags;
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	irq->ii_rid = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n",
		    rid, name ? name : "unknown");
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/

static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	int err, nsegments, ntsosegments;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	MPASS(ntsosegments > 0);
	/*
	 * Setup DMA descriptor areas.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sctx->isc_tx_maxsize,	/* maxsize */
			       nsegments,		/* nsegments */
			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_desc_tag))) {
		device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       scctx->isc_tx_tso_size_max,	/* maxsize */
			       ntsosegments,		/* nsegments */
			       scctx->isc_tx_tso_segsize_max, /* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_tso_desc_tag))) {
		device_printf(dev, "Unable to allocate TX TSO DMA tag: %d\n", err);
		goto fail;
	}
	if (!(txq->ift_sds.ifsd_flags =
	    (uint8_t *) malloc(sizeof(uint8_t) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
	if ((ctx->ifc_flags & IFC_DMAR) == 0)
		return (0);

	if (!(txq->ift_sds.ifsd_map =
	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}

	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
#endif
	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	iflib_tx_structures_free(ctx);
	return (err);
}

static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	map = NULL;
	if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[i];
	if (map != NULL) {
		bus_dmamap_unload(txq->ift_desc_tag, map);
		bus_dmamap_destroy(txq->ift_desc_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}
}

static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);
	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_sds.ifsd_flags != NULL) {
		free(txq->ift_sds.ifsd_flags, M_IFLIB);
		txq->ift_sds.ifsd_flags = NULL;
	}
	if (txq->ift_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_desc_tag);
		txq->ift_desc_tag = NULL;
	}
	if (txq->ift_tso_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
		txq->ift_tso_desc_tag = NULL;
	}
}

static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i]);
	}
	m_free(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
					 1, 0,			/* alignment, bounds */
					 BUS_SPACE_MAXADDR,	/* lowaddr */
					 BUS_SPACE_MAXADDR,	/* highaddr */
					 NULL, NULL,		/* filter, filterarg */
					 sctx->isc_rx_maxsize,	/* maxsize */
					 sctx->isc_rx_nsegments, /* nsegments */
					 sctx->isc_rx_maxsegsize, /* maxsegsize */
					 0,			/* flags */
					 NULL,			/* lockfunc */
					 NULL,			/* lockarg */
					 &fl->ifl_desc_tag);
		if (err) {
			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
				__func__, err);
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_flags =
		    (uint8_t *) malloc(sizeof(uint8_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_m =
		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_cl =
		    (caddr_t *) malloc(sizeof(caddr_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__)))
		if ((ctx->ifc_flags & IFC_DMAR) == 0)
			continue;

		if (!(fl->ifl_sds.ifsd_map =
		    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer map memory\n");
			err = ENOMEM;
			goto fail;
		}

		for (int j = 0; j < scctx->isc_nrxd[rxq->ifr_fl_offset]; j++) {
			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[j]);
			if (err != 0) {
				device_printf(dev, "Unable to create RX buffer DMA map\n");
				goto fail;
			}
		}
#endif
	}
	return (0);

fail:
	iflib_rx_structures_free(ctx);
	return (err);
}

/*
 * Internal service routines
 */

struct rxq_refill_cb_arg {
	int               error;
	bus_dma_segment_t seg;
	int               nseg;
};

static void
_rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct rxq_refill_cb_arg *cb_arg = arg;

	cb_arg->error = error;
	cb_arg->seg = segs[0];
	cb_arg->nseg = nseg;
}

#ifdef ACPI_DMAR
#define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
#else
#define IS_DMAR(ctx) (0)
#endif

/**
 *	_iflib_fl_refill - refill an rxq free-buffer list
 *	@ctx: the iflib context
 *	@fl: the free list to refill
 *	@count: the number of new buffers to allocate
 *
 *	(Re)populate an rxq free-buffer list with up to @count new packet buffers.
 *	The caller must assure that @count does not exceed the queue's capacity.
 */
static void
_iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
{
	struct mbuf *m;
	int idx, frag_idx = fl->ifl_fragidx;
	int pidx = fl->ifl_pidx;
	caddr_t cl, *sd_cl;
	struct mbuf **sd_m;
	uint8_t *sd_flags;
	struct if_rxd_update iru;
	bus_dmamap_t *sd_map;
	int n, i = 0;
	uint64_t bus_addr;
	int err;
	qidx_t credits;

	sd_m = fl->ifl_sds.ifsd_m;
	sd_map = fl->ifl_sds.ifsd_map;
	sd_cl = fl->ifl_sds.ifsd_cl;
	sd_flags = fl->ifl_sds.ifsd_flags;
	idx = pidx;
	credits = fl->ifl_credits;

	n = count;
	MPASS(n > 0);
	MPASS(credits + n <= fl->ifl_size);

	if (pidx < fl->ifl_cidx)
		MPASS(pidx + n <= fl->ifl_cidx);
	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
		MPASS(fl->ifl_gen == 0);
	if (pidx > fl->ifl_cidx)
		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);

	DBG_COUNTER_INC(fl_refills);
	if (n > 8)
		DBG_COUNTER_INC(fl_refills_large);
	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
	while (n--) {
		/*
		 * We allocate an uninitialized mbuf + cluster; the mbuf is
		 * initialized after rx.
		 *
		 * If the cluster is still set then we know a minimum sized
		 * packet was received.
		 */
		bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx);
		if ((frag_idx < 0) || (frag_idx >= fl->ifl_size))
			bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
		if ((cl = sd_cl[frag_idx]) == NULL) {
			if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
				break;
#if MEMORY_LOGGING
			fl->ifl_cl_enqueued++;
#endif
		}
		if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
			break;
		}
#if MEMORY_LOGGING
		fl->ifl_m_enqueued++;
#endif

		DBG_COUNTER_INC(rx_allocs);
#if defined(__i386__) || defined(__amd64__)
		if (!IS_DMAR(ctx)) {
			bus_addr = pmap_kextract((vm_offset_t)cl);
		} else
#endif
		{
			struct rxq_refill_cb_arg cb_arg;

			cb_arg.error = 0;
			MPASS(sd_map != NULL);
			MPASS(sd_map[frag_idx] != NULL);
			err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
			bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
			    BUS_DMASYNC_PREREAD);

			if (err != 0 || cb_arg.error) {
				/*
				 * !zone_pack ?
				 */
				if (fl->ifl_zone == zone_pack)
					uma_zfree(fl->ifl_zone, cl);
				m_free(m);
				n = 0;
				goto done;
			}
			bus_addr = cb_arg.seg.ds_addr;
		}
		bit_set(fl->ifl_rx_bitmap, frag_idx);
		sd_flags[frag_idx] |= RX_SW_DESC_INUSE;

		MPASS(sd_m[frag_idx] == NULL);
		sd_cl[frag_idx] = cl;
		sd_m[frag_idx] = m;
		fl->ifl_rxd_idxs[i] = frag_idx;
		fl->ifl_bus_addrs[i] = bus_addr;
		fl->ifl_vm_addrs[i] = cl;
		credits++;
		i++;
		MPASS(credits <= fl->ifl_size);
		if (++idx == fl->ifl_size) {
			fl->ifl_gen = 1;
			idx = 0;
		}
		if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
			iru.iru_pidx = pidx;
			iru.iru_count = i;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
			i = 0;
			pidx = idx;
			fl->ifl_pidx = idx;
			fl->ifl_credits = credits;
		}
	}
done:
	if (i) {
		iru.iru_pidx = pidx;
		iru.iru_count = i;
		ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
		fl->ifl_pidx = idx;
		fl->ifl_credits = credits;
	}
	DBG_COUNTER_INC(rxd_flush);
	if (fl->ifl_pidx == 0)
		pidx = fl->ifl_size - 1;
	else
		pidx = fl->ifl_pidx - 1;

	if (sd_map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
	fl->ifl_fragidx = frag_idx;
}

static __inline void
__iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
{
	/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
	int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
#ifdef INVARIANTS
	int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
#endif

	MPASS(fl->ifl_credits <= fl->ifl_size);
	MPASS(reclaimable == delta);

	if (reclaimable > 0)
		_iflib_fl_refill(ctx, fl, min(max, reclaimable));
}
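
/*
 * Worked example (illustrative numbers): with ifl_size = 512 and
 * ifl_credits = 500, reclaimable is 512 - 500 - 1 = 11, so at most 11
 * buffers are refilled no matter how large 'max' is; the "- 1" is what
 * keeps pidx from ever catching up with cidx.
 */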

static void
iflib_fl_bufs_free(iflib_fl_t fl)
{
	iflib_dma_info_t idi = fl->ifl_ifdi;
	uint32_t i;

	for (i = 0; i < fl->ifl_size; i++) {
		struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
		uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i];
		caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];

		if (*sd_flags & RX_SW_DESC_INUSE) {
			if (fl->ifl_sds.ifsd_map != NULL) {
				bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
				bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
				if (fl->ifl_rxq->ifr_ctx->ifc_in_detach)
					bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
			}
			if (*sd_m != NULL) {
				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
				uma_zfree(zone_mbuf, *sd_m);
			}
			if (*sd_cl != NULL)
				uma_zfree(fl->ifl_zone, *sd_cl);
			*sd_flags = 0;
		} else {
			MPASS(*sd_cl == NULL);
			MPASS(*sd_m == NULL);
		}
#if MEMORY_LOGGING
		fl->ifl_m_dequeued++;
		fl->ifl_cl_dequeued++;
#endif
		*sd_cl = NULL;
		*sd_m = NULL;
	}
#ifdef INVARIANTS
	for (i = 0; i < fl->ifl_size; i++) {
		MPASS(fl->ifl_sds.ifsd_flags[i] == 0);
		MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
		MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
	}
#endif
	/*
	 * Reset free list values
	 */
	fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
	bzero(idi->idi_vaddr, idi->idi_size);
}

/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
iflib_fl_setup(iflib_fl_t fl)
{
	iflib_rxq_t rxq = fl->ifl_rxq;
	if_ctx_t ctx = rxq->ifr_ctx;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;

	bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
	/*
	** Free current RX buffer structs and their mbufs
	*/
	iflib_fl_bufs_free(fl);
	/* Now replenish the mbufs */
	MPASS(fl->ifl_credits == 0);
	/*
	 * XXX don't set the max_frame_size to larger
	 * than the hardware can handle
	 */
	if (sctx->isc_max_frame_size <= 2048)
		fl->ifl_buf_size = MCLBYTES;
#ifndef CONTIGMALLOC_WORKS
	else
		fl->ifl_buf_size = MJUMPAGESIZE;
#else
	else if (sctx->isc_max_frame_size <= 4096)
		fl->ifl_buf_size = MJUMPAGESIZE;
	else if (sctx->isc_max_frame_size <= 9216)
		fl->ifl_buf_size = MJUM9BYTES;
	else
		fl->ifl_buf_size = MJUM16BYTES;
#endif
	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
	fl->ifl_zone = m_getzone(fl->ifl_buf_size);

	/* avoid pre-allocating zillions of clusters to an idle card
	 * potentially speeding up attach
	 */
	_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
	if (min(128, fl->ifl_size) != fl->ifl_credits)
		return (ENOBUFS);
	/*
	 * handle failure
	 */
	MPASS(rxq != NULL);
	MPASS(fl->ifl_ifdi != NULL);
	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
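
/*
 * Illustrative buffer-size selection (the CONTIGMALLOC_WORKS case): a
 * 1500-byte max frame maps to MCLBYTES (2K clusters), 4000 to MJUMPAGESIZE,
 * 9000 to MJUM9BYTES, and anything above 9216 to MJUM16BYTES.
 */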

/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
iflib_rx_sds_free(iflib_rxq_t rxq)
{
	iflib_fl_t fl;
	int i;

	if (rxq->ifr_fl != NULL) {
		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			if (fl->ifl_desc_tag != NULL) {
				bus_dma_tag_destroy(fl->ifl_desc_tag);
				fl->ifl_desc_tag = NULL;
			}
			free(fl->ifl_sds.ifsd_m, M_IFLIB);
			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
			/* XXX destroy maps first */
			free(fl->ifl_sds.ifsd_map, M_IFLIB);
			fl->ifl_sds.ifsd_m = NULL;
			fl->ifl_sds.ifsd_cl = NULL;
			fl->ifl_sds.ifsd_map = NULL;
		}
		free(rxq->ifr_fl, M_IFLIB);
		rxq->ifr_fl = NULL;
		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
	}
}

/*
 * MI (machine-independent) logic
 */
static void
iflib_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;
	/*
	** Check on the state of the TX queue(s); this
	** can be done without the lock because it's RO
	** and the HUNG state will be static if set.
	*/
	IFDI_TIMER(ctx, txq->ift_id);
	if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
	    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
	     (sctx->isc_pause_frames == 0)))
		goto hung;

	if (ifmp_ring_is_stalled(txq->ift_br))
		txq->ift_qstatus = IFLIB_QUEUE_HUNG;
	txq->ift_cleaned_prev = txq->ift_cleaned;
	/* handle any laggards */
	if (txq->ift_db_pending)
		GROUPTASK_ENQUEUE(&txq->ift_task);

	sctx->isc_pause_frames = 0;
	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
	return;
hung:
	device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
	STATE_LOCK(ctx);
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
	iflib_admin_intr_deferred(ctx);
	STATE_UNLOCK(ctx);
}

static void
iflib_init_locked(if_ctx_t ctx)
{
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_t ifp = ctx->ifc_ifp;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;

	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	IFDI_INTR_DISABLE(ctx);

	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
	/* Set hardware offload abilities */
	if_clearhwassist(ifp);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
		iflib_netmap_txq_init(ctx, txq);
	}
#ifdef INVARIANTS
	i = if_getdrvflags(ifp);
#endif
	IFDI_INIT(ctx);
	MPASS(if_getdrvflags(ifp) == i);
	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
		/* XXX this should really be done on a per-queue basis */
		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
			MPASS(rxq->ifr_id == i);
			iflib_netmap_rxq_init(ctx, rxq);
			continue;
		}
		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			if (iflib_fl_setup(fl)) {
				device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
				goto done;
			}
		}
	}
done:
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	IFDI_INTR_ENABLE(ctx);
	txq = ctx->ifc_txqs;
	for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
			txq->ift_timer.c_cpu);
}

static int
iflib_media_change(if_t ifp)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	int err;

	CTX_LOCK(ctx);
	if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
		iflib_init_locked(ctx);
	CTX_UNLOCK(ctx);
	return (err);
}

static void
iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	CTX_LOCK(ctx);
	IFDI_UPDATE_ADMIN_STATUS(ctx);
	IFDI_MEDIA_STATUS(ctx, ifmr);
	CTX_UNLOCK(ctx);
}

void
iflib_stop(if_ctx_t ctx)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_dma_info_t di;
	iflib_fl_t fl;
	int i, j;

	/* Tell the stack that the interface is no longer active */
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	IFDI_INTR_DISABLE(ctx);
	DELAY(1000);
	IFDI_STOP(ctx);
	DELAY(1000);

	iflib_debug_reset();
	/* Wait for current tx queue users to exit to disarm watchdog timer. */
	for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
		/* make sure all transmitters have completed before proceeding XXX */

		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);

		/* clean any enqueued buffers */
		iflib_ifmp_purge(txq);
		/* Free any existing tx buffers. */
		for (j = 0; j < txq->ift_size; j++) {
			iflib_txsd_free(ctx, txq, j);
		}
		txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
		txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
		txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
		txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
		txq->ift_pullups = 0;
		ifmp_ring_reset_stats(txq->ift_br);
		for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
			bzero((void *)di->idi_vaddr, di->idi_size);
	}
	for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
		/* make sure all transmitters have completed before proceeding XXX */

		for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++)
			bzero((void *)di->idi_vaddr, di->idi_size);
		/* also resets the free lists pidx/cidx */
		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			iflib_fl_bufs_free(fl);
	}
}

static inline caddr_t
calc_next_rxd(iflib_fl_t fl, int cidx)
{
	qidx_t size;
	int nrxd;
	caddr_t start, end, cur, next;

	nrxd = fl->ifl_size;
	size = fl->ifl_rxd_size;
	start = fl->ifl_ifdi->idi_vaddr;

	if (__predict_false(size == 0))
		return (start);
	cur = start + size*cidx;
	end = start + size*nrxd;
	next = CACHE_PTR_NEXT(cur);
	return (next < end ? next : start);
}

static inline void
prefetch_pkts(iflib_fl_t fl, int cidx)
{
	int nextptr;
	int nrxd = fl->ifl_size;
	caddr_t next_rxd;

	nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
	prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
	prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
	next_rxd = calc_next_rxd(fl, cidx);
	prefetch(next_rxd);
	prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
	prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
	prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
	prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
	prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
}

static void
rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
{
	int flid, cidx;
	bus_dmamap_t map;
	iflib_fl_t fl;
	iflib_dma_info_t di;
	int next;

	map = NULL;
	flid = irf->irf_flid;
	cidx = irf->irf_idx;
	fl = &rxq->ifr_fl[flid];
	sd->ifsd_fl = fl;
	sd->ifsd_cidx = cidx;
	sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
	sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
	fl->ifl_credits--;
#if MEMORY_LOGGING
	fl->ifl_m_dequeued++;
#endif
	if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
		prefetch_pkts(fl, cidx);
	if (fl->ifl_sds.ifsd_map != NULL) {
		next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
		prefetch(&fl->ifl_sds.ifsd_map[next]);
		map = fl->ifl_sds.ifsd_map[cidx];
		di = fl->ifl_ifdi;
		next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
		prefetch(&fl->ifl_sds.ifsd_flags[next]);
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* not valid assert if bxe really does SGE from non-contiguous elements */
		MPASS(fl->ifl_cidx == cidx);
		if (unload)
			bus_dmamap_unload(fl->ifl_desc_tag, map);
	}
	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
	if (__predict_false(fl->ifl_cidx == 0))
		fl->ifl_gen = 0;
	if (map != NULL)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bit_clear(fl->ifl_rx_bitmap, cidx);
}

static struct mbuf *
assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
{
	int i, padlen, flags;
	struct mbuf *m, *mh, *mt;
	caddr_t cl;

	i = 0;
	mh = NULL;
	do {
		rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);

		MPASS(*sd->ifsd_cl != NULL);
		MPASS(*sd->ifsd_m != NULL);

		/* Don't include zero-length frags */
		if (ri->iri_frags[i].irf_len == 0) {
			/* XXX we can save the cluster here, but not the mbuf */
			m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
			m_free(*sd->ifsd_m);
			*sd->ifsd_m = NULL;
			continue;
		}
		m = *sd->ifsd_m;
		*sd->ifsd_m = NULL;
		if (mh == NULL) {
			flags = M_PKTHDR|M_EXT;
			mh = mt = m;
			padlen = ri->iri_pad;
		} else {
			flags = M_EXT;
			mt->m_next = m;
			mt = m;
			/* assuming padding is only on the first fragment */
			padlen = 0;
		}
		cl = *sd->ifsd_cl;
		*sd->ifsd_cl = NULL;

		/* Can these two be made one ? */
		m_init(m, M_NOWAIT, MT_DATA, flags);
		m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
		/*
		 * These must follow m_init and m_cljset
		 */
		m->m_data += padlen;
		ri->iri_len -= padlen;
		m->m_len = ri->iri_frags[i].irf_len;
	} while (++i < ri->iri_nfrags);

	return (mh);
}

/*
 * Process one software descriptor
 */
static struct mbuf *
iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
{
	struct if_rxsd sd;
	struct mbuf *m;

	/* should I merge this back in now that the two paths are basically duplicated? */
	if (ri->iri_nfrags == 1 &&
	    ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
		rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
		m = *sd.ifsd_m;
		*sd.ifsd_m = NULL;
		m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
#ifndef __NO_STRICT_ALIGNMENT
		if (!IP_ALIGNED(m))
			m->m_data += 2;
#endif
		memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
		m->m_len = ri->iri_frags[0].irf_len;
	} else {
		m = assemble_segments(rxq, ri, &sd);
	}
	m->m_pkthdr.len = ri->iri_len;
	m->m_pkthdr.rcvif = ri->iri_ifp;
	m->m_flags |= ri->iri_flags;
	m->m_pkthdr.ether_vtag = ri->iri_vtag;
	m->m_pkthdr.flowid = ri->iri_flowid;
	M_HASHTYPE_SET(m, ri->iri_rsstype);
	m->m_pkthdr.csum_flags = ri->iri_csum_flags;
	m->m_pkthdr.csum_data = ri->iri_csum_data;
	return (m);
}

#if defined(INET6) || defined(INET)
static void
iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
{
	CURVNET_SET(lc->ifp->if_vnet);
#if defined(INET6)
	*v6 = VNET(ip6_forwarding);
#endif
#if defined(INET)
	*v4 = VNET(ipforwarding);
#endif
	CURVNET_RESTORE();
}

/*
 * Returns true if it's possible this packet could be LROed.
 * If it returns false, it is guaranteed that tcp_lro_rx()
 * would not return zero.
 */
static bool
iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
{
	struct ether_header *eh;
	uint16_t eh_type;

	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	switch (eh_type) {
#if defined(INET6)
	case ETHERTYPE_IPV6:
		return !v6_forwarding;
#endif
#if defined(INET)
	case ETHERTYPE_IP:
		return !v4_forwarding;
#endif
	}

	return false;
}
#else
static void
iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
{
}
#endif
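
/*
 * Example: with net.inet.ip.forwarding=1, v4_forwarding is true, so an
 * ETHERTYPE_IP frame makes iflib_check_lro_possible() return false and
 * iflib_rxeof() below hands the packet straight to if_input() without
 * attempting LRO; aggregating frames that will be forwarded again would
 * alter what goes back out on the wire.
 */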

static bool
iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int avail, i;
	qidx_t *cidxp;
	struct if_rxd_info ri;
	int err, budget_left, rx_bytes, rx_pkts;
	iflib_fl_t fl;
	struct ifnet *ifp;
	int lro_enabled;
	bool v4_forwarding, v6_forwarding, lro_possible;

	/*
	 * XXX early demux data packets so that if_input processing only handles
	 * acks in interrupt context
	 */
	struct mbuf *m, *mh, *mt, *mf;

	lro_possible = v4_forwarding = v6_forwarding = false;
	ifp = ctx->ifc_ifp;
	mh = mt = NULL;
	MPASS(budget > 0);
	rx_pkts = rx_bytes = 0;
	if (sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidxp = &rxq->ifr_cq_cidx;
	else
		cidxp = &rxq->ifr_fl[0].ifl_cidx;
	if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
		for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
			__iflib_fl_refill_lt(ctx, fl, budget + 8);
		DBG_COUNTER_INC(rx_unavail);
		return (false);
	}

	for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) {
		if (__predict_false(!CTX_ACTIVE(ctx))) {
			DBG_COUNTER_INC(rx_ctx_inactive);
			break;
		}
		/*
		 * Reset client set fields to their default values
		 */
		rxd_info_zero(&ri);
		ri.iri_qsidx = rxq->ifr_id;
		ri.iri_cidx = *cidxp;
		ri.iri_ifp = ifp;
		ri.iri_frags = rxq->ifr_frags;
		err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);

		if (err)
			goto err;
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			*cidxp = ri.iri_cidx;
			/* Update our consumer index */
			/* XXX NB: shurd - check if this is still safe */
			while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
				rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
				rxq->ifr_cq_gen = 0;
			}
			/* was this only a completion queue message? */
			if (__predict_false(ri.iri_nfrags == 0))
				continue;
		}
		MPASS(ri.iri_nfrags != 0);
		MPASS(ri.iri_len != 0);

		/* will advance the cidx on the corresponding free lists */
		m = iflib_rxd_pkt_get(rxq, &ri);
		if (avail == 0 && budget_left)
			avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);

		if (__predict_false(m == NULL)) {
			DBG_COUNTER_INC(rx_mbuf_null);
			continue;
		}
		/* imm_pkt: -- cxgb */
		if (mh == NULL)
			mh = mt = m;
		else {
			mt->m_nextpkt = m;
			mt = m;
		}
	}
	/* make sure that we can refill faster than drain */
	for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
		__iflib_fl_refill_lt(ctx, fl, budget + 8);

	lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
	if (lro_enabled)
		iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
	mt = mf = NULL;
	while (mh != NULL) {
		m = mh;
		mh = mh->m_nextpkt;
		m->m_nextpkt = NULL;
#ifndef __NO_STRICT_ALIGNMENT
		if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
			continue;
#endif
		rx_bytes += m->m_pkthdr.len;
		rx_pkts++;
#if defined(INET6) || defined(INET)
		if (lro_enabled) {
			if (!lro_possible) {
				lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
				if (lro_possible && mf != NULL) {
					ifp->if_input(ifp, mf);
					DBG_COUNTER_INC(rx_if_input);
					mt = mf = NULL;
				}
			}
			if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
			    (CSUM_L4_CALC|CSUM_L4_VALID)) {
				if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
					continue;
			}
		}
#endif
		if (lro_possible) {
			ifp->if_input(ifp, m);
			DBG_COUNTER_INC(rx_if_input);
			continue;
		}

		if (mf == NULL)
			mf = m;
		if (mt != NULL)
			mt->m_nextpkt = m;
		mt = m;
	}
	if (mf != NULL) {
		ifp->if_input(ifp, mf);
		DBG_COUNTER_INC(rx_if_input);
	}

	if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);

	/*
	 * Flush any outstanding LRO work
	 */
#if defined(INET6) || defined(INET)
	tcp_lro_flush_all(&rxq->ifr_lc);
#endif
	if (avail)
		return true;
	return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
err:
	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_DO_RESET;
	iflib_admin_intr_deferred(ctx);
	STATE_UNLOCK(ctx);
	return (false);
}

#define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
static inline qidx_t
txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
{
	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
	qidx_t minthresh = txq->ift_size / 8;
	if (in_use > 4*minthresh)
		return (notify_count);
	if (in_use > 2*minthresh)
		return (notify_count >> 1);
	if (in_use > minthresh)
		return (notify_count >> 3);
	return (0);
}
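
/*
 * Worked example (derived from the function above): with ift_size = 1024 and
 * ift_update_freq = 16, TXD_NOTIFY_COUNT is 63 and minthresh is 128.  Up to
 * 128 descriptors in use, every update rings the doorbell (0 deferred); at
 * 129-256 in use, up to 7 updates may be deferred; at 257-512, up to 31; and
 * beyond 512, up to the full 63.  The busier the ring, the fewer doorbell
 * (and thus MMIO) writes per descriptor.
 */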

static inline qidx_t
txq_max_rs_deferred(iflib_txq_t txq)
{
	qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
	qidx_t minthresh = txq->ift_size / 8;
	if (txq->ift_in_use > 4*minthresh)
		return (notify_count);
	if (txq->ift_in_use > 2*minthresh)
		return (notify_count >> 1);
	if (txq->ift_in_use > minthresh)
		return (notify_count >> 2);
	return (2);
}
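
/*
 * Unlike txq_max_db_deferred() above, this reads ift_in_use directly and
 * never falls to 0: even on a lightly loaded queue up to two report-status
 * requests may be deferred before one is forced.
 */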

#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)

#define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
#define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
#define TXQ_MAX_DB_CONSUMED(size) (size >> 4)

/* forward compatibility for cxgb */
#define FIRST_QSET(ctx) 0
#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))

/* XXX we should be setting this to something other than zero */
#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
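
/*
 * Worked example for QIDX(): with isc_rss_table_mask = 0x7f and four TX
 * queue sets, a packet whose RSS flowid is 0x1234 maps to queue
 * ((0x1234 & 0x7f) % 4) + 0 = (0x34 % 4) = 0.  All packets of one flow
 * therefore land on the same TX queue, preserving per-flow ordering.
 */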

static inline bool
iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
{
	qidx_t dbval, max;
	bool rang;

	rang = false;
	max = TXQ_MAX_DB_DEFERRED(txq, in_use);
	if (ring || txq->ift_db_pending >= max) {
		dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
		txq->ift_db_pending = txq->ift_npending = 0;
		rang = true;
	}
	return (rang);
}
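
/*
 * Note (descriptive, from the code above): the value handed to
 * isc_txd_flush() is ift_npending when the driver accumulates a pending
 * count, and the current producer index otherwise; a non-zero 'ring'
 * argument forces the doorbell regardless of how many updates are deferred.
 */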

#ifdef PKT_DEBUG
static void
print_pkt(if_pkt_info_t pi)
{
	printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
	       pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
	printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
	       pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
	printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
	       pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
}
#endif

#define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
#define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
#define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
#define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
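
/*
 * These test whether the stack asked for TCP checksum or TSO offload on a
 * frame; iflib_parse_header() only walks into the TCP header when one of
 * them is set, so frames without L4 offload requests avoid the extra
 * pullups.
 */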

static int
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
{
	if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
	struct ether_vlan_header *eh;
	struct mbuf *m, *n;

	n = m = *mp;
	if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
	    M_WRITABLE(m) == 0) {
		if ((m = m_dup(m, M_NOWAIT)) == NULL) {
			return (ENOMEM);
		} else {
			m_freem(*mp);
			n = *mp = m;
		}
	}

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	if (__predict_false(m->m_len < sizeof(*eh))) {
		txq->ift_pullups++;
		if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
			return (ENOMEM);
	}
	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		pi->ipi_etype = ntohs(eh->evl_proto);
		pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		pi->ipi_etype = ntohs(eh->evl_encap_proto);
		pi->ipi_ehdrlen = ETHER_HDR_LEN;
	}

	switch (pi->ipi_etype) {
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip = NULL;
		struct tcphdr *th = NULL;
		int minthlen;

		minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
		if (__predict_false(m->m_len < minthlen)) {
			/*
			 * if this code bloat is causing too much of a hit
			 * move it to a separate function and mark it noinline
			 */
			if (m->m_len == pi->ipi_ehdrlen) {
				n = m->m_next;
				MPASS(n);
				if (n->m_len >= sizeof(*ip)) {
					ip = (struct ip *)n->m_data;
					if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
						th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
				} else {
					txq->ift_pullups++;
					if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
						return (ENOMEM);
					ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
				}
			} else {
				txq->ift_pullups++;
				if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
					return (ENOMEM);
				ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
				if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
					th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
			}
		} else {
			ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
			if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
				th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		}
		pi->ipi_ip_hlen = ip->ip_hl << 2;
		pi->ipi_ipproto = ip->ip_p;
		pi->ipi_flags |= IPI_TX_IPV4;

		if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
			ip->ip_sum = 0;

		/* TCP checksum offload may require TCP header length */
		if (IS_TX_OFFLOAD4(pi)) {
			if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
				if (__predict_false(th == NULL)) {
					txq->ift_pullups++;
					if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
						return (ENOMEM);
					th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
				}
				pi->ipi_tcp_hflags = th->th_flags;
				pi->ipi_tcp_hlen = th->th_off << 2;
				pi->ipi_tcp_seq = th->th_seq;
			}
			if (IS_TSO4(pi)) {
				if (__predict_false(ip->ip_p != IPPROTO_TCP))
					return (ENXIO);
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
				if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
					ip->ip_sum = 0;
					ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
				}
			}
		}
		break;
	}
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
		struct tcphdr *th;
		pi->ipi_ip_hlen = sizeof(struct ip6_hdr);

		if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
			if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
				return (ENOMEM);
		}
		th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);

		/* XXX-BZ this will go badly in case of ext hdrs. */
		pi->ipi_ipproto = ip6->ip6_nxt;
		pi->ipi_flags |= IPI_TX_IPV6;

		/* TCP checksum offload may require TCP header length */
		if (IS_TX_OFFLOAD6(pi)) {
			if (pi->ipi_ipproto == IPPROTO_TCP) {
				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
					txq->ift_pullups++;
					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
						return (ENOMEM);
				}
				pi->ipi_tcp_hflags = th->th_flags;
				pi->ipi_tcp_hlen = th->th_off << 2;
				pi->ipi_tcp_seq = th->th_seq;
			}
			if (IS_TSO6(pi)) {
				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
					return (ENXIO);
				/*
				 * The corresponding flag is set by the stack in the IPv4
				 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
				 * So, set it here because the rest of the flow requires it.
				 */
				pi->ipi_csum_flags |= CSUM_IP6_TCP;
				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
			}
		}
		break;
	}
#endif
	default:
		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
		pi->ipi_ip_hlen = 0;
		break;
	}
	*mp = m;

	return (0);
}
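
/*
 * Worked example (derived from the parser above): for a VLAN-tagged IPv4
 * TCP segment with no IP or TCP options, the function leaves pi with
 * ipi_ehdrlen = 18 (ETHER_HDR_LEN 14 + ETHER_VLAN_ENCAP_LEN 4),
 * ipi_ip_hlen = 20 (ip_hl 5 << 2) and, when checksum or TSO offload was
 * requested, ipi_tcp_hlen = 20 (th_off 5 << 2).
 */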

static __noinline struct mbuf *
collapse_pkthdr(struct mbuf *m0)
{
	struct mbuf *m, *m_next, *tmp;

	m = m0;
	m_next = m->m_next;
	while (m_next != NULL && m_next->m_len == 0) {
		/* save the successor before freeing so we don't read freed memory */
		tmp = m_next->m_next;
		m_next->m_next = NULL;
		m_free(m_next);
		m_next = tmp;
	}
	m = m0;
	m->m_next = m_next;
	if ((m_next->m_flags & M_EXT) == 0) {
		m = m_defrag(m, M_NOWAIT);
	} else {
		tmp = m_next->m_next;
		memcpy(m_next, m, MPKTHSIZE);
		m = m_next;
		m->m_next = tmp;
	}
	return (m);
}

/*
 * If dodgy hardware rejects the scatter gather chain we've handed it
 * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
 * m_defrag'd mbufs
 */
static __noinline struct mbuf *
iflib_remove_mbuf(iflib_txq_t txq)
{
	int ntxd, i, pidx;
	struct mbuf *m, *mh, **ifsd_m;

	pidx = txq->ift_pidx;
	ifsd_m = txq->ift_sds.ifsd_m;
	ntxd = txq->ift_size;
	mh = m = ifsd_m[pidx];
	ifsd_m[pidx] = NULL;
#if MEMORY_LOGGING
	txq->ift_dequeued++;
#endif
	i = 1;

	while (m) {
		ifsd_m[(pidx + i) & (ntxd - 1)] = NULL;
#if MEMORY_LOGGING
		txq->ift_dequeued++;
#endif
		m = m->m_next;
		i++;
	}
	return (mh);
}

static int
iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
			  struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
			  int max_segs, int flags)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;
	if_softc_ctx_t scctx;
	int i, next, pidx, err, ntxd, count;
	struct mbuf *m, *tmp, **ifsd_m;

	m = *m0;

	/*
	 * Please don't ever do this
	 */
	if (__predict_false(m->m_len == 0))
		*m0 = m = collapse_pkthdr(m);

	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	ifsd_m = txq->ift_sds.ifsd_m;
	ntxd = txq->ift_size;
	pidx = txq->ift_pidx;
	if (map != NULL) {
		uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;

		err = bus_dmamap_load_mbuf_sg(tag, map,
					      *m0, segs, nsegs, BUS_DMA_NOWAIT);
		if (err)
			return (err);
		ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
		count = 0;
		m = *m0;
		do {
			if (__predict_false(m->m_len <= 0)) {
				tmp = m;
				m = m->m_next;
				tmp->m_next = NULL;
				m_free(tmp);
				continue;
			}
			m = m->m_next;
			count++;
		} while (m != NULL);
		if (count > *nsegs) {
			ifsd_m[pidx] = *m0;
			ifsd_m[pidx]->m_flags |= M_TOOBIG;
			return (0);
		}
		m = *m0;
		count = 0;
		do {
			next = (pidx + count) & (ntxd-1);
			MPASS(ifsd_m[next] == NULL);
			ifsd_m[next] = m;
			count++;
			tmp = m;
			m = m->m_next;
		} while (m != NULL);
	} else {
		int buflen, sgsize, maxsegsz, max_sgsize;
		vm_offset_t vaddr;
		vm_paddr_t curaddr;

		count = i = 0;
		m = *m0;
		if (m->m_pkthdr.csum_flags & CSUM_TSO)
			maxsegsz = scctx->isc_tx_tso_segsize_max;
		else
			maxsegsz = sctx->isc_tx_maxsegsize;

		do {
			if (__predict_false(m->m_len <= 0)) {
				tmp = m;
				m = m->m_next;
				tmp->m_next = NULL;
				m_free(tmp);
				continue;
			}
			buflen = m->m_len;
			vaddr = (vm_offset_t)m->m_data;
			/*
			 * see if we can't be smarter about physically
			 * contiguous mappings
			 */
			next = (pidx + count) & (ntxd-1);
			MPASS(ifsd_m[next] == NULL);
#if MEMORY_LOGGING
			txq->ift_enqueued++;
#endif
			ifsd_m[next] = m;
			while (buflen > 0) {
				if (i >= max_segs)
					goto err;
				max_sgsize = MIN(buflen, maxsegsz);
				curaddr = pmap_kextract(vaddr);
				sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
				sgsize = MIN(sgsize, max_sgsize);
				segs[i].ds_addr = curaddr;
				segs[i].ds_len = sgsize;
				vaddr += sgsize;
				buflen -= sgsize;
				i++;
			}
			count++;
			tmp = m;
			m = m->m_next;
		} while (m != NULL);
		*nsegs = i;
	}
	return (0);
err:
	*m0 = iflib_remove_mbuf(txq);
	return (EFBIG);
}
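
/*
 * Note on the map == NULL path above: without a busdma map the code walks
 * the chain itself, using pmap_kextract() to translate each buffer and
 * splitting segments at page boundaries, since a buffer that crosses a page
 * may not be physically contiguous.  A 3000-byte cluster starting 500 bytes
 * before a page boundary therefore yields two segments (500 and 2500 bytes)
 * with 4K pages, assuming maxsegsz allows it.
 */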

static inline caddr_t
calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
{
	qidx_t size;
	int ntxd;
	caddr_t start, end, cur, next;

	ntxd = txq->ift_size;
	size = txq->ift_txd_size[qid];
	start = txq->ift_ifdi[qid].idi_vaddr;

	if (__predict_false(size == 0))
		return (start);
	cur = start + size*cidx;
	end = start + size*ntxd;
	next = CACHE_PTR_NEXT(cur);
	return (next < end ? next : start);
}

/*
 * Pad an mbuf to ensure a minimum ethernet frame size.
 * min_frame_size is the frame size (less CRC) to pad the mbuf to
 */
static __noinline int
iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
{
	/*
	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
	 * an ARP message is the smallest common payload I can think of
	 */
	static char pad[18];	/* just zeros */
	int n;
	struct mbuf *new_head;

	if (!M_WRITABLE(*m_head)) {
		new_head = m_dup(*m_head, M_NOWAIT);
		if (new_head == NULL) {
			m_freem(*m_head);
			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
			DBG_COUNTER_INC(encap_pad_mbuf_fail);
			return ENOMEM;
		}
		m_freem(*m_head);
		*m_head = new_head;
	}

	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
	     n > 0; n -= sizeof(pad))
		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
			break;

	if (n > 0) {
		m_freem(*m_head);
		device_printf(dev, "cannot pad short frame\n");
		DBG_COUNTER_INC(encap_pad_mbuf_fail);
		return (ENOBUFS);
	}

	return 0;
}
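
/*
 * Worked example (derived from the loop above): padding a 42-byte ARP frame
 * to a 60-byte minimum leaves n = 18, so a single m_append() of the full
 * 18-byte zero buffer suffices; a 30-byte frame would take two appends
 * (18 then 12) before n drops to or below zero.
 */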

static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;
	if_softc_ctx_t scctx;
	bus_dma_segment_t *segs;
	struct mbuf *m_head;
	void *next_txd;
	bus_dmamap_t map;
	struct if_pkt_info pi;
	int remap = 0;
	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
	bus_dma_tag_t desc_tag;

	segs = txq->ift_segs;
	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	ntxd = txq->ift_size;
	m_head = *m_headp;
	map = NULL;

	/*
	 * If we're doing TSO the next descriptor to clean may be quite far ahead
	 */
	cidx = txq->ift_cidx;
	pidx = txq->ift_pidx;
	if (ctx->ifc_flags & IFC_PREFETCH) {
		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
			next_txd = calc_next_txd(txq, cidx, 0);
			prefetch(next_txd);
		}

		/* prefetch the next cache line of mbuf pointers and flags */
		prefetch(&txq->ift_sds.ifsd_m[next]);
		if (txq->ift_sds.ifsd_map != NULL) {
			prefetch(&txq->ift_sds.ifsd_map[next]);
			next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
			prefetch(&txq->ift_sds.ifsd_flags[next]);
		}
	} else if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[pidx];

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		desc_tag = txq->ift_tso_desc_tag;
		max_segs = scctx->isc_tx_tso_segments_max;
	} else {
		desc_tag = txq->ift_desc_tag;
		max_segs = scctx->isc_tx_nsegments;
	}
	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
		if (err)
			return err;
	}
	m_head = *m_headp;

	pkt_info_zero(&pi);
	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
	pi.ipi_pidx = pidx;
	pi.ipi_qsidx = txq->ift_id;
	pi.ipi_len = m_head->m_pkthdr.len;
	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;

	/* deliberate bitwise OR to make one condition */
	if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
		if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0))
			return (err);
		m_head = *m_headp;
	}

retry:
	err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
defrag:
	if (__predict_false(err)) {
		switch (err) {
		case EFBIG:
			/* try collapse once and defrag once */
			if (remap == 0) {
				m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
				/* try defrag if collapsing fails */
				if (m_head == NULL)
					remap++;
			}
			if (remap == 1)
				m_head = m_defrag(*m_headp, M_NOWAIT);
			remap++;
			if (__predict_false(m_head == NULL))
				goto defrag_failed;
			txq->ift_mbuf_defrag++;
			*m_headp = m_head;
			goto retry;
			break;
		case ENOMEM:
			txq->ift_no_tx_dma_setup++;
			break;
		default:
			txq->ift_no_tx_dma_setup++;
			m_freem(*m_headp);
			DBG_COUNTER_INC(tx_frees);
			*m_headp = NULL;
			break;
		}
		txq->ift_map_failed++;
		DBG_COUNTER_INC(encap_load_mbuf_fail);
		return (err);
	}

	/*
	 * XXX assumes a 1 to 1 relationship between segments and
	 * descriptors - this does not hold true on all drivers, e.g.
	 * cxgb
	 */
	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
		txq->ift_no_desc_avail++;
		if (map != NULL)
			bus_dmamap_unload(desc_tag, map);
		DBG_COUNTER_INC(encap_txq_avail_fail);
		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
			GROUPTASK_ENQUEUE(&txq->ift_task);
		return (ENOBUFS);
	}
	/*
	 * On Intel cards we can greatly reduce the number of TX interrupts
	 * we see by only setting report status on every Nth descriptor.
	 * However, this also means that the driver will need to keep track
	 * of the descriptors that RS was set on to check them for the DD bit.
	 */
	txq->ift_rs_pending += nsegs + 1;
	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
	     iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
		pi.ipi_flags |= IPI_TX_INTR;
		txq->ift_rs_pending = 0;
	}
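
	/*
	 * Worked example (derived from the check above): with ift_size = 1024
	 * and ift_update_freq = 64, TXD_NOTIFY_COUNT is 15, so on a queue with
	 * fewer than 128 descriptors in use txq_max_rs_deferred() returns 2 and
	 * roughly every packet requests a completion interrupt; on a queue more
	 * than half full it returns 15, batching several packets per interrupt.
	 */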

	pi.ipi_segs = segs;
	pi.ipi_nsegs = nsegs;

	MPASS(pidx >= 0 && pidx < txq->ift_size);
#ifdef PKT_DEBUG
	print_pkt(&pi);
#endif
	if (map != NULL)
		bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
		if (map != NULL)
			bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
					BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		DBG_COUNTER_INC(tx_encap);
		MPASS(pi.ipi_new_pidx < txq->ift_size);

		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
		if (pi.ipi_new_pidx < pi.ipi_pidx) {
			ndesc += txq->ift_size;
			txq->ift_gen = 1;
		}
		/*
		 * drivers can need as many as
		 * two sentinels
		 */
		MPASS(ndesc <= pi.ipi_nsegs + 2);
		MPASS(pi.ipi_new_pidx != pidx);
		MPASS(ndesc > 0);
		txq->ift_in_use += ndesc;

		/*
		 * We update the last software descriptor again here because there may
		 * be a sentinel and/or there may be more mbufs than segments
		 */
		txq->ift_pidx = pi.ipi_new_pidx;
		txq->ift_npending += pi.ipi_ndescs;
	} else {
		*m_headp = m_head = iflib_remove_mbuf(txq);
		if (err == EFBIG) {
			txq->ift_txd_encap_efbig++;
			if (remap < 2) {
				remap = 1;
				goto defrag;
			}
		}
		DBG_COUNTER_INC(encap_txd_encap_fail);
		goto defrag_failed;
	}
	return (err);

defrag_failed:
	txq->ift_mbuf_defrag_failed++;
	txq->ift_map_failed++;
	m_freem(*m_headp);
	DBG_COUNTER_INC(tx_frees);
	*m_headp = NULL;
	return (ENOMEM);
}

static void
iflib_tx_desc_free(iflib_txq_t txq, int n)
{
	int hasmap;
	uint32_t qsize, cidx, mask, gen;
	struct mbuf *m, **ifsd_m;
	uint8_t *ifsd_flags;
	bus_dmamap_t *ifsd_map;
	bool do_prefetch;

	cidx = txq->ift_cidx;
	gen = txq->ift_gen;
	qsize = txq->ift_size;
	mask = qsize-1;
	hasmap = txq->ift_sds.ifsd_map != NULL;
	ifsd_flags = txq->ift_sds.ifsd_flags;
	ifsd_m = txq->ift_sds.ifsd_m;
	ifsd_map = txq->ift_sds.ifsd_map;
	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);

	while (n-- > 0) {
		if (do_prefetch) {
			prefetch(ifsd_m[(cidx + 3) & mask]);
			prefetch(ifsd_m[(cidx + 4) & mask]);
		}
		if (ifsd_m[cidx] != NULL) {
			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
			prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
			if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
				/*
				 * does it matter if it's not the TSO tag? If so we'll
				 * have to add the type to flags
				 */
				bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
				ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
			}
			if ((m = ifsd_m[cidx]) != NULL) {
				/* XXX we don't support any drivers that batch packets yet */
				MPASS(m->m_nextpkt == NULL);
				/* if the number of clusters exceeds the number of segments
				 * there won't be space on the ring to save a pointer to each
				 * cluster so we simply free the list here
				 */
				if (m->m_flags & M_TOOBIG) {
					m_freem(m);
				} else {
					m_free(m);
				}
				ifsd_m[cidx] = NULL;
#if MEMORY_LOGGING
				txq->ift_dequeued++;
#endif
				DBG_COUNTER_INC(tx_frees);
			}
		}
		if (__predict_false(++cidx == qsize)) {
			cidx = 0;
			gen = 0;
		}
	}
	txq->ift_cidx = cidx;
	txq->ift_gen = gen;
}

static __inline int
iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
{
	int reclaim;
	if_ctx_t ctx = txq->ift_ctx;

	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
	MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);

	/*
	 * Need a rate-limiting check so that this isn't called every time
	 */
	iflib_tx_credits_update(ctx, txq);
	reclaim = DESC_RECLAIMABLE(txq);

	if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
#ifdef INVARIANTS
		if (iflib_verbose_debug) {
			printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
			       txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
			       reclaim, thresh);
		}
#endif
		return (0);
	}
	iflib_tx_desc_free(txq, reclaim);
	txq->ift_cleaned += reclaim;
	txq->ift_in_use -= reclaim;

	return (reclaim);
}
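
/*
 * Note (descriptive): DESC_RECLAIMABLE() is processed - cleaned -
 * isc_tx_nsegments, so isc_tx_nsegments descriptors are always held back
 * as slack; with processed = 600, cleaned = 500 and isc_tx_nsegments = 8,
 * at most 92 descriptors are freed in one pass.
 */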

static struct mbuf **
_ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
{
	int next, size;
	struct mbuf **items;

	size = r->size;
	next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
	items = __DEVOLATILE(struct mbuf **, &r->items[0]);

	prefetch(items[(cidx + offset) & (size-1)]);
	if (remaining > 1) {
		prefetch2cachelines(&items[next]);
		prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
		prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
		prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
	}
	return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
}

static void
iflib_txq_check_drain(iflib_txq_t txq, int budget)
{

	ifmp_ring_check_drainage(txq->ift_br, budget);
}

static uint32_t
iflib_txq_can_drain(struct ifmp_ring *r)
{
	iflib_txq_t txq = r->cookie;
	if_ctx_t ctx = txq->ift_ctx;

	return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
		ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
}

static uint32_t
iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
	iflib_txq_t txq = r->cookie;
	if_ctx_t ctx = txq->ift_ctx;
	struct ifnet *ifp = ctx->ifc_ifp;
	struct mbuf **mp, *m;
	int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
	int reclaimed, err, in_use_prev, desc_used;
	bool do_prefetch, ring, rang;

	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
			    !LINK_ACTIVE(ctx))) {
		DBG_COUNTER_INC(txq_drain_notready);
		return (0);
	}
	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
	avail = IDXDIFF(pidx, cidx, r->size);
	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
		DBG_COUNTER_INC(txq_drain_flushing);
		for (i = 0; i < avail; i++) {
			m_free(r->items[(cidx + i) & (r->size-1)]);
			r->items[(cidx + i) & (r->size-1)] = NULL;
		}
		return (avail);
	}

	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
		DBG_COUNTER_INC(txq_drain_oactive);
		return (0);
	}
	if (reclaimed)
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
	count = MIN(avail, TX_BATCH_SIZE);
#ifdef INVARIANTS
	if (iflib_verbose_debug)
		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
#endif
	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
	avail = TXQ_AVAIL(txq);
	err = 0;
	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
		int rem = do_prefetch ? count - i : 0;

		mp = _ring_peek_one(r, cidx, i, rem);
		MPASS(mp != NULL && *mp != NULL);
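		/*
		 * Note (descriptive): _task_fn_tx() enqueues the txq pointer
		 * itself as a marker when it only needs to kick drainage, so a
		 * slot holding (struct mbuf *)txq is not a real packet and is
		 * simply consumed here.
		 */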
		if (__predict_false(*mp == (struct mbuf *)txq)) {
			consumed++;
			reclaimed++;
			continue;
		}
		in_use_prev = txq->ift_in_use;
		err = iflib_encap(txq, mp);
		if (__predict_false(err)) {
			DBG_COUNTER_INC(txq_drain_encapfail);
			/* no room - bail out */
			if (err == ENOBUFS)
				break;
			consumed++;
			/* we can't send this packet - skip it */
			continue;
		}
		consumed++;
		pkt_sent++;
		m = *mp;
		DBG_COUNTER_INC(tx_sent);
		bytes_sent += m->m_pkthdr.len;
		mcast_sent += !!(m->m_flags & M_MCAST);
		avail = TXQ_AVAIL(txq);

		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
		desc_used += (txq->ift_in_use - in_use_prev);
		ETHER_BPF_MTAP(ifp, m);
		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
			break;
		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
	}

	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
	ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
	if (mcast_sent)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
#ifdef INVARIANTS
	if (iflib_verbose_debug)
		printf("consumed=%d\n", consumed);
#endif
	return (consumed);
}

static uint32_t
iflib_txq_drain_always(struct ifmp_ring *r)
{
	return (1);
}

static uint32_t
iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
	int i, avail;
	struct mbuf **mp;
	iflib_txq_t txq;

	txq = r->cookie;

	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	CALLOUT_LOCK(txq);
	callout_stop(&txq->ift_timer);
	CALLOUT_UNLOCK(txq);

	avail = IDXDIFF(pidx, cidx, r->size);
	for (i = 0; i < avail; i++) {
		mp = _ring_peek_one(r, cidx, i, avail - i);
		if (__predict_false(*mp == (struct mbuf *)txq))
			continue;
		m_freem(*mp);
	}
	MPASS(ifmp_ring_is_stalled(r) == 0);
	return (avail);
}

static void
iflib_ifmp_purge(iflib_txq_t txq)
{
	struct ifmp_ring *r;

	r = txq->ift_br;
	r->drain = iflib_txq_drain_free;
	r->can_drain = iflib_txq_drain_always;

	ifmp_ring_check_drainage(r, r->size);

	r->drain = iflib_txq_drain;
	r->can_drain = iflib_txq_can_drain;
}
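
/*
 * Note (descriptive): the purge works by temporarily swapping the ring's
 * drain callbacks for ones that always drain and free instead of
 * transmitting, running one full drainage pass, then restoring the normal
 * callbacks.
 */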

static void
_task_fn_tx(void *context)
{
	iflib_txq_t txq = context;
	if_ctx_t ctx = txq->ift_ctx;
	struct ifnet *ifp = ctx->ifc_ifp;

#ifdef IFLIB_DIAGNOSTICS
	txq->ift_cpu_exec_count[curcpu]++;
#endif
	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;
	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
		/*
		 * If there are no available credits, and TX IRQs are not in use,
		 * re-schedule the task immediately.
		 */
		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
			netmap_tx_irq(ifp, txq->ift_id);
		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
		return;
	}
	if (txq->ift_db_pending)
		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE);
	ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
	if (ctx->ifc_flags & IFC_LEGACY)
		IFDI_INTR_ENABLE(ctx);
	else {
#ifdef INVARIANTS
		int rc =
#endif
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
		KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
	}
}

static void
_task_fn_rx(void *context)
{
	iflib_rxq_t rxq = context;
	if_ctx_t ctx = rxq->ifr_ctx;
	bool more;
	uint16_t budget;

#ifdef IFLIB_DIAGNOSTICS
	rxq->ifr_cpu_exec_count[curcpu]++;
#endif
	DBG_COUNTER_INC(task_fn_rxs);
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
	more = true;
#ifdef DEV_NETMAP
	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
		u_int work = 0;
		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
			more = false;
		}
	}
#endif
	budget = ctx->ifc_sysctl_rx_budget;
	if (budget == 0)
		budget = 16;	/* XXX */
	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
		if (ctx->ifc_flags & IFC_LEGACY)
			IFDI_INTR_ENABLE(ctx);
		else {
#ifdef INVARIANTS
			int rc =
#endif
				IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
			DBG_COUNTER_INC(rx_intr_enables);
		}
	}
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
	if (more)
		GROUPTASK_ENQUEUE(&rxq->ifr_task);
}

static void
_task_fn_admin(void *context)
{
	if_ctx_t ctx = context;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	iflib_txq_t txq;
	int i;
	bool oactive, running, do_reset, do_watchdog;

	STATE_LOCK(ctx);
	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
	STATE_UNLOCK(ctx);

	if ((!running && !oactive) &&
	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
		return;

	CTX_LOCK(ctx);
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
	}
	if (do_watchdog) {
		ctx->ifc_watchdog_events++;
		IFDI_WATCHDOG_RESET(ctx);
	}
	IFDI_UPDATE_ADMIN_STATUS(ctx);
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
	IFDI_LINK_INTR_ENABLE(ctx);
	if (do_reset)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);

	if (LINK_ACTIVE(ctx) == 0)
		return;
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
}


static void
_task_fn_iov(void *context)
{
	if_ctx_t ctx = context;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;

	CTX_LOCK(ctx);
	IFDI_VFLR_HANDLE(ctx);
	CTX_UNLOCK(ctx);
}

static int
iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	int err;
	if_int_delay_info_t info;
	if_ctx_t ctx;

	info = (if_int_delay_info_t)arg1;
	ctx = info->iidi_ctx;
	info->iidi_req = req;
	info->iidi_oidp = oidp;
	CTX_LOCK(ctx);
	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
	CTX_UNLOCK(ctx);
	return (err);
}

/*********************************************************************
 *
 *  IFNET FUNCTIONS
 *
 **********************************************************************/

static void
iflib_if_init_locked(if_ctx_t ctx)
{
	iflib_stop(ctx);
	iflib_init_locked(ctx);
}

static void
iflib_if_init(void *arg)
{
	if_ctx_t ctx = arg;

	CTX_LOCK(ctx);
	iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
}

static int
iflib_if_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	iflib_txq_t txq;
	int err, qidx;

	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
		DBG_COUNTER_INC(tx_frees);
		m_freem(m);
		return (ENOBUFS);
	}

	MPASS(m->m_nextpkt == NULL);
	qidx = 0;
	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
		qidx = QIDX(ctx, m);
	/*
	 * XXX calculate buf_ring based on flowid (divvy up bits?)
	 */
	txq = &ctx->ifc_txqs[qidx];

#ifdef DRIVER_BACKPRESSURE
	if (txq->ift_closed) {
		while (m != NULL) {
			next = m->m_nextpkt;
			m->m_nextpkt = NULL;
			m_freem(m);
			m = next;
		}
		return (ENOBUFS);
	}
#endif
#ifdef notyet
	qidx = count = 0;
	mp = marr;
	next = m;
	do {
		count++;
		next = next->m_nextpkt;
	} while (next != NULL);

	if (count > nitems(marr))
		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
			/* XXX check nextpkt */
			m_freem(m);
			/* XXX simplify for now */
			DBG_COUNTER_INC(tx_frees);
			return (ENOBUFS);
		}
	for (next = m, i = 0; next != NULL; i++) {
		mp[i] = next;
		next = next->m_nextpkt;
		mp[i]->m_nextpkt = NULL;
	}
#endif
	DBG_COUNTER_INC(tx_seen);
	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE);

	GROUPTASK_ENQUEUE(&txq->ift_task);
	if (err) {
		/* support forthcoming later */
#ifdef DRIVER_BACKPRESSURE
		txq->ift_closed = TRUE;
#endif
		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
		m_freem(m);
	}

	return (err);
}

static void
iflib_if_qflush(if_t ifp)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_txq_t txq = ctx->ifc_txqs;
	int i;

	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_QFLUSH;
	STATE_UNLOCK(ctx);
	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
			iflib_txq_check_drain(txq, 0);
	STATE_LOCK(ctx);
	ctx->ifc_flags &= ~IFC_QFLUSH;
	STATE_UNLOCK(ctx);

	if_qflush(ifp);
}


#define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
		     IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)

static int
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	bool avoid_reset = FALSE;
	int err = 0, reinit = 0, bits;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				reinit = 1;
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			err = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		CTX_LOCK(ctx);
		if (ifr->ifr_mtu == if_getmtu(ifp)) {
			CTX_UNLOCK(ctx);
			break;
		}
		bits = if_getdrvflags(ifp);
		/* stop the driver and free any clusters before proceeding */
		iflib_stop(ctx);

		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
			STATE_LOCK(ctx);
			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
				ctx->ifc_flags |= IFC_MULTISEG;
			else
				ctx->ifc_flags &= ~IFC_MULTISEG;
			STATE_UNLOCK(ctx);
			err = if_setmtu(ifp, ifr->ifr_mtu);
		}
		iflib_init_locked(ctx);
		STATE_LOCK(ctx);
		if_setdrvflags(ifp, bits);
		STATE_UNLOCK(ctx);
		CTX_UNLOCK(ctx);
		break;
	case SIOCSIFFLAGS:
		CTX_LOCK(ctx);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
				}
			} else
				reinit = 1;
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			iflib_stop(ctx);
		}
		ctx->ifc_if_flags = if_getflags(ifp);
		CTX_UNLOCK(ctx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			CTX_LOCK(ctx);
			IFDI_INTR_DISABLE(ctx);
			IFDI_MULTI_SET(ctx);
			IFDI_INTR_ENABLE(ctx);
			CTX_UNLOCK(ctx);
		}
		break;
	case SIOCSIFMEDIA:
		CTX_LOCK(ctx);
		IFDI_MEDIA_SET(ctx);
		CTX_UNLOCK(ctx);
		/* falls thru */
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
		break;
	case SIOCGI2C:
	{
		struct ifi2creq i2c;

		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (err != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			err = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			err = EINVAL;
			break;
		}

		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
			err = copyout(&i2c, ifr_data_get_ptr(ifr),
				      sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
	{
		int mask, setmask;

		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		setmask = 0;
#ifdef TCP_OFFLOAD
		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
#endif
		setmask |= (mask & IFCAP_FLAGS);

		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
			setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
		if ((mask & IFCAP_WOL) &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
			setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
		if_vlancap(ifp);
		/*
		 * want to ensure that traffic has stopped before we change any of the flags
		 */
		if (setmask) {
			CTX_LOCK(ctx);
			bits = if_getdrvflags(ifp);
			if (bits & IFF_DRV_RUNNING)
				iflib_stop(ctx);
			STATE_LOCK(ctx);
			if_togglecapenable(ifp, setmask);
			STATE_UNLOCK(ctx);
			if (bits & IFF_DRV_RUNNING)
				iflib_init_locked(ctx);
			STATE_LOCK(ctx);
			if_setdrvflags(ifp, bits);
			STATE_UNLOCK(ctx);
			CTX_UNLOCK(ctx);
		}
		break;
	}
	case SIOCGPRIVATE_0:
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		CTX_LOCK(ctx);
		err = IFDI_PRIV_IOCTL(ctx, command, data);
		CTX_UNLOCK(ctx);
		break;
	default:
		err = ether_ioctl(ifp, command, data);
		break;
	}
	if (reinit)
		iflib_if_init(ctx);
	return (err);
}

static uint64_t
iflib_if_get_counter(if_t ifp, ift_counter cnt)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	return (IFDI_GET_COUNTER(ctx, cnt));
}

/*********************************************************************
 *
 *  OTHER FUNCTIONS EXPORTED TO THE STACK
 *
 **********************************************************************/

static void
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	if ((void *)ctx != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))
		return;

	CTX_LOCK(ctx);
	IFDI_VLAN_REGISTER(ctx, vtag);
	/* Re-init to load the changes */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
}

static void
iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	if ((void *)ctx != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))
		return;

	CTX_LOCK(ctx);
	IFDI_VLAN_UNREGISTER(ctx, vtag);
	/* Re-init to load the changes */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
}

static void
iflib_led_func(void *arg, int onoff)
{
	if_ctx_t ctx = arg;

	CTX_LOCK(ctx);
	IFDI_LED_FUNC(ctx, onoff);
	CTX_UNLOCK(ctx);
}

/*********************************************************************
 *
 *  BUS FUNCTION DEFINITIONS
 *
 **********************************************************************/

int
iflib_device_probe(device_t dev)
{
	pci_vendor_info_t *ent;

	uint16_t pci_vendor_id, pci_device_id;
	uint16_t pci_subvendor_id, pci_subdevice_id;
	uint16_t pci_rev_id;
	if_shared_ctx_t sctx;

	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);
	pci_rev_id = pci_get_revid(dev);
	if (sctx->isc_parse_devinfo != NULL)
		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);

	ent = sctx->isc_vendor_info;
	while (ent->pvi_vendor_id != 0) {
		if (pci_vendor_id != ent->pvi_vendor_id) {
			ent++;
			continue;
		}
		if ((pci_device_id == ent->pvi_device_id) &&
		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
		     (ent->pvi_subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
		     (ent->pvi_subdevice_id == 0)) &&
		    ((pci_rev_id == ent->pvi_rev_id) ||
		     (ent->pvi_rev_id == 0))) {

			device_set_desc_copy(dev, ent->pvi_name);
			/* this needs to be changed to zero if the bus probing code
			 * ever stops re-probing on best match because the sctx
			 * may have its values overwritten by register calls
			 * in subsequent probes
			 */
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
|
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
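/*
 * Apply the user's ifc_sysctl_* overrides to the softc context queue and
 * descriptor counts, fall back to the shared context defaults otherwise,
 * and clamp every descriptor count into the driver's [min, max] range.
 */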
static void
iflib_reset_qvalues(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;
	int i;

	scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
	scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
	/*
	 * XXX sanity check that ntxd & nrxd are a power of 2
	 */
	if (ctx->ifc_sysctl_ntxqs != 0)
		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
	if (ctx->ifc_sysctl_nrxqs != 0)
		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;

	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (ctx->ifc_sysctl_ntxds[i] != 0)
			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
		else
			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
	}

	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (ctx->ifc_sysctl_nrxds[i] != 0)
			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
		else
			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
	}

	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
		}
		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
		}
	}

	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
		}
		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
		}
	}
}

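/*
 * Core attach path: allocate the context (and softc if the driver did not
 * supply one), run IFDI_ATTACH_PRE/POST around queue and interrupt setup,
 * and publish the ifnet.  Hardware drivers normally reach this through
 * iflib_device_attach() below; a minimal sketch of a direct caller, with
 * purely illustrative "mydrv" names, would be:
 *
 *	static int
 *	mydrv_attach(device_t dev)
 *	{
 *		if_ctx_t ctx;
 *
 *		return (iflib_device_register(dev, NULL, &mydrv_sctx, &ctx));
 *	}
 */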
int
iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
{
	int err, rid, msix;
	if_ctx_t ctx;
	if_t ifp;
	if_softc_ctx_t scctx;
	int i;
	uint16_t main_txq;
	uint16_t main_rxq;

	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);

	if (sc == NULL) {
		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
		device_set_softc(dev, ctx);
		ctx->ifc_flags |= IFC_SC_ALLOCATED;
	}

	ctx->ifc_sctx = sctx;
	ctx->ifc_dev = dev;
	ctx->ifc_softc = sc;

	if ((err = iflib_register(ctx)) != 0) {
		if (ctx->ifc_flags & IFC_SC_ALLOCATED)
			free(sc, M_IFLIB);
		free(ctx, M_IFLIB);
		device_printf(dev, "iflib_register failed %d\n", err);
		return (err);
	}
	iflib_add_device_sysctl_pre(ctx);

	scctx = &ctx->ifc_softc_ctx;
	ifp = ctx->ifc_ifp;

	iflib_reset_qvalues(ctx);
	CTX_LOCK(ctx);
	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
		CTX_UNLOCK(ctx);
		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
		return (err);
	}
	_iflib_pre_assert(scctx);
	ctx->ifc_txrx = *scctx->isc_txrx;

#ifdef INVARIANTS
	MPASS(scctx->isc_capenable);
	if (scctx->isc_capenable & IFCAP_TXCSUM)
		MPASS(scctx->isc_tx_csum_flags);
#endif

	if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);

	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;

#ifdef ACPI_DMAR
	if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
		ctx->ifc_flags |= IFC_DMAR;
#elif !(defined(__i386__) || defined(__amd64__))
	/* set unconditionally for !x86 */
	ctx->ifc_flags |= IFC_DMAR;
#endif

	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	/* XXX change for per-queue sizes */
	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (!powerof2(scctx->isc_nrxd[i])) {
			/* round down instead? */
			device_printf(dev, "# rx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev,
			    "# tx descriptors must be a power of 2");
			err = EINVAL;
			goto fail;
		}
	}

	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
		    MAX_SINGLE_PACKET_FRACTION);
	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_tso_segments_max = max(1,
		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);

	/*
	 * Protect the stack against modern hardware
	 */
	if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX)
		scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX;

	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
	ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max;
	ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max;
	ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
	if (scctx->isc_rss_table_size == 0)
		scctx->isc_rss_table_size = 64;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;

	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
	/* XXX format name */
	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");

	/* Set up cpu set.  If it fails, use the set of all CPUs. */
	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
		device_printf(dev, "Unable to fetch CPU list\n");
		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
	}
	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);

	/*
	 * Now set up MSI or MSI-X; this should return the number of
	 * supported vectors (1 for MSI).
	 */
	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
		msix = scctx->isc_vectors;
	} else if (scctx->isc_msix_bar != 0)
		/*
		 * The simple fact that isc_msix_bar is not 0 does not mean we
		 * have a good value there that is known to work.
		 */
		msix = iflib_msix_init(ctx);
	else {
		scctx->isc_vectors = 1;
		scctx->isc_ntxqsets = 1;
		scctx->isc_nrxqsets = 1;
		scctx->isc_intr = IFLIB_INTR_LEGACY;
		msix = 0;
	}
	/* Get memory for the station queues */
	if ((err = iflib_queues_alloc(ctx))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		goto fail;
	}

	if ((err = iflib_qset_structures_setup(ctx)))
		goto fail_queues;

	/*
	 * Group taskqueues aren't properly set up until SMP is started,
	 * so we disable interrupts until we can handle them post
	 * SI_SUB_SMP.
	 *
	 * XXX: disabling interrupts doesn't actually work, at least for
	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
	 * we do null handling and depend on this not causing too large an
	 * interrupt storm.
	 */
	IFDI_INTR_DISABLE(ctx);
	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
		goto fail_intr_free;
	}
	if (msix <= 1) {
		rid = 0;
		if (scctx->isc_intr == IFLIB_INTR_MSI) {
			MPASS(msix == 1);
			rid = 1;
		}
		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
			goto fail_intr_free;
		}
	}
	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
		goto fail_detach;
	}
	if ((err = iflib_netmap_attach(ctx))) {
		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
		goto fail_detach;
	}
	*ctxp = ctx;

	NETDUMP_SET(ctx->ifc_ifp, iflib);

	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
	iflib_add_device_sysctl_post(ctx);
	ctx->ifc_flags |= IFC_INIT_DONE;
	CTX_UNLOCK(ctx);
	return (0);
fail_detach:
	ether_ifdetach(ctx->ifc_ifp);
fail_intr_free:
	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
		pci_release_msi(ctx->ifc_dev);
fail_queues:
	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
fail:
	IFDI_DETACH(ctx);
	CTX_UNLOCK(ctx);
	return (err);
}

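/*
 * Attach path for pseudo (cloned/virtual) interfaces.  This parallels
 * iflib_device_register() but attaches the driver via IFDI_CLONEATTACH
 * and, since there is no hardware interrupt to allocate, forces
 * IFLIB_INTR_LEGACY.
 */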
int
iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
    struct iflib_cloneattach_ctx *clctx)
{
	int err;
	if_ctx_t ctx;
	if_t ifp;
	if_softc_ctx_t scctx;
	int i;
	void *sc;
	uint16_t main_txq;
	uint16_t main_rxq;

	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
	ctx->ifc_flags |= IFC_SC_ALLOCATED;
	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
		ctx->ifc_flags |= IFC_PSEUDO;

	ctx->ifc_sctx = sctx;
	ctx->ifc_softc = sc;
	ctx->ifc_dev = dev;

	if ((err = iflib_register(ctx)) != 0) {
		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
		free(sc, M_IFLIB);
		free(ctx, M_IFLIB);
		return (err);
	}
	iflib_add_device_sysctl_pre(ctx);

	scctx = &ctx->ifc_softc_ctx;
	ifp = ctx->ifc_ifp;

	/*
	 * XXX sanity check that ntxd & nrxd are a power of 2
	 */
	iflib_reset_qvalues(ctx);

	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
		return (err);
	}
	if (sctx->isc_flags & IFLIB_GEN_MAC)
		iflib_gen_mac(ctx);
	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
	    clctx->cc_params)) != 0) {
		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
		return (err);
	}
	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);

#ifdef INVARIANTS
	MPASS(scctx->isc_capenable);
	if (scctx->isc_capenable & IFCAP_TXCSUM)
		MPASS(scctx->isc_tx_csum_flags);
#endif

	if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);

	ifp->if_flags |= IFF_NOGROUP;
	if (sctx->isc_flags & IFLIB_PSEUDO) {
		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);

		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
			goto fail_detach;
		}
		*ctxp = ctx;

		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
		iflib_add_device_sysctl_post(ctx);
		ctx->ifc_flags |= IFC_INIT_DONE;
		return (0);
	}
	_iflib_pre_assert(scctx);
	ctx->ifc_txrx = *scctx->isc_txrx;

	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;

	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	/* XXX change for per-queue sizes */
	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (!powerof2(scctx->isc_nrxd[i])) {
			/* round down instead? */
			device_printf(dev, "# rx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev,
			    "# tx descriptors must be a power of 2");
			err = EINVAL;
			goto fail;
		}
	}

	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
		    MAX_SINGLE_PACKET_FRACTION);
	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_tso_segments_max = max(1,
		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);

	/*
	 * Protect the stack against modern hardware
	 */
	if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX)
		scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX;

	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
	ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max;
	ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max;
	ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
	if (scctx->isc_rss_table_size == 0)
		scctx->isc_rss_table_size = 64;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;

	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
	/* XXX format name */
	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");

	/* XXX --- can support > 1 -- but keep it simple for now */
	scctx->isc_intr = IFLIB_INTR_LEGACY;

	/* Get memory for the station queues */
	if ((err = iflib_queues_alloc(ctx))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		goto fail;
	}

	if ((err = iflib_qset_structures_setup(ctx))) {
		device_printf(dev, "qset structure setup failed %d\n", err);
		goto fail_queues;
	}
	/*
	 * XXX What if anything do we want to do about interrupts?
	 */
	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
		goto fail_detach;
	}
	/* XXX handle more than one queue */
	for (i = 0; i < scctx->isc_nrxqsets; i++)
		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);

	*ctxp = ctx;

	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
	iflib_add_device_sysctl_post(ctx);
	ctx->ifc_flags |= IFC_INIT_DONE;
	return (0);
fail_detach:
	ether_ifdetach(ctx->ifc_ifp);
fail_queues:
	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
fail:
	IFDI_DETACH(ctx);
	return (err);
}

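/*
 * Inverse of iflib_pseudo_register(): unhook VLAN events, detach the
 * ifnet, detach the per-queue and admin grouptasks, and free the queue
 * state.
 */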
int
iflib_pseudo_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	struct taskqgroup *tqg;
	iflib_fl_t fl;

	/* Unregister VLAN events */
	if (ctx->ifc_vlan_attach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
	if (ctx->ifc_vlan_detach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);

	ether_ifdetach(ifp);
	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
	CTX_LOCK_DESTROY(ctx);
	/* XXX drain any dependent tasks */
	tqg = qgroup_if_io_tqg;
	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
		callout_drain(&txq->ift_timer);
		if (txq->ift_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &txq->ift_task);
	}
	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
		if (rxq->ifr_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &rxq->ifr_task);

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			free(fl->ifl_rx_bitmap, M_IFLIB);
	}
	tqg = qgroup_if_config_tqg;
	if (ctx->ifc_admin_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
	if (ctx->ifc_vflr_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);

	if_free(ifp);

	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
		free(ctx->ifc_softc, M_IFLIB);
	free(ctx, M_IFLIB);
	return (0);
}

int
iflib_device_attach(device_t dev)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;

	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	pci_enable_busmaster(dev);

	return (iflib_device_register(dev, NULL, sctx, &ctx));
}

int
iflib_device_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	device_t dev = ctx->ifc_dev;
	int i, j;
	struct taskqgroup *tqg;
	iflib_fl_t fl;

	/* Make sure VLANs are not using driver */
	if (if_vlantrunkinuse(ifp)) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	CTX_LOCK(ctx);
	ctx->ifc_in_detach = 1;
	iflib_stop(ctx);
	CTX_UNLOCK(ctx);

	/* Unregister VLAN events */
	if (ctx->ifc_vlan_attach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
	if (ctx->ifc_vlan_detach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);

	iflib_netmap_detach(ifp);
	ether_ifdetach(ifp);
	if (ctx->ifc_led_dev != NULL)
		led_destroy(ctx->ifc_led_dev);
	/* XXX drain any dependent tasks */
	tqg = qgroup_if_io_tqg;
	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
		callout_drain(&txq->ift_timer);
		if (txq->ift_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &txq->ift_task);
	}
	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
		if (rxq->ifr_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &rxq->ifr_task);

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			free(fl->ifl_rx_bitmap, M_IFLIB);
	}
	tqg = qgroup_if_config_tqg;
	if (ctx->ifc_admin_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
	if (ctx->ifc_vflr_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
	CTX_LOCK(ctx);
	IFDI_DETACH(ctx);
	CTX_UNLOCK(ctx);

	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
	CTX_LOCK_DESTROY(ctx);
	device_set_softc(ctx->ifc_dev, NULL);
	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
		pci_release_msi(dev);
	}
	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
	}
	if (ctx->ifc_msix_mem != NULL) {
		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
		    ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
	}

	bus_generic_detach(dev);
	if_free(ifp);

	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
		free(ctx->ifc_softc, M_IFLIB);
	free(ctx, M_IFLIB);
	return (0);
}

int
iflib_device_detach(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	return (iflib_device_deregister(ctx));
}

int
iflib_device_suspend(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	IFDI_SUSPEND(ctx);
	CTX_UNLOCK(ctx);

	return bus_generic_suspend(dev);
}

int
iflib_device_shutdown(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	IFDI_SHUTDOWN(ctx);
	CTX_UNLOCK(ctx);

	return bus_generic_suspend(dev);
}

int
iflib_device_resume(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);
	iflib_txq_t txq = ctx->ifc_txqs;

	CTX_LOCK(ctx);
	IFDI_RESUME(ctx);
	iflib_init_locked(ctx);
	CTX_UNLOCK(ctx);
	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);

	return (bus_generic_resume(dev));
}

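/*
 * SR-IOV entry points: thin wrappers that run the corresponding IFDI
 * methods with the context lock held.
 */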
int
iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	int error;
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	error = IFDI_IOV_INIT(ctx, num_vfs, params);
	CTX_UNLOCK(ctx);

	return (error);
}

void
iflib_device_iov_uninit(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	IFDI_IOV_UNINIT(ctx);
	CTX_UNLOCK(ctx);
}

int
iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	int error;
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
	CTX_UNLOCK(ctx);

	return (error);
}

/*********************************************************************
 *
 *  MODULE FUNCTION DEFINITIONS
 *
 **********************************************************************/

/*
 * - Start a fast taskqueue thread for each core
 * - Start a taskqueue for control operations
 */
static int
iflib_module_init(void)
{
	return (0);
}

static int
iflib_module_event_handler(module_t mod, int what, void *arg)
{
	int err;

	switch (what) {
	case MOD_LOAD:
		if ((err = iflib_module_init()) != 0)
			return (err);
		break;
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}

	return (0);
}

/*********************************************************************
 *
 *  PUBLIC FUNCTION DEFINITIONS
 *     ordered as in iflib.h
 *
 **********************************************************************/

static void
_iflib_assert(if_shared_ctx_t sctx)
{
	MPASS(sctx->isc_tx_maxsize);
	MPASS(sctx->isc_tx_maxsegsize);

	MPASS(sctx->isc_rx_maxsize);
	MPASS(sctx->isc_rx_nsegments);
	MPASS(sctx->isc_rx_maxsegsize);

	MPASS(sctx->isc_nrxd_min[0]);
	MPASS(sctx->isc_nrxd_max[0]);
	MPASS(sctx->isc_nrxd_default[0]);
	MPASS(sctx->isc_ntxd_min[0]);
	MPASS(sctx->isc_ntxd_max[0]);
	MPASS(sctx->isc_ntxd_default[0]);
}

static void
_iflib_pre_assert(if_softc_ctx_t scctx)
{

	MPASS(scctx->isc_txrx->ift_txd_encap);
	MPASS(scctx->isc_txrx->ift_txd_flush);
	MPASS(scctx->isc_txrx->ift_txd_credits_update);
	MPASS(scctx->isc_txrx->ift_rxd_available);
	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
	MPASS(scctx->isc_txrx->ift_rxd_refill);
	MPASS(scctx->isc_txrx->ift_rxd_flush);
}

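/*
 * Allocate the ifnet, point its methods at the iflib handlers, compile
 * the driver's kobj method table, and register for VLAN config events.
 */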
static int
iflib_register(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	driver_t *driver = sctx->isc_driver;
	device_t dev = ctx->ifc_dev;
	if_t ifp;

	_iflib_assert(sctx);

	CTX_LOCK_INIT(ctx);
	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
	ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (ENOMEM);
	}

	/*
	 * Initialize our context's device specific methods
	 */
	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
	kobj_class_compile((kobj_class_t) driver);
	driver->refs++;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, ctx);
	if_setdev(ifp, dev);
	if_setinitfn(ifp, iflib_if_init);
	if_setioctlfn(ifp, iflib_if_ioctl);
	if_settransmitfn(ifp, iflib_if_transmit);
	if_setqflushfn(ifp, iflib_if_qflush);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	ctx->ifc_vlan_attach_event =
	    EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
	        EVENTHANDLER_PRI_FIRST);
	ctx->ifc_vlan_detach_event =
	    EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
	        EVENTHANDLER_PRI_FIRST);

	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
	    iflib_media_change, iflib_media_status);

	return (0);
}

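/*
 * Allocate the software queue state and the DMA-able descriptor rings,
 * sized from the softc context (isc_ntxqsets/isc_nrxqsets and the
 * isc_txqsizes/isc_rxqsizes arrays), then pass the ring addresses down
 * via IFDI_TX_QUEUES_ALLOC() and IFDI_RX_QUEUES_ALLOC().
 */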
static int
iflib_queues_alloc(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	int nrxqsets = scctx->isc_nrxqsets;
	int ntxqsets = scctx->isc_ntxqsets;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	iflib_fl_t fl = NULL;
	int i, j, cpu, err, txconf, rxconf;
	iflib_dma_info_t ifdip;
	uint32_t *rxqsizes = scctx->isc_rxqsizes;
	uint32_t *txqsizes = scctx->isc_txqsizes;
	uint8_t nrxqs = sctx->isc_nrxqs;
	uint8_t ntxqs = sctx->isc_ntxqs;
	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
	caddr_t *vaddrs;
	uint64_t *paddrs;

	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));

	/* Allocate the TX ring struct memory */
	if (!(ctx->ifc_txqs =
	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Now allocate the RX */
	if (!(ctx->ifc_rxqs =
	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		err = ENOMEM;
		goto rx_fail;
	}

	txq = ctx->ifc_txqs;
	rxq = ctx->ifc_rxqs;

	/*
	 * XXX handle allocation failure
	 */
	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
		/* Set up some basics */

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
			device_printf(dev, "failed to allocate iflib_dma_info\n");
			err = ENOMEM;
			goto err_tx_desc;
		}
		txq->ift_ifdi = ifdip;
		for (j = 0; j < ntxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
				device_printf(dev, "Unable to allocate Descriptor memory\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
		}
		txq->ift_ctx = ctx;
		txq->ift_id = i;
		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
			txq->ift_br_offset = 1;
		} else {
			txq->ift_br_offset = 0;
		}
		/* XXX fix this */
		txq->ift_timer.c_cpu = cpu;

		if (iflib_txsd_alloc(txq)) {
			device_printf(dev, "Critical Failure setting up TX buffers\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		/* Initialize the TX lock */
		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
		    device_get_nameunit(dev), txq->ift_id);
		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);

		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
		    device_get_nameunit(dev), txq->ift_id);

		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
		    iflib_txq_can_drain, M_IFLIB, M_WAITOK);
		if (err) {
			/* XXX free any allocated rings */
			device_printf(dev, "Unable to allocate buf_ring\n");
			goto err_tx_desc;
		}
	}

	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
		/* Set up some basics */

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
			device_printf(dev, "failed to allocate iflib_dma_info\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		rxq->ifr_ifdi = ifdip;
		/* XXX this needs to be changed if #rx queues != #tx queues */
		rxq->ifr_ntxqirq = 1;
		rxq->ifr_txqid[0] = i;
		for (j = 0; j < nrxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
				device_printf(dev, "Unable to allocate Descriptor memory\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
		}
		rxq->ifr_ctx = ctx;
		rxq->ifr_id = i;
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			rxq->ifr_fl_offset = 1;
		} else {
			rxq->ifr_fl_offset = 0;
		}
		rxq->ifr_nfl = nfree_lists;
		if (!(fl =
		    (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate free list memory\n");
			err = ENOMEM;
			goto err_tx_desc;
		}
		rxq->ifr_fl = fl;
		for (j = 0; j < nfree_lists; j++) {
			fl[j].ifl_rxq = rxq;
			fl[j].ifl_id = j;
			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
		}
		/* Allocate receive buffers for the ring */
		if (iflib_rxsd_alloc(rxq)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			err = ENOMEM;
			goto err_rx_desc;
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
	}

	/* TXQs */
	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
	for (i = 0; i < ntxqsets; i++) {
		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;

		for (j = 0; j < ntxqs; j++, di++) {
			vaddrs[i*ntxqs + j] = di->idi_vaddr;
			paddrs[i*ntxqs + j] = di->idi_paddr;
		}
	}
	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
		iflib_tx_structures_free(ctx);
		free(vaddrs, M_IFLIB);
		free(paddrs, M_IFLIB);
		goto err_rx_desc;
	}
	free(vaddrs, M_IFLIB);
	free(paddrs, M_IFLIB);

	/* RXQs */
	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
	for (i = 0; i < nrxqsets; i++) {
		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;

		for (j = 0; j < nrxqs; j++, di++) {
			vaddrs[i*nrxqs + j] = di->idi_vaddr;
			paddrs[i*nrxqs + j] = di->idi_paddr;
		}
	}
	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
		iflib_tx_structures_free(ctx);
		free(vaddrs, M_IFLIB);
		free(paddrs, M_IFLIB);
		goto err_rx_desc;
	}
	free(vaddrs, M_IFLIB);
	free(paddrs, M_IFLIB);

	return (0);

	/* XXX handle allocation failure changes */
err_rx_desc:
err_tx_desc:
rx_fail:
	if (ctx->ifc_rxqs != NULL)
		free(ctx->ifc_rxqs, M_IFLIB);
	ctx->ifc_rxqs = NULL;
	if (ctx->ifc_txqs != NULL)
		free(ctx->ifc_txqs, M_IFLIB);
	ctx->ifc_txqs = NULL;
fail:
	return (err);
}

static int
iflib_tx_structures_setup(if_ctx_t ctx)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	int i;

	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
		iflib_txq_setup(txq);

	return (0);
}

static void
iflib_tx_structures_free(if_ctx_t ctx)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	int i, j;

	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
		iflib_txq_destroy(txq);
		for (j = 0; j < ctx->ifc_nhwtxqs; j++)
			iflib_dma_free(&txq->ift_ifdi[j]);
	}
	free(ctx->ifc_txqs, M_IFLIB);
	ctx->ifc_txqs = NULL;
	IFDI_QUEUES_FREE(ctx);
}

/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
iflib_rx_structures_setup(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	int q;
#if defined(INET6) || defined(INET)
	int i, err;
#endif

	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
		tcp_lro_free(&rxq->ifr_lc);
		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
		    TCP_LRO_ENTRIES, min(1024,
		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
			goto fail;
		}
		rxq->ifr_lro_enabled = TRUE;
#endif
		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
	}
	return (0);
#if defined(INET6) || defined(INET)
fail:
	/*
	 * Free RX software descriptors allocated so far; we will only handle
	 * the rings that completed, as the failing case will have cleaned up
	 * after itself.  'q' failed, so it's the terminus.
	 */
	rxq = ctx->ifc_rxqs;
	for (i = 0; i < q; ++i, rxq++) {
		iflib_rx_sds_free(rxq);
		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
	}
	return (err);
#endif
}

/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
iflib_rx_structures_free(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
		iflib_rx_sds_free(rxq);
	}
}

static int
iflib_qset_structures_setup(if_ctx_t ctx)
{
	int err;

	/*
	 * It is expected that the caller takes care of freeing queues if this
	 * fails.
	 */
	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
		return (err);
	}

	if ((err = iflib_rx_structures_setup(ctx)) != 0)
		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);

	return (err);
}

int
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
{

	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
}

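/*
 * CPU affinity helpers.  On SMP kernels queues are spread round-robin
 * over the usable CPUs: find_nth() returns the qid'th (modulo the CPU
 * count) member of the context's CPU set.
 */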
#ifdef SMP
static int
find_nth(if_ctx_t ctx, int qid)
{
	cpuset_t cpus;
	int i, cpuid, eqid, count;

	CPU_COPY(&ctx->ifc_cpus, &cpus);
	count = CPU_COUNT(&cpus);
	eqid = qid % count;
	/* clear up to the qid'th bit */
	for (i = 0; i < eqid; i++) {
		cpuid = CPU_FFS(&cpus);
		MPASS(cpuid != 0);
		CPU_CLR(cpuid-1, &cpus);
	}
	cpuid = CPU_FFS(&cpus);
	MPASS(cpuid != 0);
	return (cpuid-1);
}

#ifdef SCHED_ULE
extern struct cpu_group *cpu_top;              /* CPU topology */

static int
find_child_with_core(int cpu, struct cpu_group *grp)
{
	int i;

	if (grp->cg_children == 0)
		return -1;

	MPASS(grp->cg_child);
	for (i = 0; i < grp->cg_children; i++) {
		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
			return i;
	}

	return -1;
}

/*
 * Find the nth "close" core to the specified core, where "close" is
 * defined as the deepest level that shares at least an L2 cache.  With
 * threads, this will be threads on the same core.  If the shared cache
 * is L3 or higher, simply return the same core.
 */
static int
find_close_core(int cpu, int core_offset)
{
	struct cpu_group *grp;
	int i;
	int fcpu;
	cpuset_t cs;

	grp = cpu_top;
	if (grp == NULL)
		return cpu;
	i = 0;
	while ((i = find_child_with_core(cpu, grp)) != -1) {
		/* If the child only has one cpu, don't descend */
		if (grp->cg_child[i].cg_count <= 1)
			break;
		grp = &grp->cg_child[i];
	}

	/* If they don't share at least an L2 cache, use the same CPU */
	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
		return cpu;

	/* Now pick one */
	CPU_COPY(&grp->cg_mask, &cs);

	/* Add the selected CPU offset to core offset. */
	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
		if (fcpu - 1 == cpu)
			break;
		CPU_CLR(fcpu - 1, &cs);
	}
	MPASS(fcpu);

	core_offset += i;

	CPU_COPY(&grp->cg_mask, &cs);
	for (i = core_offset % grp->cg_count; i > 0; i--) {
		MPASS(CPU_FFS(&cs));
		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
	}
	MPASS(CPU_FFS(&cs));
	return CPU_FFS(&cs) - 1;
}
#else
static int
find_close_core(int cpu, int core_offset __unused)
{
	return cpu;
}
#endif

static int
get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
{
	switch (type) {
	case IFLIB_INTR_TX:
		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
		/* XXX handle multiple RX threads per core and more than two core per L2 group */
		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
	case IFLIB_INTR_RX:
	case IFLIB_INTR_RXTX:
		/* RX queues get the specified core */
		return qid / CPU_COUNT(&ctx->ifc_cpus);
	default:
		return -1;
	}
}
#else
#define get_core_offset(ctx, type, qid)	CPU_FIRST()
#define find_close_core(cpuid, tid)	CPU_FIRST()
#define find_nth(ctx, gid)		CPU_FIRST()
#endif

/* Just to avoid copy/paste */
static inline int
iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
    struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
{
	int cpuid;
	int err, tid;

	cpuid = find_nth(ctx, qid);
	tid = get_core_offset(ctx, type, qid);
	MPASS(tid >= 0);
	cpuid = find_close_core(cpuid, tid);
	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
	if (err) {
		device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
		return (err);
	}
#ifdef notyet
	if (cpuid > ctx->ifc_cpuid_highest)
		ctx->ifc_cpuid_highest = cpuid;
#endif
	return 0;
}

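/*
 * Allocate a hardware interrupt of the given type, wire up the matching
 * fast interrupt handler and deferred grouptask, and (for TX/RX types)
 * bind the task to a CPU chosen by the affinity helpers above.
 */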
int
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
    iflib_intr_type_t type, driver_filter_t *filter,
    void *filter_arg, int qid, const char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	iflib_filter_info_t info;
	gtask_fn_t *fn;
	int tqrid, err;
	driver_filter_t *intr_fast;
	void *q;

	info = &ctx->ifc_filter_info;
	tqrid = rid;

	switch (type) {
	/* XXX merge tx/rx for netmap? */
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		info = &ctx->ifc_txqs[qid].ift_filter_info;
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RXTX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr_rxtx;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_ADMIN:
		q = ctx;
		tqrid = -1;
		info = &ctx->ifc_filter_info;
		gtask = &ctx->ifc_admin_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_admin;
		intr_fast = iflib_fast_intr_ctx;
		break;
	default:
		panic("unknown net intr type");
	}

	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = q;

	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
	if (err != 0) {
		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
		return (err);
	}
	if (type == IFLIB_INTR_ADMIN)
		return (0);

	if (tqrid != -1) {
		err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
		if (err)
			return (err);
	} else {
		taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
	}

	return (0);
}
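
/*
 * Set up a queue task that is driven by a softirq rather than by its
 * own hardware vector.  When an irq is supplied, its vector number is
 * used only to choose CPU affinity; no filter is installed here.
 */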
void
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	void *q;
	int irq_num = -1;
	int err;

	switch (type) {
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		if (irq != NULL)
			irq_num = rman_get_start(irq->ii_res);
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		if (irq != NULL)
			irq_num = rman_get_start(irq->ii_res);
		break;
	case IFLIB_INTR_IOV:
		q = ctx;
		gtask = &ctx->ifc_vflr_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_iov;
		break;
	default:
		panic("unknown net intr type");
	}
	GROUPTASK_INIT(gtask, 0, fn, q);
	if (irq_num != -1) {
		err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
		if (err)
			taskqgroup_attach(tqg, gtask, q, irq_num, name);
	} else {
		taskqgroup_attach(tqg, gtask, q, irq_num, name);
	}
}
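
/*
 * Release an interrupt previously set up via _iflib_irq_alloc(): tear
 * down the handler first, then free the underlying IRQ resource.
 */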
void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{
	if (irq->ii_tag)
		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);

	if (irq->ii_res)
		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
}
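
/*
 * Legacy (INTx or single-MSI) setup: one shared vector drives RX queue
 * 0's filter and task, and the TX task is attached to the same vector.
 */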
static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_irq_t irq = &ctx->ifc_legacy_irq;
	iflib_filter_info_t info;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	int tqrid;
	void *q;
	int err;

	q = &ctx->ifc_rxqs[0];
	info = &rxq[0].ifr_filter_info;
	gtask = &rxq[0].ifr_task;
	tqg = qgroup_if_io_tqg;
	tqrid = irq->ii_rid = *rid;
	fn = _task_fn_rx;

	ctx->ifc_flags |= IFC_LEGACY;
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = ctx;

	/* We allocate a single interrupt resource */
	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
		return (err);
	GROUPTASK_INIT(gtask, 0, fn, q);
	taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);

	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
	return (0);
}

void
iflib_led_create(if_ctx_t ctx)
{

	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
	    device_get_nameunit(ctx->ifc_dev));
}
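
/*
 * Deferred-work entry points for drivers: each enqueues the matching
 * group task (TX, RX, admin, or IOV/VFLR) so the work runs in task
 * context rather than in the interrupt filter.
 */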
void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}

void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}

void
iflib_admin_intr_deferred(if_ctx_t ctx)
{
#ifdef INVARIANTS
	struct grouptask *gtask;

	gtask = &ctx->ifc_admin_task;
	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
#endif

	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}

void
iflib_iov_intr_deferred(if_ctx_t ctx)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}

void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
{

	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
}
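
/*
 * Attach/detach helpers for driver-private tasks on the shared iflib
 * config taskqgroup.
 */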
void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
    const char *name)
{

	GROUPTASK_INIT(gtask, 0, fn, ctx);
	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
}

void
iflib_config_gtask_deinit(struct grouptask *gtask)
{

	taskqgroup_detach(qgroup_if_config_tqg, gtask);
}
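
/*
 * Driver callback for link transitions: record the new baudrate,
 * enable descriptor prefetch on links of 10 Gb/s and up, mark all TX
 * queues idle on link-down so the watchdog stays quiet, and push the
 * state change up to the stack.
 */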
void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq = ctx->ifc_txqs;

	if_setbaudrate(ifp, baudrate);
	if (baudrate >= IF_Gbps(10)) {
		STATE_LOCK(ctx);
		ctx->ifc_flags |= IFC_PREFETCH;
		STATE_UNLOCK(ctx);
	}
	/* If link down, disable watchdog */
	if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	}
	ctx->ifc_link_state = link_state;
	if_link_state_change(ifp, link_state);
}
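
/*
 * Pull completed-descriptor credits from the driver and fold them into
 * the TX queue's processed counters, wrapping the processed consumer
 * index modulo the ring size.  Returns the number of credits reclaimed
 * (0 if the driver has no credits-update method or nothing completed).
 */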
static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
	int credits;
#ifdef INVARIANTS
	int credits_pre = txq->ift_cidx_processed;
#endif

	if (ctx->isc_txd_credits_update == NULL)
		return (0);

	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
		return (0);

	txq->ift_processed += credits;
	txq->ift_cidx_processed += credits;

	MPASS(credits_pre + credits == txq->ift_cidx_processed);
	if (txq->ift_cidx_processed >= txq->ift_size)
		txq->ift_cidx_processed -= txq->ift_size;
	return (credits);
}

static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{

	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
	    budget));
}

void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
    const char *description, if_int_delay_info_t info,
    int offset, int value)
{
	info->iidi_ctx = ctx;
	info->iidi_offset = offset;
	info->iidi_value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, iflib_sysctl_int_delay, "I", description);
}

struct sx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

	return (&ctx->ifc_ctx_sx);
}
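
/*
 * Negotiate the interrupt mode for the device: prefer MSI-X, sizing
 * the vector request from the hardware's message count, the CPU count
 * (and RSS bucket count when RSS is enabled), and the administrative
 * overrides; on any failure fall back to a single MSI vector, and
 * failing that to a legacy INTx interrupt.
 */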
static int
iflib_msix_init(if_ctx_t ctx)
{
	device_t dev = ctx->ifc_dev;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
	int iflib_num_tx_queues, iflib_num_rx_queues;
	int err, admincnt, bar;

	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;

	device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));

	bar = ctx->ifc_softc_ctx.isc_msix_bar;
	admincnt = sctx->isc_admin_intrcnt;
	/* Override by global tuneable */
	{
		int i;
		size_t len = sizeof(i);

		err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0);
		if (err == 0) {
			if (i == 0)
				goto msi;
		} else {
			device_printf(dev, "unable to read hw.pci.enable_msix.\n");
		}
	}
	/* Override by tuneable */
	if (scctx->isc_disable_msix)
		goto msi;

	/*
	** When used in a virtualized environment
	** PCI BUSMASTER capability may not be set
	** so explicitly set it here and rewrite
	** the ENABLE in the MSIX control register
	** at this point to cause the host to
	** successfully initialize us.
	*/
	{
		int msix_ctrl, rid;

		pci_enable_busmaster(dev);
		rid = 0;
		if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) {
			rid += PCIR_MSIX_CTRL;
			msix_ctrl = pci_read_config(dev, rid, 2);
			msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
			pci_write_config(dev, rid, msix_ctrl, 2);
		} else {
			device_printf(dev, "PCIY_MSIX capability not found; "
			    "or rid %d == 0.\n", rid);
			goto msi;
		}
	}

	/*
	 * bar == -1 => "trust me I know what I'm doing"
	 * Some drivers are for hardware that is so shoddily
	 * documented that no one knows which bars are which
	 * so the developer has to map all bars. This hack
	 * allows shoddy garbage to use msix in this framework.
	 */
	if (bar != -1) {
		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &bar, RF_ACTIVE);
		if (ctx->ifc_msix_mem == NULL) {
			/* May not be enabled */
			device_printf(dev, "Unable to map MSIX table\n");
			goto msi;
		}
	}
	/* First try MSI/X */
	if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */
		device_printf(dev, "System has MSIX disabled\n");
		bus_release_resource(dev, SYS_RES_MEMORY,
		    bar, ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
		goto msi;
	}
#if IFLIB_DEBUG
	/* use only 1 qset in debug mode */
	queuemsgs = min(msgs - admincnt, 1);
#else
	queuemsgs = msgs - admincnt;
#endif
#ifdef RSS
	queues = imin(queuemsgs, rss_getnumbuckets());
#else
	queues = queuemsgs;
#endif
	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
	device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
	    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
		rx_queues = iflib_num_rx_queues;
	else
		rx_queues = queues;

	if (rx_queues > scctx->isc_nrxqsets)
		rx_queues = scctx->isc_nrxqsets;

	/*
	 * We want this to be all logical CPUs by default
	 */
	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
		tx_queues = iflib_num_tx_queues;
	else
		tx_queues = mp_ncpus;

	if (tx_queues > scctx->isc_ntxqsets)
		tx_queues = scctx->isc_ntxqsets;

	if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
		if (tx_queues != rx_queues)
			device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
		tx_queues = min(rx_queues, tx_queues);
		rx_queues = min(rx_queues, tx_queues);
	}

	device_printf(dev, "using %d rx queues %d tx queues\n", rx_queues, tx_queues);

	vectors = rx_queues + admincnt;
	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
		device_printf(dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		scctx->isc_vectors = vectors;
		scctx->isc_nrxqsets = rx_queues;
		scctx->isc_ntxqsets = tx_queues;
		scctx->isc_intr = IFLIB_INTR_MSIX;

		return (vectors);
	} else {
		device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
	}
msi:
	vectors = pci_msi_count(dev);
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;
	scctx->isc_vectors = vectors;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
		device_printf(dev, "Using an MSI interrupt\n");
		scctx->isc_intr = IFLIB_INTR_MSI;
	} else {
		device_printf(dev, "Using a Legacy interrupt\n");
		scctx->isc_intr = IFLIB_INTR_LEGACY;
	}

	return (vectors);
}
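
/*
 * Sysctl handler that renders an mp_ring's producer/consumer indices
 * and state word as human-readable text.
 */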
char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };

static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
	int rc;
	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
	struct sbuf *sb;
	char *ring_state = "UNKNOWN";

	/* XXX needed ? */
	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
	MPASS(sb != NULL);
	if (sb == NULL)
		return (ENOMEM);
	if (state[3] <= 3)
		ring_state = ring_states[state[3]];

	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
	    state[0], state[1], state[2], ring_state);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}
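
/*
 * Sysctl/tunable handler behind override_ntxds and override_nrxds: it
 * prints the current per-queue descriptor counts as a comma-separated
 * list and, on write, parses such a list back into the array.  E.g.
 * (illustrative only; the "foo" device name and value are hypothetical):
 *
 *	sysctl dev.foo.0.iflib.override_ntxds=1024
 */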
enum iflib_ndesc_handler {
	IFLIB_NTXD_HANDLER,
	IFLIB_NRXD_HANDLER,
};

static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
	if_ctx_t ctx = (void *)arg1;
	enum iflib_ndesc_handler type = arg2;
	char buf[256] = {0};
	qidx_t *ndesc;
	char *p, *next;
	int nqs, rc, i;

	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);

	nqs = 8;
	switch (type) {
	case IFLIB_NTXD_HANDLER:
		ndesc = ctx->ifc_sysctl_ntxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_ntxqs;
		break;
	case IFLIB_NRXD_HANDLER:
		ndesc = ctx->ifc_sysctl_nrxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_nrxqs;
		break;
	default:
		panic("unhandled type");
	}
	if (nqs == 0)
		nqs = 8;

	for (i = 0; i < 8; i++) {
		if (i >= nqs)
			break;
		if (i)
			strcat(buf, ",");
		sprintf(strchr(buf, 0), "%d", ndesc[i]);
	}

	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (rc || req->newptr == NULL)
		return (rc);

	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
	    i++, p = strsep(&next, " ,")) {
		ndesc[i] = strtoul(p, NULL, 10);
	}

	return (rc);
}

#define NAME_BUFLEN 32
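
/*
 * Create the per-device "iflib" sysctl node and the tunables that must
 * be in place before queues are allocated: queue-count and
 * descriptor-count overrides, the MSI-X disable knob, and the RX
 * budget.
 */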
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
	    CTLFLAG_RD, NULL, "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
	    "driver version");

	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSIX (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
	    "set the rx budget");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of tx descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of rx descriptors to use, 0 = use default #");
}
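
/*
 * Once the queues exist, hang per-TX-queue, per-RX-queue, and
 * per-freelist statistics nodes beneath the "iflib" sysctl node.
 */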
static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;
	ctx_list = device_get_sysctl_ctx(dev);

	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD,
		    &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD,
		    &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag, "# of times m_defrag was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD,
		    &txq->ift_pullups, "# of times m_pullup was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD,
		    &txq->ift_no_desc_avail, "# of times no descriptors were available");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
		    CTLFLAG_RD,
		    &txq->ift_map_failed, "# of times dma map failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
		    CTLFLAG_RD,
		    &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD,
		    &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD,
		    &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD,
		    &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
		    CTLFLAG_RD,
		    &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD,
		    &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
		    CTLFLAG_RD,
		    &txq->ift_processed, "descriptors processed for clean");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD,
		    &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
		    0, mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
		    CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
		    CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
		    CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
		    CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
		    CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
		    CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
			    CTLFLAG_RD, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD,
			    &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD,
			    &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD,
			    &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}

}
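
/*
 * On strict-alignment architectures the 14-byte Ethernet header leaves
 * the IP header on a 2-byte boundary; shift the received frame up by
 * ETHER_HDR_LEN bytes, or split the Ethernet header into its own mbuf
 * when the shifted frame would not fit in the cluster, so the
 * network-layer header ends up 32-bit aligned.
 */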
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif
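
/*
 * Netdump glue: export the RX ring geometry to the netdump client,
 * prime the freelist zones and disable TX batching when a dump starts,
 * and provide a polled transmit path on TX queue 0 plus a poll routine
 * that reclaims and receives without interrupts, for use from panic
 * context.
 */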
#ifdef NETDUMP
static void
iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
{
	if_ctx_t ctx;

	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}

static void
iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case NETDUMP_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				fl = &rxq->ifr_fl[j];
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}

static int
iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
	return (error);
}

static int
iflib_netdump_poll(struct ifnet *ifp, int count)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	return (0);
}
#endif /* NETDUMP */