/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/md5.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet/netdump/netdump.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>
#include <net/iflib_private.h>

#include "ifdi_if.h"

#ifdef PCI_IOV
#include <dev/pci/pci_iov.h>
#endif

#include <sys/bitstring.h>

/*
 * enable accounting of every mbuf as it comes in to and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis a vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 *
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);

typedef struct iflib_filter_info {
        driver_filter_t *ifi_filter;
        void *ifi_filter_arg;
        struct grouptask *ifi_task;
        void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
        KOBJ_FIELDS;
        /*
         * Pointer to hardware driver's softc
         */
        void *ifc_softc;
        device_t ifc_dev;
        if_t ifc_ifp;

        cpuset_t ifc_cpus;
        if_shared_ctx_t ifc_sctx;
        struct if_softc_ctx ifc_softc_ctx;

        struct sx ifc_ctx_sx;
        struct mtx ifc_state_mtx;

        iflib_txq_t ifc_txqs;
        iflib_rxq_t ifc_rxqs;
        uint32_t ifc_if_flags;
        uint32_t ifc_flags;
        uint32_t ifc_max_fl_buf_size;

        int ifc_link_state;
        int ifc_link_irq;
        int ifc_watchdog_events;
        struct cdev *ifc_led_dev;
        struct resource *ifc_msix_mem;

        struct if_irq ifc_legacy_irq;
        struct grouptask ifc_admin_task;
        struct grouptask ifc_vflr_task;
        struct iflib_filter_info ifc_filter_info;
        struct ifmedia ifc_media;

        struct sysctl_oid *ifc_sysctl_node;
        uint16_t ifc_sysctl_ntxqs;
        uint16_t ifc_sysctl_nrxqs;
        uint16_t ifc_sysctl_qs_eq_override;
        uint16_t ifc_sysctl_rx_budget;
        uint16_t ifc_sysctl_tx_abdicate;

        qidx_t ifc_sysctl_ntxds[8];
        qidx_t ifc_sysctl_nrxds[8];
        struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
        eventhandler_tag ifc_vlan_attach_event;
        eventhandler_tag ifc_vlan_detach_event;
        uint8_t ifc_mac[ETHER_ADDR_LEN];
        char ifc_mtx_name[16];
};

void *
iflib_get_softc(if_ctx_t ctx)
{

        return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

        return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

        return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

        return (&ctx->ifc_media);
}

uint32_t
iflib_get_flags(if_ctx_t ctx)
{
        return (ctx->ifc_flags);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

        bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

        return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

        return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
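/*
 * IP_ALIGNED() is true when the mbuf data pointer sits at offset 2 within a
 * 4-byte word (the usual ETHER_ALIGN adjustment), so that the IP header
 * following the 14-byte Ethernet header lands on a 4-byte boundary.
 */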
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))
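/*
 * Example: with 64-byte cache lines and 8-byte pointers CACHE_PTR_INCREMENT
 * is 8 pointer slots per line, and CACHE_PTR_NEXT() is intended to round an
 * address up to the start of the following cache line.
 */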

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

typedef struct iflib_sw_rx_desc_array {
        bus_dmamap_t *ifsd_map;         /* bus_dma maps for packet */
        struct mbuf **ifsd_m;           /* pkthdr mbufs */
        caddr_t *ifsd_cl;               /* direct cluster pointer for rx */
        bus_addr_t *ifsd_ba;            /* bus addr of cluster for rx */
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
        bus_dmamap_t *ifsd_map;         /* bus_dma maps for packet */
        bus_dmamap_t *ifsd_tso_map;     /* bus_dma maps for TSO packet */
        struct mbuf **ifsd_m;           /* pkthdr mbufs */
} if_txsd_vec_t;

/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8

#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)

struct iflib_txq {
        qidx_t ift_in_use;
        qidx_t ift_cidx;
        qidx_t ift_cidx_processed;
        qidx_t ift_pidx;
        uint8_t ift_gen;
        uint8_t ift_br_offset;
        uint16_t ift_npending;
        uint16_t ift_db_pending;
        uint16_t ift_rs_pending;
        /* implicit pad */
        uint8_t ift_txd_size[8];
        uint64_t ift_processed;
        uint64_t ift_cleaned;
        uint64_t ift_cleaned_prev;
#if MEMORY_LOGGING
        uint64_t ift_enqueued;
        uint64_t ift_dequeued;
#endif
        uint64_t ift_no_tx_dma_setup;
        uint64_t ift_no_desc_avail;
        uint64_t ift_mbuf_defrag_failed;
        uint64_t ift_mbuf_defrag;
        uint64_t ift_map_failed;
        uint64_t ift_txd_encap_efbig;
        uint64_t ift_pullups;
        uint64_t ift_last_timer_tick;

        struct mtx ift_mtx;
        struct mtx ift_db_mtx;

        /* constant values */
        if_ctx_t ift_ctx;
        struct ifmp_ring *ift_br;
        struct grouptask ift_task;
        qidx_t ift_size;
        uint16_t ift_id;
        struct callout ift_timer;

        if_txsd_vec_t ift_sds;
        uint8_t ift_qstatus;
        uint8_t ift_closed;
        uint8_t ift_update_freq;
        struct iflib_filter_info ift_filter_info;
        bus_dma_tag_t ift_buf_tag;
        bus_dma_tag_t ift_tso_buf_tag;
        iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 16
        char ift_mtx_name[MTX_NAME_LEN];
        char ift_db_mtx_name[MTX_NAME_LEN];
        bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
        uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
        qidx_t ifl_cidx;
        qidx_t ifl_pidx;
        qidx_t ifl_credits;
        uint8_t ifl_gen;
        uint8_t ifl_rxd_size;
#if MEMORY_LOGGING
        uint64_t ifl_m_enqueued;
        uint64_t ifl_m_dequeued;
        uint64_t ifl_cl_enqueued;
        uint64_t ifl_cl_dequeued;
#endif
        /* implicit pad */

        bitstr_t *ifl_rx_bitmap;
        qidx_t ifl_fragidx;
        /* constant */
        qidx_t ifl_size;
        uint16_t ifl_buf_size;
        uint16_t ifl_cltype;
        uma_zone_t ifl_zone;
        iflib_rxsd_array_t ifl_sds;
        iflib_rxq_t ifl_rxq;
        uint8_t ifl_id;
        bus_dma_tag_t ifl_buf_tag;
        iflib_dma_info_t ifl_ifdi;
        uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
        caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
        qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
        qidx_t used;

        if (pidx > cidx)
                used = pidx - cidx;
        else if (pidx < cidx)
                used = size - cidx + pidx;
        else if (gen == 0 && pidx == cidx)
                used = 0;
        else if (gen == 1 && pidx == cidx)
                used = size;
        else
                panic("bad state");

        return (used);
}
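/*
 * Example: with size = 1024, cidx = 1000 and pidx = 8 the producer has
 * wrapped, so 1024 - 1000 + 8 = 32 descriptors are in use; when pidx == cidx
 * the generation bit distinguishes an empty ring (gen == 0) from a
 * completely full one (gen == 1).
 */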

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
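/*
 * IDXDIFF() counts how many ring entries lie between tail and head modulo
 * the ring size; e.g. IDXDIFF(2, 500, 512) is 512 - 500 + 2 = 14.
 */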

struct iflib_rxq {
        /* If there is a separate completion queue -
         * these are the cq cidx and pidx. Otherwise
         * these are unused.
         */
        qidx_t ifr_size;
        qidx_t ifr_cq_cidx;
        qidx_t ifr_cq_pidx;
        uint8_t ifr_cq_gen;
        uint8_t ifr_fl_offset;

        if_ctx_t ifr_ctx;
        iflib_fl_t ifr_fl;
        uint64_t ifr_rx_irq;
        uint16_t ifr_id;
        uint8_t ifr_lro_enabled;
        uint8_t ifr_nfl;
        uint8_t ifr_ntxqirq;
        uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
        struct lro_ctrl ifr_lc;
        struct grouptask ifr_task;
        struct iflib_filter_info ifr_filter_info;
        iflib_dma_info_t ifr_ifdi;

        /* dynamically allocate if any drivers need a value substantially larger than this */
        struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
        uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
        caddr_t *ifsd_cl;
        struct mbuf **ifsd_m;
        iflib_fl_t ifsd_fl;
        qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)
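/*
 * The loop bounds round the word counts down to a multiple of the unroll
 * factor; e.g. on LP64, RXD_LOOP_BOUND is (5/4)*4 = 4, so the unrolled loop
 * in rxd_info_zero() clears words 0-3 and the trailing word is cleared
 * separately.
 */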

typedef struct if_pkt_info_pad {
        PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
        PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));
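/*
 * The *_pad types overlay if_pkt_info/if_rxd_info as plain arrays of machine
 * words; the CTASSERTs guarantee the sizes match exactly, so the zeroing
 * helpers below can safely clear the structures word by word (presumably to
 * avoid a bzero()/memset() call in the hot path).
 */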

static inline void
pkt_info_zero(if_pkt_info_t pi)
{
        if_pkt_info_pad_t pi_pad;

        pi_pad = (if_pkt_info_pad_t)pi;
        pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
        pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
        pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
        pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static device_method_t iflib_pseudo_methods[] = {
        DEVMETHOD(device_attach, noop_attach),
        DEVMETHOD(device_detach, iflib_pseudo_detach),
        DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
        "iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
        if_rxd_info_pad_t ri_pad;
        int i;

        ri_pad = (if_rxd_info_pad_t)ri;
        for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
                ri_pad->rxd_val[i] = 0;
                ri_pad->rxd_val[i+1] = 0;
                ri_pad->rxd_val[i+2] = 0;
                ri_pad->rxd_val[i+3] = 0;
        }
#ifdef __LP64__
        ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1
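/*
 * Example: with a 1024-descriptor tx ring, MAX_SINGLE_PACKET_FRACTION caps a
 * single packet at roughly 1024/12 (about 85) descriptors.
 */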

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)

#define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)

#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)

void
iflib_set_detach(if_ctx_t ctx)
{
        STATE_LOCK(ctx);
        ctx->ifc_flags |= IFC_IN_DETACH;
        STATE_UNLOCK(ctx);
}

/* Our boot-time initialization hook */
static int iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
        "iflib",
        iflib_module_event_handler,
        NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
                   "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "never batch transmits, at the possible expense of throughput");

#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");

static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");

static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
    &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
    &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
    &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
    &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
    &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
    &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
        iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
                iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
                iflib_txq_drain_flushing = iflib_txq_drain_oactive =
                iflib_txq_drain_notready =
                iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
                iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
                iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
                iflib_rx_unavail =
                iflib_rx_ctx_inactive = iflib_rx_if_input =
                iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
#ifdef ALTQ
static void iflib_altq_if_start(if_t ifp);
static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
#endif
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

NETDUMP_DEFINE(iflib);

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames whose length is not a multiple of
 *	64 bytes is slower, so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
        struct ifnet *ifp = na->ifp;
        if_ctx_t ctx = ifp->if_softc;
        int status;

        CTX_LOCK(ctx);
        IFDI_INTR_DISABLE(ctx);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        if (!CTX_IS_VF(ctx))
                IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

        /* enable or disable flags and callbacks in na and ifp */
        if (onoff) {
                nm_set_native_flags(na);
        } else {
                nm_clear_native_flags(na);
        }
        iflib_stop(ctx);
        iflib_init_locked(ctx);
        IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
        status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
        if (status)
                nm_clear_native_flags(na);
        CTX_UNLOCK(ctx);
        return (status);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
        struct netmap_adapter *na = kring->na;
        u_int const lim = kring->nkr_num_slots - 1;
        u_int head = kring->rhead;
        struct netmap_ring *ring = kring->ring;
        bus_dmamap_t *map;
        struct if_rxd_update iru;
        if_ctx_t ctx = rxq->ifr_ctx;
        iflib_fl_t fl = &rxq->ifr_fl[0];
        uint32_t refill_pidx, nic_i;
#if IFLIB_DEBUG_COUNTERS
        int rf_count = 0;
#endif

        if (nm_i == head && __predict_true(!init))
                return 0;
        iru_init(&iru, rxq, 0 /* flid */);
        map = fl->ifl_sds.ifsd_map;
        refill_pidx = netmap_idx_k2n(kring, nm_i);
        /*
         * IMPORTANT: we must leave one free slot in the ring,
         * so move head back by one unit
         */
        head = nm_prev(head, lim);
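        /*
         * (Keeping one slot free is the usual ring convention: a ring whose
         * producer caught up with its consumer would otherwise be
         * indistinguishable from an empty one.)
         */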
        nic_i = UINT_MAX;
        DBG_COUNTER_INC(fl_refills);
        while (nm_i != head) {
#if IFLIB_DEBUG_COUNTERS
                if (++rf_count == 9)
                        DBG_COUNTER_INC(fl_refills_large);
#endif
                for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
                        uint32_t nic_i_dma = refill_pidx;
                        nic_i = netmap_idx_k2n(kring, nm_i);

                        MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);

                        if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
                                return netmap_ring_reinit(kring);

                        fl->ifl_vm_addrs[tmp_pidx] = addr;
                        if (__predict_false(init)) {
                                netmap_load_map(na, fl->ifl_buf_tag,
                                    map[nic_i], addr);
                        } else if (slot->flags & NS_BUF_CHANGED) {
                                /* buffer has changed, reload map */
                                netmap_reload_map(na, fl->ifl_buf_tag,
                                    map[nic_i], addr);
                        }
                        slot->flags &= ~NS_BUF_CHANGED;

                        nm_i = nm_next(nm_i, lim);
                        fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
                        if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
                                continue;

                        iru.iru_pidx = refill_pidx;
                        iru.iru_count = tmp_pidx+1;
                        ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
                        refill_pidx = nic_i;
                        for (int n = 0; n < iru.iru_count; n++) {
                                bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i_dma],
                                    BUS_DMASYNC_PREREAD);
                                /* XXX - change this to not use the netmap func*/
                                nic_i_dma = nm_next(nic_i_dma, lim);
                        }
                }
        }
        kring->nr_hwcur = head;

        bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        if (__predict_true(nic_i != UINT_MAX)) {
                ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
                DBG_COUNTER_INC(rxd_flush);
        }
        return (0);
}

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int nm_i;	/* index into the netmap kring */
        u_int nic_i;	/* index into the NIC ring */
        u_int n;
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;
        struct if_pkt_info pi;

        /*
         * interrupts on every tx packet are expensive so request
         * them every half ring, or where NS_REPORT is set
         */
        u_int report_frequency = kring->nkr_num_slots >> 1;
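        /*
         * (e.g. with 1024 slots a completion report is requested roughly
         * every 512 packets, plus wherever a slot carries NS_REPORT.)
         */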
        /* device-specific */
        if_ctx_t ctx = ifp->if_softc;
        iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even when NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so do it only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);
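
		/*
		 * Prefetch the netmap slot and the matching per-descriptor
		 * software state (mbuf pointer and DMA map) for this entry.
		 */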
		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
			    nic_i == 0 || nic_i == report_frequency) ?
			    IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
			DBG_COUNTER_INC(tx_encap);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[nic_i], addr);
			}
			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txq->ift_buf_tag,
			    txq->ift_sds.ifsd_map[nic_i],
			    BUS_DMASYNC_PREWRITE);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If none are reclaimed and TX IRQs are not in use, do an initial
	 * minimal delay, then trigger the tx handler which will spin in the
	 * group task queue.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}
	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
			    iflib_timer, txq, txq->ift_timer.c_cpu);
		}
	return (0);
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	iflib_fl_t fl;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
	    (kring->nr_kflags & NKR_PENDINTR);
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	if (head > lim)
		return (netmap_ring_reinit(kring));

	/*
	 * XXX netmap_fl_refill() only ever (re)fills free list 0 so far.
	 */
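
	/*
	 * Make the RX descriptors that the device may have written back
	 * visible to the CPU before isc_rxd_available() and
	 * isc_rxd_pkt_get() examine them below.
	 */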
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			nic_i = fl->ifl_cidx;
			nm_i = netmap_idx_n2k(kring, nic_i);
			avail = ctx->isc_rxd_available(ctx->ifc_softc,
			    rxq->ifr_id, nic_i, USHRT_MAX);
			for (n = 0; avail > 0; n++, avail--) {
				rxd_info_zero(&ri);
				ri.iri_frags = rxq->ifr_frags;
				ri.iri_qsidx = kring->ring_id;
				ri.iri_ifp = ctx->ifc_ifp;
				ri.iri_cidx = nic_i;

				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
				ring->slot[nm_i].flags = 0;
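				/* make the received payload visible to the CPU */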
				bus_dmamap_sync(fl->ifl_buf_tag,
				    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				nic_i = nm_next(nic_i, lim);
			}
			if (n) { /* update the state variables */
				if (netmap_no_pendintr && !force_update) {
					/* diagnostics */
					iflib_rx_miss++;
					iflib_rx_miss_bufs += n;
				}
				fl->ifl_cidx = nic_i;
				kring->nr_hwtail = nm_i;
			}
			kring->nr_kflags &= ~NKR_PENDINTR;
		}
	}
	/*
	 * Second part: skip past packets that userspace has released
	 * (kring->nr_hwcur to head excluded), and make the buffers
	 * available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	/* XXX not sure how this will work with multiple free lists */
	nm_i = kring->nr_hwcur;

	return (netmap_fl_refill(rxq, kring, nm_i, false));
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
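
	/* enable or disable all the device interrupts under the iflib ctx lock */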
	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}

static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
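
	/*
	 * Describe this port to netmap(4): ring and slot counts plus the
	 * txsync/rxsync/register/intr callbacks implemented above.
	 */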
	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = scctx->isc_ntxd[0];
	na.num_rx_desc = scctx->isc_nrxd[0];
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return;
	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {

		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
		    NMB(na, slot + si));
	}
}

static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
	struct netmap_slot *slot;
	uint32_t nm_i;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return;
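	/* preload the RX free list with buffers from the netmap slots */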
	nm_i = netmap_idx_n2k(kring, 0);
	netmap_fl_refill(rxq, kring, nm_i, true);
}

static void
iflib_netmap_timer_adjust(if_ctx_t ctx, iflib_txq_t txq, uint32_t *reset_on)
{
	struct netmap_kring *kring;
|
Further correct and optimize the bus_dma(9) usage of iflib(4):
o Correct the obvious bugs in the netmap(4) parts:
- No longer check for the existence of DMA maps as bus_dma(9)
is used unconditionally in iflib(4) since r341095.
- Supply the correct DMA tag and map pairs to bus_dma(9)
functions (see also the commit message of r343753).
- In iflib_netmap_timer_adjust(), add synchronization of the
TX descriptors before calling the ift_txd_credits_update
method as the latter evaluates the TX descriptors possibly
updated by the MAC.
- In _task_fn_tx(), wrap the netmap(4)-specific bits in
#ifdef DEV_NETMAP just as done in _task_fn_admin() and
_task_fn_rx() respectively.
o In iflib_fast_intr_rxtx(), synchronize the TX rather than
the RX descriptors before calling the ift_txd_credits_update
method (see also above).
o There's no need to synchronize an RX buffer that is going to
be recycled in iflib_rxd_pkt_get(), yet; it's sufficient to
do that as late as passing RX buffers to the MAC via the
ift_rxd_refill method. Hence, combine that synchronization
with the synchronization of new buffers into a common spot
in _iflib_fl_refill().
o There's no need to synchronize the RX descriptors of a free
list in preparation of the MAC updating their statuses with
every invocation of rxd_frag_to_sd(); it's enough to do this
once before handing control over to the MAC, i. e. before
calling ift_rxd_flush method in _iflib_fl_refill(), which
already performs the necessary synchronization.
o Given that the ift_rxd_available method evaluates the RX
descriptors which possibly have been altered by the MAC,
synchronize as appropriate beforehand. Most notably this
is now done in iflib_rxd_avail(), which in turn means that
we don't need to issue the same synchronization yet again
before calling the ift_rxd_pkt_get method in iflib_rxeof().
o In iflib_txd_db_check(), synchronize the TX descriptors
before handing them over to the MAC for transmission via
the ift_txd_flush method.
o In iflib_encap(), move the TX buffer synchronization after
the invocation of the ift_txd_encap() method. If the MAC
driver fails to encapsulate the packet and we retry with
a defragmented mbuf chain or finally fail, the cycles for
TX buffer synchronization have been wasted. Synchronizing
afterwards matches what non-iflib(4) drivers typically do
and is sufficient as the MAC will not actually start with
the transmission before - in this case - the ift_txd_flush
method is called.
Moreover, for the latter reason the synchronization of the
TX descriptors in iflib_encap() can go as it's enough to
synchronize them before passing control over to the MAC by
issuing the ift_txd_flush() method (see above).
o In iflib_txq_can_drain(), only synchronize TX descriptors
if the ift_txd_credits_update method accessing these is
actually called.
Differential Revision: https://reviews.freebsd.org/D19081
2019-02-12 21:08:44 +00:00
|
|
|
uint16_t txqid;
|
2018-07-20 17:24:45 +00:00
|
|
|
|
Further correct and optimize the bus_dma(9) usage of iflib(4):
o Correct the obvious bugs in the netmap(4) parts:
- No longer check for the existence of DMA maps as bus_dma(9)
is used unconditionally in iflib(4) since r341095.
- Supply the correct DMA tag and map pairs to bus_dma(9)
functions (see also the commit message of r343753).
- In iflib_netmap_timer_adjust(), add synchronization of the
TX descriptors before calling the ift_txd_credits_update
method as the latter evaluates the TX descriptors possibly
updated by the MAC.
- In _task_fn_tx(), wrap the netmap(4)-specific bits in
#ifdef DEV_NETMAP just as done in _task_fn_admin() and
_task_fn_rx() respectively.
o In iflib_fast_intr_rxtx(), synchronize the TX rather than
the RX descriptors before calling the ift_txd_credits_update
method (see also above).
o There's no need to synchronize an RX buffer that is going to
be recycled in iflib_rxd_pkt_get(), yet; it's sufficient to
do that as late as passing RX buffers to the MAC via the
ift_rxd_refill method. Hence, combine that synchronization
with the synchronization of new buffers into a common spot
in _iflib_fl_refill().
o There's no need to synchronize the RX descriptors of a free
list in preparation of the MAC updating their statuses with
every invocation of rxd_frag_to_sd(); it's enough to do this
once before handing control over to the MAC, i. e. before
calling ift_rxd_flush method in _iflib_fl_refill(), which
already performs the necessary synchronization.
o Given that the ift_rxd_available method evaluates the RX
descriptors which possibly have been altered by the MAC,
synchronize as appropriate beforehand. Most notably this
is now done in iflib_rxd_avail(), which in turn means that
we don't need to issue the same synchronization yet again
before calling the ift_rxd_pkt_get method in iflib_rxeof().
o In iflib_txd_db_check(), synchronize the TX descriptors
before handing them over to the MAC for transmission via
the ift_txd_flush method.
o In iflib_encap(), move the TX buffer synchronization after
the invocation of the ift_txd_encap() method. If the MAC
driver fails to encapsulate the packet and we retry with
a defragmented mbuf chain or finally fail, the cycles for
TX buffer synchronization have been wasted. Synchronizing
afterwards matches what non-iflib(4) drivers typically do
and is sufficient as the MAC will not actually start with
the transmission before - in this case - the ift_txd_flush
method is called.
Moreover, for the latter reason the synchronization of the
TX descriptors in iflib_encap() can go as it's enough to
synchronize them before passing control over to the MAC by
issuing the ift_txd_flush() method (see above).
o In iflib_txq_can_drain(), only synchronize TX descriptors
if the ift_txd_credits_update method accessing these is
actually called.
Differential Revision: https://reviews.freebsd.org/D19081
2019-02-12 21:08:44 +00:00
|
|
|
txqid = txq->ift_id;
|
2018-07-20 17:24:45 +00:00
|
|
|
kring = NA(ctx->ifc_ifp)->tx_rings[txqid];
|
|
|
|
|
|
|
|
if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
|
Further correct and optimize the bus_dma(9) usage of iflib(4):
o Correct the obvious bugs in the netmap(4) parts:
- No longer check for the existence of DMA maps as bus_dma(9)
is used unconditionally in iflib(4) since r341095.
- Supply the correct DMA tag and map pairs to bus_dma(9)
functions (see also the commit message of r343753).
- In iflib_netmap_timer_adjust(), add synchronization of the
TX descriptors before calling the ift_txd_credits_update
method as the latter evaluates the TX descriptors possibly
updated by the MAC.
- In _task_fn_tx(), wrap the netmap(4)-specific bits in
#ifdef DEV_NETMAP just as done in _task_fn_admin() and
_task_fn_rx() respectively.
o In iflib_fast_intr_rxtx(), synchronize the TX rather than
the RX descriptors before calling the ift_txd_credits_update
method (see also above).
o There's no need to synchronize an RX buffer that is going to
be recycled in iflib_rxd_pkt_get(), yet; it's sufficient to
do that as late as passing RX buffers to the MAC via the
ift_rxd_refill method. Hence, combine that synchronization
with the synchronization of new buffers into a common spot
in _iflib_fl_refill().
o There's no need to synchronize the RX descriptors of a free
list in preparation of the MAC updating their statuses with
every invocation of rxd_frag_to_sd(); it's enough to do this
once before handing control over to the MAC, i. e. before
calling ift_rxd_flush method in _iflib_fl_refill(), which
already performs the necessary synchronization.
o Given that the ift_rxd_available method evaluates the RX
descriptors which possibly have been altered by the MAC,
synchronize as appropriate beforehand. Most notably this
is now done in iflib_rxd_avail(), which in turn means that
we don't need to issue the same synchronization yet again
before calling the ift_rxd_pkt_get method in iflib_rxeof().
o In iflib_txd_db_check(), synchronize the TX descriptors
before handing them over to the MAC for transmission via
the ift_txd_flush method.
o In iflib_encap(), move the TX buffer synchronization after
the invocation of the ift_txd_encap() method. If the MAC
driver fails to encapsulate the packet and we retry with
a defragmented mbuf chain or finally fail, the cycles for
TX buffer synchronization have been wasted. Synchronizing
afterwards matches what non-iflib(4) drivers typically do
and is sufficient as the MAC will not actually start with
the transmission before - in this case - the ift_txd_flush
method is called.
Moreover, for the latter reason the synchronization of the
TX descriptors in iflib_encap() can go as it's enough to
synchronize them before passing control over to the MAC by
issuing the ift_txd_flush() method (see above).
o In iflib_txq_can_drain(), only synchronize TX descriptors
if the ift_txd_credits_update method accessing these is
actually called.
Differential Revision: https://reviews.freebsd.org/D19081
2019-02-12 21:08:44 +00:00
|
|
|
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
|
|
|
|
BUS_DMASYNC_POSTREAD);
|
2018-07-20 17:24:45 +00:00
|
|
|
if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
|
|
|
|
netmap_tx_irq(ctx->ifc_ifp, txqid);
|
|
|
|
if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
|
|
|
|
if (hz < 2000)
|
|
|
|
*reset_on = 1;
|
|
|
|
else
|
|
|
|
*reset_on = hz / 1000;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)
#define iflib_netmap_timer_adjust(ctx, txq, reset_on)

#endif

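/*
 * Cache prefetch helpers: on x86 these expand to prefetcht0 instructions,
 * with prefetch2cachelines() touching a second cache line when the line
 * size is below 128 bytes; on other architectures both are no-ops.
 */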
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif

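/*
 * Generate a pseudo-random but deterministic MAC address for the
 * interface: the host/jail UUID and the device name plus unit number are
 * hashed with MD5, the FreeBSD Foundation OUI (58:9c:fc) supplies the
 * first three octets and the first three digest bytes supply the rest.
 */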
static void
iflib_gen_mac(if_ctx_t ctx)
{
	struct thread *td;
	MD5_CTX mdctx;
	char uuid[HOSTUUIDLEN+1];
	char buf[HOSTUUIDLEN+16];
	uint8_t *mac;
	unsigned char digest[16];

	td = curthread;
	mac = ctx->ifc_mac;
	uuid[HOSTUUIDLEN] = 0;
	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
	/*
	 * Generate a pseudo-random, deterministic MAC
	 * address based on the UUID and unit number.
	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
	 */
	MD5Init(&mdctx);
	MD5Update(&mdctx, buf, strlen(buf));
	MD5Final(digest, &mdctx);

	mac[0] = 0x58;
	mac[1] = 0x9C;
	mac[2] = 0xFC;
	mac[3] = digest[0];
	mac[4] = digest[1];
	mac[5] = digest[2];
}

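/*
 * Populate an if_rxd_update with the scatter arrays and geometry of the
 * selected free list so a single structure can be handed to the driver's
 * ift_rxd_refill method.
 */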
static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

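/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment, or leave the destination untouched on error.
 */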
static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}

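/*
 * Allocate a single physically contiguous DMA area of 'size' bytes with
 * the requested alignment and record it in 'dma'.  This is the usual
 * three-step bus_dma(9) sequence: create a tag constrained to one
 * segment, allocate coherent zeroed memory against it, then load the map
 * to obtain the bus address via _iflib_dmamap_cb().  Each failure path
 * unwinds the earlier steps through the fail_* labels.
 */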
int
iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
{
	int err;
	device_t dev = ctx->ifc_dev;

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				align, 0,		/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				size,			/* maxsize */
				1,			/* nsegments */
				size,			/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

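/*
 * Convenience wrapper around iflib_dma_alloc_align() that uses the queue
 * alignment the driver advertised in its shared context (isc_q_align).
 */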
int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
}

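/*
 * Allocate 'count' DMA areas in one call; on the first failure, free the
 * areas allocated so far and return the error.
 */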
int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;	/* initialized so a zero count is not reported as an error */
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

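/*
 * Release a DMA area allocated by iflib_dma_alloc*(): sync and unload the
 * map if it is loaded, free the memory, and destroy the tag.
 */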
void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

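/*
 * With EARLY_AP_STARTUP the application processors - and with them the
 * per-CPU taskqueue groups iflib relies on - are presumably up before
 * drivers attach, so iflib can be treated as started from the outset;
 * otherwise a late SYSINIT() sets the flag, as explained below.
 */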
#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
 * We used to abuse the smp_started flag to decide if the queues have been
 * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
 * That gave bad races, since the SYSINIT() runs strictly after smp_started
 * is set.  Run a SYSINIT() strictly after that to just set a usable
 * completion flag.
 */

static int iflib_started;

static void
iflib_record_started(void *arg)
{
	iflib_started = 1;
}

SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
	iflib_record_started, NULL);
#endif

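/*
 * Minimal interrupt filter: run the driver-supplied filter, if any, and
 * unless it suppresses further processing, enqueue the associated group
 * task; the actual work happens in task context.
 */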
static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	int result;

	if (!iflib_started)
		return (FILTER_STRAY);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

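/*
 * Interrupt filter for vectors shared between an RX queue and one or more
 * TX queues: after the optional driver filter, sync and reclaim TX
 * credits for every TX queue tied to this RX queue, re-enabling the TX
 * interrupt when nothing was pending and enqueueing the TX task
 * otherwise; then enqueue the RX task if descriptors are available, or
 * re-enable the RX interrupt if not.
 */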
static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if_ctx_t ctx;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	iflib_txq_t txq;
	void *sc;
	int i, cidx, result;
	qidx_t txqid;

	if (!iflib_started)
		return (FILTER_STRAY);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	ctx = rxq->ifr_ctx;
	sc = ctx->ifc_softc;
	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		txqid = rxq->ifr_txqid[i];
		txq = &ctx->ifc_txqs[txqid];
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD);
		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&txq->ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else {
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
		DBG_COUNTER_INC(rx_intr_enables);
	}
	return (FILTER_HANDLED);
}

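/*
 * Like iflib_fast_intr(), but used where the filter info is associated
 * with the context rather than an individual queue (e.g. legacy and admin
 * interrupts); the enqueued group task does the actual work.
 */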
static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	int result;

	if (!iflib_started)
		return (FILTER_STRAY);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

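/*
 * Allocate and set up one interrupt resource: the rid is made shareable
 * for legacy (INTx) setups, the bus interrupt is established with either
 * a filter or a handler (never both), and the resulting resource and tag
 * are stored in the if_irq for later teardown.
 */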
static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
		 driver_filter_t filter, driver_intr_t handler, void *arg,
		 const char *name)
{
	int rc, flags;
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	irq->ii_rid = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}

/*********************************************************************
 *
 *  Allocate DMA resources for TX buffers as well as memory for the TX
 *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in an
 *  iflib_sw_tx_desc_array structure, storing all the information that
 *  is needed to transmit a packet on the wire.  This is called only
 *  once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	bus_size_t tsomaxsize;
	int err, nsegments, ntsosegments;
	bool tso;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
2018-07-15 19:04:23 +00:00
|
|
|
tsomaxsize = scctx->isc_tx_tso_size_max;
|
|
|
|
if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
|
|
|
|
tsomaxsize += sizeof(struct ether_vlan_header);
|
2016-08-12 21:29:44 +00:00
|
|
|
MPASS(scctx->isc_ntxd[0] > 0);
|
|
|
|
MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
|
2016-05-18 04:35:58 +00:00
|
|
|
MPASS(nsegments > 0);
|
2018-07-15 19:04:23 +00:00
|
|
|
if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
|
|
|
|
MPASS(ntsosegments > 0);
|
|
|
|
MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
o As illustrated by e. g. figure 7-14 of the Intel 82599 10 GbE
controller datasheet revision 3.3, in the context of Ethernet
MACs the control data describing the packet buffers typically
are named "descriptors". Each of these descriptors references
one buffer, multiple of which a packet can be composed of.
By contrast, in comments, messages and the names of structure
members, iflib(4) refers to DMA resources employed for RX and
TX buffers (rather than control data) as "desc(riptors)".
This odd naming convention of iflib(4) made reviewing r343085
and identifying wrong and missing bus_dmamap_sync(9) calls in
particular, way harder than it already is. This convention may
also explain why the netmap(4) part of iflib(4) pairs the DMA
tags for control data with DMA maps of buffers and vice versa
in calls to bus_dma(9) functions.
Therefore, change iflib(4) to refer to buf(fers) when buffers
and not the usual understanding of descriptors is meant. This
change does not include corrections to the DMA resources used
in the netmap(4) parts. However, it revises error messages to
state which kind of allocation/creation failed. Specifically,
the "Unable to allocate tx_buffer (map) memory" copy & pasted
inappropriately on several occasions was replaced with proper
messages.
o Enhance some other error messages to indicate which half - RX
or TX - they apply to instead of using identical text in both
cases and generally canonicalize them.
o Correct the descriptions of iflib_{r,t}xsd_alloc() to reflect
reality; current code doesn't use {r,t}x_buffer structures.
o In iflib_queues_alloc():
- Remove redundant BUS_DMA_NOWAIT of iflib_dma_alloc() calls,
- change the M_WAITOK from malloc(9) calls into M_NOWAIT. The
return values are already checked, deferred DMA allocations
not being an option at this point, BUS_DMA_NOWAIT has to be
used anyway and prior malloc(9) calls in this function also
specify M_NOWAIT.
Reviewed by: shurd
Differential Revision: https://reviews.freebsd.org/D19067
2019-02-04 20:46:57 +00:00
|
|
|
* Set up DMA tags for TX buffers.
|
2016-05-18 04:35:58 +00:00
|
|
|
*/
|
|
|
|
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
|
|
|
|
1, 0, /* alignment, bounds */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
sctx->isc_tx_maxsize, /* maxsize */
|
|
|
|
nsegments, /* nsegments */
|
|
|
|
sctx->isc_tx_maxsegsize, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, /* lockfunc */
|
|
|
|
NULL, /* lockfuncarg */
|
2019-02-04 20:46:57 +00:00
|
|
|
&txq->ift_buf_tag))) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
|
2017-07-20 20:28:31 +00:00
|
|
|
device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
|
|
|
|
(uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
|
2016-05-18 04:35:58 +00:00
|
|
|
goto fail;
|
|
|
|
}
|
2019-01-16 05:44:14 +00:00
|
|
|
tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
|
|
|
|
if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
|
2016-05-18 04:35:58 +00:00
|
|
|
1, 0, /* alignment, bounds */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
2018-07-15 19:04:23 +00:00
|
|
|
tsomaxsize, /* maxsize */
|
2016-05-18 04:35:58 +00:00
|
|
|
ntsosegments, /* nsegments */
|
2018-07-15 19:04:23 +00:00
|
|
|
sctx->isc_tso_maxsegsize,/* maxsegsize */
|
2016-05-18 04:35:58 +00:00
|
|
|
0, /* flags */
|
|
|
|
NULL, /* lockfunc */
|
|
|
|
NULL, /* lockfuncarg */
|
2019-02-04 20:46:57 +00:00
|
|
|
&txq->ift_tso_buf_tag))) {
|
|
|
|
device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
|
|
|
|
err);
|
2016-05-18 04:35:58 +00:00
|
|
|
goto fail;
|
|
|
|
}
|
2019-02-04 20:46:57 +00:00
|
|
|
|
|
|
|
/* Allocate memory for the TX mbuf map. */
|
2016-05-18 04:35:58 +00:00
|
|
|
if (!(txq->ift_sds.ifsd_m =
|
2018-01-21 15:42:36 +00:00
|
|
|
(struct mbuf **) malloc(sizeof(struct mbuf *) *
|
|
|
|
scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev, "Unable to allocate TX mbuf map memory\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2019-02-04 20:46:57 +00:00
|
|
|
/*
|
|
|
|
* Create the DMA maps for TX buffers.
|
|
|
|
*/
|
2019-01-16 05:44:14 +00:00
|
|
|
if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
|
|
|
|
sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
|
|
|
|
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate TX buffer DMA map memory\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
2019-01-16 05:44:14 +00:00
|
|
|
if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
|
|
|
|
sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
|
|
|
|
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate TSO TX buffer map memory\n");
|
2019-01-16 05:44:14 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
2016-08-12 21:29:44 +00:00
|
|
|
for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
|
2019-02-04 20:46:57 +00:00
|
|
|
err = bus_dmamap_create(txq->ift_buf_tag, 0,
|
2019-01-16 05:44:14 +00:00
|
|
|
&txq->ift_sds.ifsd_map[i]);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (err != 0) {
|
|
|
|
device_printf(dev, "Unable to create TX DMA map\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
2019-01-16 05:44:14 +00:00
|
|
|
if (!tso)
|
|
|
|
continue;
|
2019-02-04 20:46:57 +00:00
|
|
|
err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
|
2019-01-16 05:44:14 +00:00
|
|
|
&txq->ift_sds.ifsd_tso_map[i]);
|
|
|
|
if (err != 0) {
|
|
|
|
device_printf(dev, "Unable to create TSO TX DMA map\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
fail:
|
|
|
|
/* We free all, it handles case where we are in the middle */
|
|
|
|
iflib_tx_structures_free(ctx);
|
|
|
|
return (err);
|
|
|
|
}
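The limits consumed by iflib_txsd_alloc() above (sctx->isc_tx_maxsize, sctx->isc_tso_max{seg,}size, scctx->isc_tx_nsegments and the scctx->isc_tx_tso_* values) are supplied by the attaching driver. A rough sketch of that driver-side setup follows; all EXAMPLE_* values and names are invented here for illustration and are not taken from em(4) or any other in-tree driver.

/*
 * Editorial sketch, not committed code: one plausible shape of the
 * driver-side constraints consumed by iflib_txsd_alloc().
 */
#define	EXAMPLE_TSO_SIZE	65535
#define	EXAMPLE_TSO_SEG_SIZE	4096
#define	EXAMPLE_MAX_SCATTER	40

static struct if_shared_ctx example_sctx = {
	/* Bounds for the regular TX buffer DMA tag. */
	.isc_tx_maxsize = EXAMPLE_TSO_SIZE,
	.isc_tx_maxsegsize = PAGE_SIZE,
	/* Bounds for the TSO TX buffer DMA tag; leave room for a VLAN header. */
	.isc_tso_maxsize = EXAMPLE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = EXAMPLE_TSO_SEG_SIZE,
	/* Many other required if_shared_ctx fields omitted for brevity. */
};

static int
example_if_attach_pre(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	scctx->isc_tx_nsegments = EXAMPLE_MAX_SCATTER;
	scctx->isc_tx_tso_segments_max = EXAMPLE_MAX_SCATTER;
	scctx->isc_tx_tso_size_max = EXAMPLE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = EXAMPLE_TSO_SEG_SIZE;
	return (0);
}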
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
|
|
|
|
{
|
|
|
|
bus_dmamap_t map;
|
|
|
|
|
|
|
|
map = NULL;
|
|
|
|
if (txq->ift_sds.ifsd_map != NULL)
|
|
|
|
map = txq->ift_sds.ifsd_map[i];
|
|
|
|
if (map != NULL) {
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
|
|
|
|
bus_dmamap_unload(txq->ift_buf_tag, map);
|
|
|
|
bus_dmamap_destroy(txq->ift_buf_tag, map);
|
2016-05-18 04:35:58 +00:00
|
|
|
txq->ift_sds.ifsd_map[i] = NULL;
|
|
|
|
}
|
2019-01-16 05:44:14 +00:00
|
|
|
|
|
|
|
map = NULL;
|
|
|
|
if (txq->ift_sds.ifsd_tso_map != NULL)
|
|
|
|
map = txq->ift_sds.ifsd_tso_map[i];
|
|
|
|
if (map != NULL) {
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(txq->ift_tso_buf_tag, map,
|
2019-01-16 05:44:14 +00:00
|
|
|
BUS_DMASYNC_POSTWRITE);
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(txq->ift_tso_buf_tag, map);
|
|
|
|
bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_tso_map[i] = NULL;
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
}
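The BUS_DMASYNC_POSTWRITE syncs in iflib_txsd_destroy() above pair with the PREWRITE syncs issued when a buffer is handed to the hardware, the kind of pairing the D19067 log message is concerned with when it mentions missing bus_dmamap_sync(9) calls. As a reminder of the generic bus_dma(9) lifecycle (a sketch under assumed names, not code from this file; the fixed segment array is a placeholder):

/*
 * Editorial sketch, not committed code: the load/sync/unload cycle that
 * the teardown above completes for a single TX buffer map.
 */
static int
example_tx_map_cycle(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m)
{
	bus_dma_segment_t segs[8];
	int err, nsegs;

	err = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err != 0)
		return (err);
	/* Flush CPU writes so the device sees the frame contents. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
	/* ... descriptor posted, hardware transmits, completion observed ... */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(tag, map);
	return (0);
}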
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_txq_destroy(iflib_txq_t txq)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = txq->ift_ctx;
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
for (int i = 0; i < txq->ift_size; i++)
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_txsd_destroy(ctx, txq, i);
|
|
|
|
if (txq->ift_sds.ifsd_map != NULL) {
|
|
|
|
free(txq->ift_sds.ifsd_map, M_IFLIB);
|
|
|
|
txq->ift_sds.ifsd_map = NULL;
|
|
|
|
}
|
2019-01-16 05:44:14 +00:00
|
|
|
if (txq->ift_sds.ifsd_tso_map != NULL) {
|
|
|
|
free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
|
|
|
|
txq->ift_sds.ifsd_tso_map = NULL;
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
if (txq->ift_sds.ifsd_m != NULL) {
|
|
|
|
free(txq->ift_sds.ifsd_m, M_IFLIB);
|
|
|
|
txq->ift_sds.ifsd_m = NULL;
|
|
|
|
}
|
2019-02-04 20:46:57 +00:00
|
|
|
if (txq->ift_buf_tag != NULL) {
|
|
|
|
bus_dma_tag_destroy(txq->ift_buf_tag);
|
|
|
|
txq->ift_buf_tag = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2019-02-04 20:46:57 +00:00
|
|
|
if (txq->ift_tso_buf_tag != NULL) {
|
|
|
|
bus_dma_tag_destroy(txq->ift_tso_buf_tag);
|
|
|
|
txq->ift_tso_buf_tag = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
|
|
|
|
{
|
|
|
|
struct mbuf **mp;
|
|
|
|
|
|
|
|
mp = &txq->ift_sds.ifsd_m[i];
|
|
|
|
if (*mp == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (txq->ift_sds.ifsd_map != NULL) {
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(txq->ift_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
|
2019-01-16 05:44:14 +00:00
|
|
|
}
|
|
|
|
if (txq->ift_sds.ifsd_tso_map != NULL) {
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(txq->ift_tso_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(txq->ift_tso_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_tso_map[i]);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2016-08-12 21:29:44 +00:00
|
|
|
m_free(*mp);
|
2016-05-18 04:35:58 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
|
|
|
*mp = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_txq_setup(iflib_txq_t txq)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = txq->ift_ctx;
|
2016-08-12 21:29:44 +00:00
|
|
|
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
|
2018-11-14 15:16:45 +00:00
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_dma_info_t di;
|
|
|
|
int i;
|
|
|
|
|
2017-01-02 00:56:33 +00:00
|
|
|
/* Set number of descriptors available */
|
2016-05-18 04:35:58 +00:00
|
|
|
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
|
2017-03-13 22:53:06 +00:00
|
|
|
/* XXX make configurable */
|
|
|
|
txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/* Reset indices */
|
2017-03-13 22:53:06 +00:00
|
|
|
txq->ift_cidx_processed = 0;
|
|
|
|
txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
|
2016-08-12 21:29:44 +00:00
|
|
|
txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-11-14 15:16:45 +00:00
|
|
|
for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
|
2016-05-18 04:35:58 +00:00
|
|
|
bzero((void *)di->idi_vaddr, di->idi_size);
|
|
|
|
|
|
|
|
IFDI_TXQ_SETUP(ctx, txq->ift_id);
|
2018-11-14 15:16:45 +00:00
|
|
|
for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
|
2016-05-18 04:35:58 +00:00
|
|
|
bus_dmamap_sync(di->idi_tag, di->idi_map,
|
Further correct and optimize the bus_dma(9) usage of iflib(4):
o Correct the obvious bugs in the netmap(4) parts:
- No longer check for the existence of DMA maps as bus_dma(9)
is used unconditionally in iflib(4) since r341095.
- Supply the correct DMA tag and map pairs to bus_dma(9)
functions (see also the commit message of r343753).
- In iflib_netmap_timer_adjust(), add synchronization of the
TX descriptors before calling the ift_txd_credits_update
method as the latter evaluates the TX descriptors possibly
updated by the MAC.
- In _task_fn_tx(), wrap the netmap(4)-specific bits in
#ifdef DEV_NETMAP just as done in _task_fn_admin() and
_task_fn_rx() respectively.
o In iflib_fast_intr_rxtx(), synchronize the TX rather than
the RX descriptors before calling the ift_txd_credits_update
method (see also above).
o There's no need to synchronize an RX buffer that is going to
be recycled in iflib_rxd_pkt_get(), yet; it's sufficient to
do that as late as passing RX buffers to the MAC via the
ift_rxd_refill method. Hence, combine that synchronization
with the synchronization of new buffers into a common spot
in _iflib_fl_refill().
o There's no need to synchronize the RX descriptors of a free
list in preparation of the MAC updating their statuses with
every invocation of rxd_frag_to_sd(); it's enough to do this
once before handing control over to the MAC, i. e. before
calling ift_rxd_flush method in _iflib_fl_refill(), which
already performs the necessary synchronization.
o Given that the ift_rxd_available method evaluates the RX
descriptors which possibly have been altered by the MAC,
synchronize as appropriate beforehand. Most notably this
is now done in iflib_rxd_avail(), which in turn means that
we don't need to issue the same synchronization yet again
before calling the ift_rxd_pkt_get method in iflib_rxeof().
o In iflib_txd_db_check(), synchronize the TX descriptors
before handing them over to the MAC for transmission via
the ift_txd_flush method.
o In iflib_encap(), move the TX buffer synchronization after
the invocation of the ift_txd_encap() method. If the MAC
driver fails to encapsulate the packet and we retry with
a defragmented mbuf chain or finally fail, the cycles for
TX buffer synchronization have been wasted. Synchronizing
afterwards matches what non-iflib(4) drivers typically do
and is sufficient as the MAC will not actually start with
the transmission before - in this case - the ift_txd_flush
method is called.
Moreover, for the latter reason the synchronization of the
TX descriptors in iflib_encap() can go as it's enough to
synchronize them before passing control over to the MAC by
issuing the ift_txd_flush() method (see above).
o In iflib_txq_can_drain(), only synchronize TX descriptors
if the ift_txd_credits_update method accessing these is
actually called.
Differential Revision: https://reviews.freebsd.org/D19081
2019-02-12 21:08:44 +00:00
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (0);
|
|
|
|
}
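/*
 * Editor's sketch, not iflib code: the synchronization ordering described
 * in the commit message quoted above.  Descriptor rings are synchronized
 * for the device before control is handed to the MAC (the ift_txd_flush
 * and ift_rxd_flush methods), and for the CPU before the driver inspects
 * status the MAC may have written back (ift_txd_credits_update,
 * ift_rxd_available).  The example_ names are illustrative assumptions.
 */
static void
example_ring_handoff(bus_dma_tag_t tag, bus_dmamap_t map)
{

	/* Make CPU stores to the descriptors visible before the doorbell. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* ... write the doorbell / invoke the ift_txd_flush method ... */
}

static void
example_ring_poll(bus_dma_tag_t tag, bus_dmamap_t map)
{

	/* Make MAC write-backs visible before evaluating descriptor status. */
	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* ... invoke ift_txd_credits_update / ift_rxd_available ... */
}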
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
2019-02-04 20:46:57 +00:00
|
|
|
* Allocate DMA resources for RX buffers as well as memory for the RX
|
|
|
|
* mbuf map, direct RX cluster pointer map and RX cluster bus address
|
|
|
|
* map. RX DMA map, RX mbuf map, direct RX cluster pointer map and
|
|
|
|
* RX cluster map are kept in an iflib_sw_rx_desc_array structure.
|
|
|
|
* Since we use one entry in iflib_sw_rx_desc_array per received
|
|
|
|
* packet, the maximum number of entries we'll need is equal to the
|
|
|
|
* number of hardware receive descriptors that we've allocated.
|
2016-05-18 04:35:58 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
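/*
 * Editor's note: an illustrative-only sketch of the parallel-array layout
 * the comment above describes; entry i of each array tracks the same RX
 * slot.  The field names mirror the ifl_sds accesses below, but this
 * example_ structure is an assumption, not the definition iflib uses.
 */
struct example_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for RX buffers */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointers */
	bus_addr_t	*ifsd_ba;	/* bus addresses of the clusters */
};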
|
|
|
|
static int
|
|
|
|
iflib_rxsd_alloc(iflib_rxq_t rxq)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = rxq->ifr_ctx;
|
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
2016-08-12 21:29:44 +00:00
|
|
|
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
device_t dev = ctx->ifc_dev;
|
|
|
|
iflib_fl_t fl;
|
|
|
|
int err;
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
MPASS(scctx->isc_nrxd[0] > 0);
|
|
|
|
MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
fl = rxq->ifr_fl;
|
|
|
|
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
|
2016-08-12 21:29:44 +00:00
|
|
|
fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
|
2019-02-04 20:46:57 +00:00
|
|
|
/* Set up DMA tag for RX buffers. */
|
2016-05-18 04:35:58 +00:00
|
|
|
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
|
|
|
|
1, 0, /* alignment, bounds */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
sctx->isc_rx_maxsize, /* maxsize */
|
|
|
|
sctx->isc_rx_nsegments, /* nsegments */
|
|
|
|
sctx->isc_rx_maxsegsize, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, /* lockfunc */
|
|
|
|
NULL, /* lockarg */
|
2019-02-04 20:46:57 +00:00
|
|
|
&fl->ifl_buf_tag);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (err) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RX DMA tag: %d\n", err);
|
2016-05-18 04:35:58 +00:00
|
|
|
goto fail;
|
|
|
|
}
|
2019-02-04 20:46:57 +00:00
|
|
|
|
|
|
|
/* Allocate memory for the RX mbuf map. */
|
2017-01-27 23:08:06 +00:00
|
|
|
if (!(fl->ifl_sds.ifsd_m =
|
2018-01-21 15:42:36 +00:00
|
|
|
(struct mbuf **) malloc(sizeof(struct mbuf *) *
|
|
|
|
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RX mbuf map memory\n");
|
2017-01-27 23:08:06 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
2019-02-04 20:46:57 +00:00
|
|
|
|
|
|
|
/* Allocate memory for the direct RX cluster pointer map. */
|
2017-01-27 23:08:06 +00:00
|
|
|
if (!(fl->ifl_sds.ifsd_cl =
|
2018-01-21 15:42:36 +00:00
|
|
|
(caddr_t *) malloc(sizeof(caddr_t) *
|
|
|
|
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RX cluster map memory\n");
|
2017-01-27 23:08:06 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2019-02-04 20:46:57 +00:00
|
|
|
/* Allocate memory for the RX cluster bus address map. */
|
2018-11-27 20:01:05 +00:00
|
|
|
if (!(fl->ifl_sds.ifsd_ba =
|
|
|
|
(bus_addr_t *) malloc(sizeof(bus_addr_t) *
|
|
|
|
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RX bus address map memory\n");
|
2018-11-27 20:01:05 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
2017-01-27 23:08:06 +00:00
|
|
|
|
2019-02-04 20:46:57 +00:00
|
|
|
/*
|
|
|
|
* Create the DMA maps for RX buffers.
|
|
|
|
*/
|
2017-01-27 23:08:06 +00:00
|
|
|
if (!(fl->ifl_sds.ifsd_map =
|
2018-01-21 15:42:36 +00:00
|
|
|
(bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RX buffer DMA map memory\n");
|
2017-01-27 23:08:06 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
|
2019-02-04 20:46:57 +00:00
|
|
|
err = bus_dmamap_create(fl->ifl_buf_tag, 0,
|
|
|
|
&fl->ifl_sds.ifsd_map[i]);
|
2017-01-27 23:08:06 +00:00
|
|
|
if (err != 0) {
|
2017-03-13 22:53:06 +00:00
|
|
|
device_printf(dev, "Unable to create RX buffer DMA map\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
2017-01-28 15:44:14 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
iflib_rx_structures_free(ctx);
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Internal service routines
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct rxq_refill_cb_arg {
|
|
|
|
int error;
|
|
|
|
bus_dma_segment_t seg;
|
|
|
|
int nseg;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
_rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
|
|
|
|
{
|
|
|
|
struct rxq_refill_cb_arg *cb_arg = arg;
|
|
|
|
|
|
|
|
cb_arg->error = error;
|
|
|
|
cb_arg->seg = segs[0];
|
|
|
|
cb_arg->nseg = nseg;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* _iflib_fl_refill - refill an RX free-buffer list
|
|
|
|
* @ctx: the iflib context
|
|
|
|
* @fl: the free list to refill
|
|
|
|
* @count: the number of new buffers to allocate
|
|
|
|
*
|
|
|
|
* (Re)populate the free-buffer list with up to @count new packet buffers.
|
|
|
|
* The caller must ensure that @count does not exceed the free list's capacity.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
|
|
|
|
{
|
2019-01-26 21:35:51 +00:00
|
|
|
struct if_rxd_update iru;
|
|
|
|
struct rxq_refill_cb_arg cb_arg;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct mbuf *m;
|
2017-01-27 23:08:06 +00:00
|
|
|
caddr_t cl, *sd_cl;
|
|
|
|
struct mbuf **sd_m;
|
|
|
|
bus_dmamap_t *sd_map;
|
2018-11-27 20:01:05 +00:00
|
|
|
bus_addr_t bus_addr, *sd_ba;
|
2019-01-26 21:35:51 +00:00
|
|
|
int err, frag_idx, i, idx, n, pidx;
|
2017-10-31 17:50:42 +00:00
|
|
|
qidx_t credits;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-01-27 23:08:06 +00:00
|
|
|
sd_m = fl->ifl_sds.ifsd_m;
|
|
|
|
sd_map = fl->ifl_sds.ifsd_map;
|
|
|
|
sd_cl = fl->ifl_sds.ifsd_cl;
|
2018-11-27 20:01:05 +00:00
|
|
|
sd_ba = fl->ifl_sds.ifsd_ba;
|
2019-01-26 21:35:51 +00:00
|
|
|
pidx = fl->ifl_pidx;
|
2017-01-27 23:08:06 +00:00
|
|
|
idx = pidx;
|
2019-01-26 21:35:51 +00:00
|
|
|
frag_idx = fl->ifl_fragidx;
|
2017-10-31 17:50:42 +00:00
|
|
|
credits = fl->ifl_credits;
|
2017-01-27 23:08:06 +00:00
|
|
|
|
2019-01-26 21:35:51 +00:00
|
|
|
i = 0;
|
|
|
|
n = count;
|
2016-05-18 04:35:58 +00:00
|
|
|
MPASS(n > 0);
|
2017-10-31 17:50:42 +00:00
|
|
|
MPASS(credits + n <= fl->ifl_size);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (pidx < fl->ifl_cidx)
|
|
|
|
MPASS(pidx + n <= fl->ifl_cidx);
|
2017-10-31 17:50:42 +00:00
|
|
|
if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
|
2016-05-18 04:35:58 +00:00
|
|
|
MPASS(fl->ifl_gen == 0);
|
|
|
|
if (pidx > fl->ifl_cidx)
|
|
|
|
MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);
|
|
|
|
|
|
|
|
DBG_COUNTER_INC(fl_refills);
|
|
|
|
if (n > 8)
|
|
|
|
DBG_COUNTER_INC(fl_refills_large);
|
2017-10-30 21:14:31 +00:00
|
|
|
iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
|
2016-05-18 04:35:58 +00:00
|
|
|
while (n--) {
|
|
|
|
/*
|
|
|
|
* We allocate an uninitialized mbuf + cluster; the mbuf is
|
|
|
|
* initialized after rx.
|
|
|
|
*
|
|
|
|
* If the cluster is still set, we know a minimum-sized packet was received
|
|
|
|
*/
|
2019-01-26 21:35:51 +00:00
|
|
|
bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size,
|
|
|
|
&frag_idx);
|
|
|
|
if (frag_idx < 0)
|
|
|
|
bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx);
|
|
|
|
MPASS(frag_idx >= 0);
|
2017-07-03 18:23:35 +00:00
|
|
|
if ((cl = sd_cl[frag_idx]) == NULL) {
|
2018-11-27 20:01:05 +00:00
|
|
|
if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
cb_arg.error = 0;
|
2017-03-13 22:53:06 +00:00
|
|
|
MPASS(sd_map != NULL);
|
2019-02-04 20:46:57 +00:00
|
|
|
err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
|
2019-01-16 05:44:14 +00:00
|
|
|
cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
|
|
|
|
BUS_DMA_NOWAIT);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (err != 0 || cb_arg.error) {
|
|
|
|
/*
|
|
|
|
* !zone_pack ?
|
|
|
|
*/
|
|
|
|
if (fl->ifl_zone == zone_pack)
|
|
|
|
uma_zfree(fl->ifl_zone, cl);
|
2018-11-27 20:01:05 +00:00
|
|
|
break;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2018-11-27 20:01:05 +00:00
|
|
|
|
|
|
|
sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
|
|
|
|
sd_cl[frag_idx] = cl;
|
|
|
|
#if MEMORY_LOGGING
|
|
|
|
fl->ifl_cl_enqueued++;
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
bus_addr = sd_ba[frag_idx];
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2019-02-12 21:08:44 +00:00
|
|
|
bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
|
|
|
|
BUS_DMASYNC_PREREAD);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-07-03 18:23:35 +00:00
|
|
|
MPASS(sd_m[frag_idx] == NULL);
|
2018-11-27 20:01:05 +00:00
|
|
|
if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
|
|
|
|
break;
|
|
|
|
}
|
2017-07-03 18:23:35 +00:00
|
|
|
sd_m[frag_idx] = m;
|
2019-01-26 21:35:51 +00:00
|
|
|
bit_set(fl->ifl_rx_bitmap, frag_idx);
|
2018-11-27 20:01:05 +00:00
|
|
|
#if MEMORY_LOGGING
|
|
|
|
fl->ifl_m_enqueued++;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
DBG_COUNTER_INC(rx_allocs);
|
2017-07-03 18:23:35 +00:00
|
|
|
fl->ifl_rxd_idxs[i] = frag_idx;
|
2016-05-18 04:35:58 +00:00
|
|
|
fl->ifl_bus_addrs[i] = bus_addr;
|
|
|
|
fl->ifl_vm_addrs[i] = cl;
|
2017-10-31 17:50:42 +00:00
|
|
|
credits++;
|
2016-05-18 04:35:58 +00:00
|
|
|
i++;
|
2017-10-31 17:50:42 +00:00
|
|
|
MPASS(credits <= fl->ifl_size);
|
2017-01-27 23:08:06 +00:00
|
|
|
if (++idx == fl->ifl_size) {
|
2016-05-18 04:35:58 +00:00
|
|
|
fl->ifl_gen = 1;
|
2017-01-27 23:08:06 +00:00
|
|
|
idx = 0;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
|
2017-03-13 22:53:06 +00:00
|
|
|
iru.iru_pidx = pidx;
|
|
|
|
iru.iru_count = i;
|
|
|
|
ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
|
2016-05-18 04:35:58 +00:00
|
|
|
i = 0;
|
2017-01-27 23:08:06 +00:00
|
|
|
pidx = idx;
|
2017-07-03 18:23:35 +00:00
|
|
|
fl->ifl_pidx = idx;
|
2017-10-31 17:50:42 +00:00
|
|
|
fl->ifl_credits = credits;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
}
|
2018-11-27 20:01:05 +00:00
|
|
|
|
2017-10-31 17:50:42 +00:00
|
|
|
if (i) {
|
|
|
|
iru.iru_pidx = pidx;
|
|
|
|
iru.iru_count = i;
|
|
|
|
ctx->isc_rxd_refill(ctx->ifc_softc, &iru);
|
|
|
|
fl->ifl_pidx = idx;
|
|
|
|
fl->ifl_credits = credits;
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
DBG_COUNTER_INC(rxd_flush);
|
|
|
|
if (fl->ifl_pidx == 0)
|
|
|
|
pidx = fl->ifl_size - 1;
|
|
|
|
else
|
|
|
|
pidx = fl->ifl_pidx - 1;
|
2017-03-13 22:53:06 +00:00
|
|
|
|
2018-11-27 20:01:05 +00:00
|
|
|
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
2016-05-18 04:35:58 +00:00
|
|
|
ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx);
|
2017-07-03 18:23:35 +00:00
|
|
|
fl->ifl_fragidx = frag_idx;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static __inline void
|
|
|
|
__iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max)
|
|
|
|
{
|
|
|
|
/* we avoid allowing pidx to catch up with cidx as it confuses ixl */
|
|
|
|
int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1;
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
MPASS(fl->ifl_credits <= fl->ifl_size);
|
|
|
|
MPASS(reclaimable == delta);
|
|
|
|
|
|
|
|
if (reclaimable > 0)
|
|
|
|
_iflib_fl_refill(ctx, fl, min(max, reclaimable));
|
|
|
|
}
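/*
 * Editor's note, a worked example with assumed numbers: for ifl_size 1024
 * and ifl_credits 1000, reclaimable is 1024 - 1000 - 1 = 23, so at most 23
 * buffers are refilled per call and one slot always stays empty, which
 * keeps pidx from catching up with cidx as the comment above warns.
 */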
|
|
|
|
|
2018-10-12 22:40:54 +00:00
|
|
|
uint8_t
|
|
|
|
iflib_in_detach(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
bool in_detach;
|
|
|
|
STATE_LOCK(ctx);
|
|
|
|
in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH);
|
|
|
|
STATE_UNLOCK(ctx);
|
|
|
|
return (in_detach);
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
static void
|
|
|
|
iflib_fl_bufs_free(iflib_fl_t fl)
|
|
|
|
{
|
|
|
|
iflib_dma_info_t idi = fl->ifl_ifdi;
|
2019-01-16 05:44:14 +00:00
|
|
|
bus_dmamap_t sd_map;
|
2016-05-18 04:35:58 +00:00
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < fl->ifl_size; i++) {
|
2017-01-27 23:08:06 +00:00
|
|
|
struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
|
|
|
|
caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
|
|
|
|
|
2018-11-27 20:01:05 +00:00
|
|
|
if (*sd_cl != NULL) {
|
2019-01-16 05:44:14 +00:00
|
|
|
sd_map = fl->ifl_sds.ifsd_map[i];
|
o As illustrated by e. g. figure 7-14 of the Intel 82599 10 GbE
controller datasheet revision 3.3, in the context of Ethernet
MACs the control data describing the packet buffers typically
are named "descriptors". Each of these descriptors references
one buffer, multiple of which a packet can be composed of.
By contrast, in comments, messages and the names of structure
members, iflib(4) refers to DMA resources employed for RX and
TX buffers (rather than control data) as "desc(riptors)".
This odd naming convention of iflib(4) made reviewing r343085,
and identifying wrong and missing bus_dmamap_sync(9) calls in
particular, way harder than it already is. This convention may
also explain why the netmap(4) part of iflib(4) pairs the DMA
tags for control data with DMA maps of buffers and vice versa
in calls to bus_dma(9) functions.
Therefore, change iflib(4) to refer to buf(fers) when buffers
and not the usual understanding of descriptors is meant. This
change does not include corrections to the DMA resources used
in the netmap(4) parts. However, it revises error messages to
state which kind of allocation/creation failed. Specifically,
the "Unable to allocate tx_buffer (map) memory" copy & pasted
inappropriately on several occasions was replaced with proper
messages.
o Enhance some other error messages to indicate which half - RX
or TX - they apply to instead of using identical text in both
cases and generally canonicalize them.
o Correct the descriptions of iflib_{r,t}xsd_alloc() to reflect
reality; current code doesn't use {r,t}x_buffer structures.
o In iflib_queues_alloc():
- Remove redundant BUS_DMA_NOWAIT of iflib_dma_alloc() calls,
- change the M_WAITOK from malloc(9) calls into M_NOWAIT. The
return values are already checked, deferred DMA allocations
not being an option at this point, BUS_DMA_NOWAIT has to be
used anyway and prior malloc(9) calls in this function also
specify M_NOWAIT.
Reviewed by: shurd
Differential Revision: https://reviews.freebsd.org/D19067
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
|
2019-01-16 05:44:14 +00:00
|
|
|
BUS_DMASYNC_POSTREAD);
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
|
2018-11-27 20:01:05 +00:00
|
|
|
if (*sd_cl != NULL)
|
|
|
|
uma_zfree(fl->ifl_zone, *sd_cl);
|
|
|
|
// XXX: Should this get moved out?
|
|
|
|
if (iflib_in_detach(fl->ifl_rxq->ifr_ctx))
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_destroy(fl->ifl_buf_tag, sd_map);
|
2017-01-27 23:08:06 +00:00
|
|
|
if (*sd_m != NULL) {
|
|
|
|
m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
|
|
|
|
uma_zfree(zone_mbuf, *sd_m);
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
} else {
|
2017-01-27 23:08:06 +00:00
|
|
|
MPASS(*sd_cl == NULL);
|
|
|
|
MPASS(*sd_m == NULL);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
#if MEMORY_LOGGING
|
2017-09-16 02:41:38 +00:00
|
|
|
fl->ifl_m_dequeued++;
|
|
|
|
fl->ifl_cl_dequeued++;
|
2016-05-18 04:35:58 +00:00
|
|
|
#endif
|
2017-01-27 23:08:06 +00:00
|
|
|
*sd_cl = NULL;
|
|
|
|
*sd_m = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
for (i = 0; i < fl->ifl_size; i++) {
|
|
|
|
MPASS(fl->ifl_sds.ifsd_cl[i] == NULL);
|
|
|
|
MPASS(fl->ifl_sds.ifsd_m[i] == NULL);
|
|
|
|
}
|
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
|
|
|
* Reset free list values
|
|
|
|
*/
|
2017-07-03 18:23:35 +00:00
|
|
|
fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0;
|
2016-05-18 04:35:58 +00:00
|
|
|
bzero(idi->idi_vaddr, idi->idi_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize a receive ring and its buffers.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
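/*
 * Note on buffer sizing in iflib_fl_setup(): frames up to 2048 bytes
 * use MCLBYTES clusters, larger frames use MJUMPAGESIZE clusters, and
 * ifc_max_fl_buf_size tracks the largest size in use.  Only
 * min(128, ifl_size) clusters are pre-allocated here; the remainder
 * are refilled on demand.
 */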
static int
|
|
|
|
iflib_fl_setup(iflib_fl_t fl)
|
|
|
|
{
|
|
|
|
iflib_rxq_t rxq = fl->ifl_rxq;
|
|
|
|
if_ctx_t ctx = rxq->ifr_ctx;
|
|
|
|
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
|
|
|
|
|
2017-11-20 21:57:04 +00:00
|
|
|
bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1);
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
|
|
|
** Free current RX buffer structs and their mbufs
|
|
|
|
*/
|
|
|
|
iflib_fl_bufs_free(fl);
|
|
|
|
/* Now replenish the mbufs */
|
|
|
|
MPASS(fl->ifl_credits == 0);
|
|
|
|
/*
|
|
|
|
* XXX don't set the max_frame_size to larger
|
|
|
|
* than the hardware can handle
|
|
|
|
*/
|
|
|
|
if (sctx->isc_max_frame_size <= 2048)
|
|
|
|
fl->ifl_buf_size = MCLBYTES;
|
2017-03-13 22:53:06 +00:00
|
|
|
else
|
|
|
|
fl->ifl_buf_size = MJUMPAGESIZE;
|
2016-05-18 04:35:58 +00:00
|
|
|
if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
|
|
|
|
ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
|
|
|
|
fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
|
|
|
|
fl->ifl_zone = m_getzone(fl->ifl_buf_size);
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Avoid pre-allocating zillions of clusters to an idle card,
 * potentially speeding up attach.
 */
|
|
|
|
_iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
|
|
|
|
MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
|
|
|
|
if (min(128, fl->ifl_size) != fl->ifl_credits)
|
|
|
|
return (ENOBUFS);
|
|
|
|
/*
|
|
|
|
* handle failure
|
|
|
|
*/
|
|
|
|
MPASS(rxq != NULL);
|
|
|
|
MPASS(fl->ifl_ifdi != NULL);
|
|
|
|
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Free receive ring data structures
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
iflib_rx_sds_free(iflib_rxq_t rxq)
|
|
|
|
{
|
|
|
|
iflib_fl_t fl;
|
2019-01-16 05:44:14 +00:00
|
|
|
int i, j;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (rxq->ifr_fl != NULL) {
|
|
|
|
for (i = 0; i < rxq->ifr_nfl; i++) {
|
|
|
|
fl = &rxq->ifr_fl[i];
|
2019-02-04 20:46:57 +00:00
|
|
|
if (fl->ifl_buf_tag != NULL) {
|
2019-01-16 05:44:14 +00:00
|
|
|
if (fl->ifl_sds.ifsd_map != NULL) {
|
2019-01-25 15:02:18 +00:00
|
|
|
for (j = 0; j < fl->ifl_size; j++) {
|
|
|
|
if (fl->ifl_sds.ifsd_map[j] ==
|
2019-01-16 05:44:14 +00:00
|
|
|
NULL)
|
2019-01-25 15:02:18 +00:00
|
|
|
continue;
|
2019-01-16 05:44:14 +00:00
|
|
|
bus_dmamap_sync(
|
2019-02-04 20:46:57 +00:00
|
|
|
fl->ifl_buf_tag,
|
2019-01-25 15:02:18 +00:00
|
|
|
fl->ifl_sds.ifsd_map[j],
|
2019-01-16 05:44:14 +00:00
|
|
|
BUS_DMASYNC_POSTREAD);
|
|
|
|
bus_dmamap_unload(
|
2019-02-04 20:46:57 +00:00
|
|
|
fl->ifl_buf_tag,
|
2019-01-25 15:02:18 +00:00
|
|
|
fl->ifl_sds.ifsd_map[j]);
|
2019-01-16 05:44:14 +00:00
|
|
|
}
|
|
|
|
}
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dma_tag_destroy(fl->ifl_buf_tag);
|
|
|
|
fl->ifl_buf_tag = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2017-01-27 23:08:06 +00:00
|
|
|
free(fl->ifl_sds.ifsd_m, M_IFLIB);
|
|
|
|
free(fl->ifl_sds.ifsd_cl, M_IFLIB);
|
2018-11-27 20:01:05 +00:00
|
|
|
free(fl->ifl_sds.ifsd_ba, M_IFLIB);
|
2017-01-27 23:08:06 +00:00
|
|
|
free(fl->ifl_sds.ifsd_map, M_IFLIB);
|
|
|
|
fl->ifl_sds.ifsd_m = NULL;
|
|
|
|
fl->ifl_sds.ifsd_cl = NULL;
|
2018-11-27 20:01:05 +00:00
|
|
|
fl->ifl_sds.ifsd_ba = NULL;
|
2017-01-27 23:08:06 +00:00
|
|
|
fl->ifl_sds.ifsd_map = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
free(rxq->ifr_fl, M_IFLIB);
|
|
|
|
rxq->ifr_fl = NULL;
|
|
|
|
rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* MI (machine-independent) logic
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
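/*
 * iflib_timer() - per-TX-queue watchdog callout.  Roughly twice a
 * second it calls IFDI_TIMER() and checks for TX progress; a queue
 * marked HUNG that has made no cleaning progress, or that reports no
 * pause frames, escalates to a watchdog reset via the admin task.
 * The callout rearms itself while the interface remains RUNNING.
 */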
static void
|
|
|
|
iflib_timer(void *arg)
|
|
|
|
{
|
2017-09-16 02:41:38 +00:00
|
|
|
iflib_txq_t txq = arg;
|
2016-05-18 04:35:58 +00:00
|
|
|
if_ctx_t ctx = txq->ift_ctx;
|
2017-09-16 02:41:38 +00:00
|
|
|
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
|
2018-07-20 17:24:45 +00:00
|
|
|
uint64_t this_tick = ticks;
|
|
|
|
uint32_t reset_on = hz / 2;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
|
|
|
|
return;
|
|
|
|
/*
|
|
|
|
** Check on the state of the TX queue(s), this
|
|
|
|
** can be done without the lock because it's RO
|
|
|
|
** and the HUNG state will be static if set.
|
|
|
|
*/
|
2018-07-20 17:24:45 +00:00
|
|
|
if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
|
|
|
|
txq->ift_last_timer_tick = this_tick;
|
|
|
|
IFDI_TIMER(ctx, txq->ift_id);
|
|
|
|
if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
|
|
|
|
((txq->ift_cleaned_prev == txq->ift_cleaned) ||
|
|
|
|
(sctx->isc_pause_frames == 0)))
|
|
|
|
goto hung;
|
|
|
|
|
|
|
|
if (ifmp_ring_is_stalled(txq->ift_br))
|
|
|
|
txq->ift_qstatus = IFLIB_QUEUE_HUNG;
|
|
|
|
txq->ift_cleaned_prev = txq->ift_cleaned;
|
|
|
|
}
|
|
|
|
#ifdef DEV_NETMAP
|
|
|
|
if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
|
Further correct and optimize the bus_dma(9) usage of iflib(4):
o Correct the obvious bugs in the netmap(4) parts:
- No longer check for the existence of DMA maps as bus_dma(9)
is used unconditionally in iflib(4) since r341095.
- Supply the correct DMA tag and map pairs to bus_dma(9)
functions (see also the commit message of r343753).
- In iflib_netmap_timer_adjust(), add synchronization of the
TX descriptors before calling the ift_txd_credits_update
method as the latter evaluates the TX descriptors possibly
updated by the MAC.
- In _task_fn_tx(), wrap the netmap(4)-specific bits in
#ifdef DEV_NETMAP just as done in _task_fn_admin() and
_task_fn_rx() respectively.
o In iflib_fast_intr_rxtx(), synchronize the TX rather than
the RX descriptors before calling the ift_txd_credits_update
method (see also above).
o There's no need to synchronize an RX buffer that is going to
be recycled in iflib_rxd_pkt_get(), yet; it's sufficient to
do that as late as passing RX buffers to the MAC via the
ift_rxd_refill method. Hence, combine that synchronization
with the synchronization of new buffers into a common spot
in _iflib_fl_refill().
o There's no need to synchronize the RX descriptors of a free
list in preparation of the MAC updating their statuses with
every invocation of rxd_frag_to_sd(); it's enough to do this
once before handing control over to the MAC, i. e. before
calling ift_rxd_flush method in _iflib_fl_refill(), which
already performs the necessary synchronization.
o Given that the ift_rxd_available method evaluates the RX
descriptors which possibly have been altered by the MAC,
synchronize as appropriate beforehand. Most notably this
is now done in iflib_rxd_avail(), which in turn means that
we don't need to issue the same synchronization yet again
before calling the ift_rxd_pkt_get method in iflib_rxeof().
o In iflib_txd_db_check(), synchronize the TX descriptors
before handing them over to the MAC for transmission via
the ift_txd_flush method.
o In iflib_encap(), move the TX buffer synchronization after
the invocation of the ift_txd_encap() method. If the MAC
driver fails to encapsulate the packet and we retry with
a defragmented mbuf chain or finally fail, the cycles for
TX buffer synchronization have been wasted. Synchronizing
afterwards matches what non-iflib(4) drivers typically do
and is sufficient as the MAC will not actually start
the transmission before - in this case - the ift_txd_flush
method is called.
Moreover, for the latter reason the synchronization of the
TX descriptors in iflib_encap() can go as it's enough to
synchronize them before passing control over to the MAC by
issuing the ift_txd_flush() method (see above).
o In iflib_txq_can_drain(), only synchronize TX descriptors
if the ift_txd_credits_update method accessing these is
actually called.
Differential Revision: https://reviews.freebsd.org/D19081
2019-02-12 21:08:44 +00:00
|
|
|
iflib_netmap_timer_adjust(ctx, txq, &reset_on);
|
2018-07-20 17:24:45 +00:00
|
|
|
#endif
|
2017-09-16 02:41:38 +00:00
|
|
|
/* handle any laggards */
|
|
|
|
if (txq->ift_db_pending)
|
|
|
|
GROUPTASK_ENQUEUE(&txq->ift_task);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-09-16 02:41:38 +00:00
|
|
|
sctx->isc_pause_frames = 0;
|
2016-05-18 04:35:58 +00:00
|
|
|
if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
|
2018-07-20 17:24:45 +00:00
|
|
|
callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
|
2017-09-16 02:41:38 +00:00
|
|
|
return;
|
2018-04-12 14:35:37 +00:00
|
|
|
hung:
|
2017-09-16 02:41:38 +00:00
|
|
|
device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
|
|
|
|
txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
|
|
|
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
|
|
|
|
ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
|
2018-10-23 04:37:29 +00:00
|
|
|
iflib_admin_intr_deferred(ctx);
|
2018-10-23 17:06:36 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
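/*
 * iflib_init_locked() - bring the interface up: mark it busy, disable
 * interrupts, derive the hardware-assist (checksum/TSO) flags from the
 * enabled capabilities, stop the per-queue timers, run IFDI_INIT(),
 * set up the free lists (or netmap rings), then mark the interface
 * RUNNING, re-enable interrupts and restart the TX timers.
 */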
static void
|
|
|
|
iflib_init_locked(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
|
2017-01-02 00:56:33 +00:00
|
|
|
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
if_t ifp = ctx->ifc_ifp;
|
|
|
|
iflib_fl_t fl;
|
|
|
|
iflib_txq_t txq;
|
|
|
|
iflib_rxq_t rxq;
|
2017-09-16 02:41:38 +00:00
|
|
|
int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
|
|
|
|
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
|
|
|
|
IFDI_INTR_DISABLE(ctx);
|
|
|
|
|
2017-01-02 00:56:33 +00:00
|
|
|
tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
|
|
|
|
tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
|
2016-05-18 04:35:58 +00:00
|
|
|
/* Set hardware offload abilities */
|
|
|
|
if_clearhwassist(ifp);
|
|
|
|
if (if_getcapenable(ifp) & IFCAP_TXCSUM)
|
2017-01-02 00:56:33 +00:00
|
|
|
if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
|
2017-01-02 00:56:33 +00:00
|
|
|
if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (if_getcapenable(ifp) & IFCAP_TSO4)
|
|
|
|
if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
|
|
|
|
if (if_getcapenable(ifp) & IFCAP_TSO6)
|
|
|
|
if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
|
|
|
|
|
|
|
|
for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
|
|
|
|
CALLOUT_LOCK(txq);
|
|
|
|
callout_stop(&txq->ift_timer);
|
|
|
|
CALLOUT_UNLOCK(txq);
|
|
|
|
iflib_netmap_txq_init(ctx, txq);
|
|
|
|
}
|
2016-08-12 21:29:44 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
i = if_getdrvflags(ifp);
|
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
IFDI_INIT(ctx);
|
2016-08-12 21:29:44 +00:00
|
|
|
MPASS(if_getdrvflags(ifp) == i);
|
2016-05-18 04:35:58 +00:00
|
|
|
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
|
2017-03-13 22:53:06 +00:00
|
|
|
/* XXX this should really be done on a per-queue basis */
|
2017-09-20 20:40:49 +00:00
|
|
|
if (if_getcapenable(ifp) & IFCAP_NETMAP) {
|
|
|
|
MPASS(rxq->ifr_id == i);
|
|
|
|
iflib_netmap_rxq_init(ctx, rxq);
|
2017-03-13 22:53:06 +00:00
|
|
|
continue;
|
2017-09-20 20:40:49 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
|
|
|
|
if (iflib_fl_setup(fl)) {
|
|
|
|
device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n");
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-06-18 17:27:43 +00:00
|
|
|
done:
|
2016-05-18 04:35:58 +00:00
|
|
|
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
|
|
|
|
IFDI_INTR_ENABLE(ctx);
|
|
|
|
txq = ctx->ifc_txqs;
|
|
|
|
for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
|
2017-09-16 02:41:38 +00:00
|
|
|
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
|
|
|
|
txq->ift_timer.c_cpu);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_media_change(if_t ifp)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
int err;
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0)
|
|
|
|
iflib_init_locked(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
2017-09-16 02:41:38 +00:00
|
|
|
IFDI_UPDATE_ADMIN_STATUS(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
IFDI_MEDIA_STATUS(ctx, ifmr);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
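/*
 * iflib_stop() - quiesce the interface: mark it inactive, disable
 * interrupts, call IFDI_STOP(), then purge and free all TX buffers,
 * reset TX/RX ring state and statistics, and release the RX free
 * list buffers.
 */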
void
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_stop(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
iflib_txq_t txq = ctx->ifc_txqs;
|
|
|
|
iflib_rxq_t rxq = ctx->ifc_rxqs;
|
|
|
|
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
|
2018-11-14 15:16:45 +00:00
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_dma_info_t di;
|
|
|
|
iflib_fl_t fl;
|
|
|
|
int i, j;
|
|
|
|
|
|
|
|
/* Tell the stack that the interface is no longer active */
|
|
|
|
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
|
|
|
|
|
|
|
|
IFDI_INTR_DISABLE(ctx);
|
2017-09-16 02:41:38 +00:00
|
|
|
DELAY(1000);
|
2016-11-18 04:19:21 +00:00
|
|
|
IFDI_STOP(ctx);
|
2017-09-16 02:41:38 +00:00
|
|
|
DELAY(1000);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2016-11-18 04:19:21 +00:00
|
|
|
iflib_debug_reset();
|
2016-05-18 04:35:58 +00:00
|
|
|
/* Wait for current tx queue users to exit to disarm watchdog timer. */
|
|
|
|
for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
|
|
|
|
/* make sure all transmitters have completed before proceeding XXX */
|
|
|
|
|
2018-03-02 18:48:07 +00:00
|
|
|
CALLOUT_LOCK(txq);
|
|
|
|
callout_stop(&txq->ift_timer);
|
|
|
|
CALLOUT_UNLOCK(txq);
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
/* clean any enqueued buffers */
|
2016-11-18 04:19:21 +00:00
|
|
|
iflib_ifmp_purge(txq);
|
2016-05-18 04:35:58 +00:00
|
|
|
/* Free any existing tx buffers. */
|
2016-08-12 21:29:44 +00:00
|
|
|
for (j = 0; j < txq->ift_size; j++) {
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_txsd_free(ctx, txq, j);
|
|
|
|
}
|
2017-09-16 02:41:38 +00:00
|
|
|
txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
|
|
|
|
txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
|
2016-05-18 04:35:58 +00:00
|
|
|
txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
|
|
|
|
txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
|
2017-09-16 02:41:38 +00:00
|
|
|
txq->ift_pullups = 0;
|
2017-03-13 22:53:06 +00:00
|
|
|
ifmp_ring_reset_stats(txq->ift_br);
|
2018-11-14 15:16:45 +00:00
|
|
|
for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++)
|
2016-05-18 04:35:58 +00:00
|
|
|
bzero((void *)di->idi_vaddr, di->idi_size);
|
|
|
|
}
|
|
|
|
for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) {
|
|
|
|
/* make sure all transmitters have completed before proceeding XXX */
|
|
|
|
|
2018-11-14 20:36:18 +00:00
|
|
|
rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
|
2018-11-14 15:16:45 +00:00
|
|
|
for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++)
|
2016-05-18 04:35:58 +00:00
|
|
|
bzero((void *)di->idi_vaddr, di->idi_size);
|
|
|
|
/* also resets the free lists pidx/cidx */
|
|
|
|
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
|
|
|
|
iflib_fl_bufs_free(fl);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
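/*
 * calc_next_rxd() - return the address of the RX descriptor that
 * starts on the next cache line after the descriptor at "cidx",
 * wrapping to the start of the ring when that address would run past
 * the end.  The result is prefetched by prefetch_pkts().
 */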
static inline caddr_t
|
|
|
|
calc_next_rxd(iflib_fl_t fl, int cidx)
|
|
|
|
{
|
|
|
|
qidx_t size;
|
|
|
|
int nrxd;
|
|
|
|
caddr_t start, end, cur, next;
|
|
|
|
|
|
|
|
nrxd = fl->ifl_size;
|
|
|
|
size = fl->ifl_rxd_size;
|
|
|
|
start = fl->ifl_ifdi->idi_vaddr;
|
|
|
|
|
|
|
|
if (__predict_false(size == 0))
|
|
|
|
return (start);
|
|
|
|
cur = start + size*cidx;
|
|
|
|
end = start + size*nrxd;
|
|
|
|
next = CACHE_PTR_NEXT(cur);
|
|
|
|
return (next < end ? next : start);
|
|
|
|
}
|
|
|
|
|
2017-01-27 23:08:06 +00:00
|
|
|
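/*
 * prefetch_pkts() - warm the cache for upcoming RX completions:
 * prefetch the software descriptor arrays one pointer increment
 * ahead, the next cache line of the hardware descriptor ring, and the
 * mbuf and cluster pointers for the following four ring entries.
 */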
static inline void
|
|
|
|
prefetch_pkts(iflib_fl_t fl, int cidx)
|
|
|
|
{
|
|
|
|
int nextptr;
|
|
|
|
int nrxd = fl->ifl_size;
|
2017-03-13 22:53:06 +00:00
|
|
|
caddr_t next_rxd;
|
|
|
|
|
2017-01-27 23:08:06 +00:00
|
|
|
|
|
|
|
nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
|
|
|
|
prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
|
|
|
|
prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
|
2017-03-13 22:53:06 +00:00
|
|
|
next_rxd = calc_next_rxd(fl, cidx);
|
|
|
|
prefetch(next_rxd);
|
2017-01-27 23:08:06 +00:00
|
|
|
prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
|
|
|
|
prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
|
|
|
|
prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
|
|
|
|
prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
|
|
|
|
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
|
|
|
|
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
|
|
|
|
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
|
|
|
|
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
|
|
|
|
}
|
|
|
|
|
|
|
|
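/*
 * rxd_frag_to_sd() - translate one received fragment into software
 * descriptor state: record the free list, index, mbuf and cluster
 * pointers in *sd, consume a credit, sync the buffer for the CPU
 * (unloading its DMA map when "unload" is set), and advance the free
 * list consumer index.
 */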
static void
|
2017-03-13 22:53:06 +00:00
|
|
|
rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
int flid, cidx;
|
2017-01-27 23:08:06 +00:00
|
|
|
bus_dmamap_t map;
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_fl_t fl;
|
2017-01-27 23:08:06 +00:00
|
|
|
int next;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
map = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
flid = irf->irf_flid;
|
|
|
|
cidx = irf->irf_idx;
|
|
|
|
fl = &rxq->ifr_fl[flid];
|
2017-03-13 22:53:06 +00:00
|
|
|
sd->ifsd_fl = fl;
|
|
|
|
sd->ifsd_cidx = cidx;
|
|
|
|
sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
|
|
|
|
sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
|
2016-05-18 04:35:58 +00:00
|
|
|
fl->ifl_credits--;
|
|
|
|
#if MEMORY_LOGGING
|
|
|
|
fl->ifl_m_dequeued++;
|
|
|
|
#endif
|
2017-03-13 22:53:06 +00:00
|
|
|
if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH)
|
|
|
|
prefetch_pkts(fl, cidx);
|
2018-11-27 20:01:05 +00:00
|
|
|
next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
|
|
|
|
prefetch(&fl->ifl_sds.ifsd_map[next]);
|
|
|
|
map = fl->ifl_sds.ifsd_map[cidx];
|
|
|
|
next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/* not valid assert if bxe really does SGE from non-contiguous elements */
|
2018-11-27 20:01:05 +00:00
|
|
|
MPASS(fl->ifl_cidx == cidx);
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
|
2018-11-27 20:01:05 +00:00
|
|
|
if (unload)
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(fl->ifl_buf_tag, map);
|
2017-03-13 22:53:06 +00:00
|
|
|
fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
|
|
|
|
if (__predict_false(fl->ifl_cidx == 0))
|
2016-05-18 04:35:58 +00:00
|
|
|
fl->ifl_gen = 0;
|
2019-01-16 05:44:14 +00:00
|
|
|
bit_clear(fl->ifl_rx_bitmap, cidx);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
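/*
 * assemble_segments() - build an mbuf chain for a multi-fragment
 * packet: for each non-empty fragment, take the preallocated mbuf and
 * cluster from the free list, attach the cluster, apply any leading
 * pad on the first fragment only, and link the mbufs together.
 * Returns the head of the chain.
 */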
static struct mbuf *
|
2017-03-13 22:53:06 +00:00
|
|
|
assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
2017-03-13 22:53:06 +00:00
|
|
|
int i, padlen, flags;
|
|
|
|
struct mbuf *m, *mh, *mt;
|
|
|
|
caddr_t cl;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
i = 0;
|
2016-08-12 21:29:44 +00:00
|
|
|
mh = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
do {
|
2017-03-13 22:53:06 +00:00
|
|
|
rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
MPASS(*sd->ifsd_cl != NULL);
|
|
|
|
MPASS(*sd->ifsd_m != NULL);
|
2016-08-12 21:29:44 +00:00
|
|
|
|
|
|
|
/* Don't include zero-length frags */
|
|
|
|
if (ri->iri_frags[i].irf_len == 0) {
|
|
|
|
/* XXX we can save the cluster here, but not the mbuf */
|
2017-03-13 22:53:06 +00:00
|
|
|
m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
|
|
|
|
m_free(*sd->ifsd_m);
|
|
|
|
*sd->ifsd_m = NULL;
|
2016-08-12 21:29:44 +00:00
|
|
|
continue;
|
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
m = *sd->ifsd_m;
|
|
|
|
*sd->ifsd_m = NULL;
|
2016-08-12 21:29:44 +00:00
|
|
|
if (mh == NULL) {
|
2016-05-18 04:35:58 +00:00
|
|
|
flags = M_PKTHDR|M_EXT;
|
|
|
|
mh = mt = m;
|
|
|
|
padlen = ri->iri_pad;
|
|
|
|
} else {
|
|
|
|
flags = M_EXT;
|
|
|
|
mt->m_next = m;
|
|
|
|
mt = m;
|
|
|
|
/* assuming padding is only on the first fragment */
|
|
|
|
padlen = 0;
|
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
cl = *sd->ifsd_cl;
|
|
|
|
*sd->ifsd_cl = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/* Can these two be made one ? */
|
|
|
|
m_init(m, M_NOWAIT, MT_DATA, flags);
|
2017-03-13 22:53:06 +00:00
|
|
|
m_cljset(m, cl, sd->ifsd_fl->ifl_cltype);
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
|
|
|
* These must follow m_init and m_cljset
|
|
|
|
*/
|
|
|
|
m->m_data += padlen;
|
|
|
|
ri->iri_len -= padlen;
|
2016-08-12 21:29:44 +00:00
|
|
|
m->m_len = ri->iri_frags[i].irf_len;
|
2016-05-18 04:35:58 +00:00
|
|
|
} while (++i < ri->iri_nfrags);
|
|
|
|
|
|
|
|
return (mh);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process one software descriptor
|
|
|
|
*/
|
|
|
|
static struct mbuf *
|
|
|
|
iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
|
|
|
|
{
|
2017-03-13 22:53:06 +00:00
|
|
|
struct if_rxsd sd;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct mbuf *m;
|
|
|
|
|
|
|
|
/* should I merge this back in now that the two paths are basically duplicated? */
|
2016-08-12 21:29:44 +00:00
|
|
|
if (ri->iri_nfrags == 1 &&
|
2018-03-25 23:23:19 +00:00
|
|
|
ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) {
|
2017-03-13 22:53:06 +00:00
|
|
|
rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd);
|
|
|
|
m = *sd.ifsd_m;
|
|
|
|
*sd.ifsd_m = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
|
2017-03-13 22:53:06 +00:00
|
|
|
#ifndef __NO_STRICT_ALIGNMENT
|
|
|
|
if (!IP_ALIGNED(m))
|
|
|
|
m->m_data += 2;
|
|
|
|
#endif
|
|
|
|
memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
|
2016-08-12 21:29:44 +00:00
|
|
|
m->m_len = ri->iri_frags[0].irf_len;
|
2016-05-18 04:35:58 +00:00
|
|
|
} else {
|
2017-03-13 22:53:06 +00:00
|
|
|
m = assemble_segments(rxq, ri, &sd);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
m->m_pkthdr.len = ri->iri_len;
|
|
|
|
m->m_pkthdr.rcvif = ri->iri_ifp;
|
|
|
|
m->m_flags |= ri->iri_flags;
|
|
|
|
m->m_pkthdr.ether_vtag = ri->iri_vtag;
|
|
|
|
m->m_pkthdr.flowid = ri->iri_flowid;
|
|
|
|
M_HASHTYPE_SET(m, ri->iri_rsstype);
|
|
|
|
m->m_pkthdr.csum_flags = ri->iri_csum_flags;
|
|
|
|
m->m_pkthdr.csum_data = ri->iri_csum_data;
|
|
|
|
return (m);
|
|
|
|
}
|
|
|
|
|
2017-11-06 16:23:21 +00:00
|
|
|
#if defined(INET6) || defined(INET)
|
2017-12-05 20:43:24 +00:00
|
|
|
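/*
 * iflib_get_ip_forwarding() - snapshot the IPv4/IPv6 forwarding state
 * of the vnet owning the interface; used below to skip LRO when
 * forwarding is enabled for the packet's protocol family.
 */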
static void
|
|
|
|
iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
|
|
|
|
{
|
|
|
|
CURVNET_SET(lc->ifp->if_vnet);
|
|
|
|
#if defined(INET6)
|
|
|
|
*v6 = VNET(ip6_forwarding);
|
|
|
|
#endif
|
|
|
|
#if defined(INET)
|
|
|
|
*v4 = VNET(ipforwarding);
|
|
|
|
#endif
|
|
|
|
CURVNET_RESTORE();
|
|
|
|
}
|
|
|
|
|
2017-11-06 16:23:21 +00:00
|
|
|
/*
|
|
|
|
* Returns true if it's possible this packet could be LROed.
|
|
|
|
* If it returns false, it is guaranteed that tcp_lro_rx()
|
|
|
|
* would not return zero.
|
|
|
|
*/
|
|
|
|
static bool
|
2017-12-05 20:43:24 +00:00
|
|
|
iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
|
2017-11-06 16:23:21 +00:00
|
|
|
{
|
|
|
|
struct ether_header *eh;
|
|
|
|
uint16_t eh_type;
|
|
|
|
|
|
|
|
eh = mtod(m, struct ether_header *);
|
|
|
|
eh_type = ntohs(eh->ether_type);
|
|
|
|
switch (eh_type) {
|
2017-11-06 19:54:25 +00:00
|
|
|
#if defined(INET6)
|
2017-11-06 16:23:21 +00:00
|
|
|
case ETHERTYPE_IPV6:
|
2017-12-05 20:43:24 +00:00
|
|
|
return !v6_forwarding;
|
2017-11-06 19:54:25 +00:00
|
|
|
#endif
|
|
|
|
#if defined (INET)
|
2017-11-06 16:23:21 +00:00
|
|
|
case ETHERTYPE_IP:
|
2017-12-05 20:43:24 +00:00
|
|
|
return !v4_forwarding;
|
2017-11-06 19:54:25 +00:00
|
|
|
#endif
|
2017-11-06 16:23:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2017-12-05 20:43:24 +00:00
|
|
|
#else
|
|
|
|
static void
|
|
|
|
iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
|
|
|
|
{
|
|
|
|
}
|
2017-11-06 16:23:21 +00:00
|
|
|
#endif
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
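/*
 * iflib_rxeof() - main RX processing loop for one queue.  Pull up to
 * "budget" completed descriptors from the driver, turn them into mbuf
 * chains, keep the free lists topped up, and hand the packets to LRO
 * where possible or to if_input() otherwise.  Returns true if more
 * work remains.
 */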
static bool
|
2017-03-13 22:53:06 +00:00
|
|
|
iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
if_ctx_t ctx = rxq->ifr_ctx;
|
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
2016-08-12 21:29:44 +00:00
|
|
|
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
int avail, i;
|
2017-03-13 22:53:06 +00:00
|
|
|
qidx_t *cidxp;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct if_rxd_info ri;
|
|
|
|
int err, budget_left, rx_bytes, rx_pkts;
|
|
|
|
iflib_fl_t fl;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
int lro_enabled;
|
2018-05-19 19:00:04 +00:00
|
|
|
bool v4_forwarding, v6_forwarding, lro_possible;
|
2017-03-13 22:53:06 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
|
|
|
* XXX early demux data packets so that if_input processing only handles
|
|
|
|
* acks in interrupt context
|
|
|
|
*/
|
2017-09-23 01:35:14 +00:00
|
|
|
struct mbuf *m, *mh, *mt, *mf;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-05-19 19:00:04 +00:00
|
|
|
lro_possible = v4_forwarding = v6_forwarding = false;
|
2017-03-13 22:53:06 +00:00
|
|
|
ifp = ctx->ifc_ifp;
|
2016-05-18 04:35:58 +00:00
|
|
|
mh = mt = NULL;
|
|
|
|
MPASS(budget > 0);
|
2017-09-16 02:41:38 +00:00
|
|
|
rx_pkts = rx_bytes = 0;
|
2016-08-12 21:29:44 +00:00
|
|
|
if (sctx->isc_flags & IFLIB_HAS_RXCQ)
|
2016-05-18 04:35:58 +00:00
|
|
|
cidxp = &rxq->ifr_cq_cidx;
|
|
|
|
else
|
|
|
|
cidxp = &rxq->ifr_fl[0].ifl_cidx;
|
2016-08-12 21:29:44 +00:00
|
|
|
if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
|
2016-05-18 04:35:58 +00:00
|
|
|
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
|
|
|
|
__iflib_fl_refill_lt(ctx, fl, budget + 8);
|
|
|
|
DBG_COUNTER_INC(rx_unavail);
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
|
2018-07-22 17:45:44 +00:00
|
|
|
for (budget_left = budget; budget_left > 0 && avail > 0;) {
|
2016-05-18 04:35:58 +00:00
|
|
|
if (__predict_false(!CTX_ACTIVE(ctx))) {
|
|
|
|
DBG_COUNTER_INC(rx_ctx_inactive);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Reset client-set fields to their default values
|
|
|
|
*/
|
2017-03-13 22:53:06 +00:00
|
|
|
rxd_info_zero(&ri);
|
2016-05-18 04:35:58 +00:00
|
|
|
ri.iri_qsidx = rxq->ifr_id;
|
|
|
|
ri.iri_cidx = *cidxp;
|
2017-03-13 22:53:06 +00:00
|
|
|
ri.iri_ifp = ifp;
|
2016-05-18 04:35:58 +00:00
|
|
|
ri.iri_frags = rxq->ifr_frags;
|
|
|
|
err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
|
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
if (err)
|
|
|
|
goto err;
|
2016-08-12 21:29:44 +00:00
|
|
|
if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
|
|
|
|
*cidxp = ri.iri_cidx;
|
|
|
|
/* Update our consumer index */
|
2017-03-13 22:53:06 +00:00
|
|
|
/* XXX NB: shurd - check if this is still safe */
|
2016-08-12 21:29:44 +00:00
|
|
|
while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
|
|
|
|
rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
|
2016-05-18 04:35:58 +00:00
|
|
|
rxq->ifr_cq_gen = 0;
|
|
|
|
}
|
|
|
|
/* was this only a completion queue message? */
|
|
|
|
if (__predict_false(ri.iri_nfrags == 0))
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
MPASS(ri.iri_nfrags != 0);
|
|
|
|
MPASS(ri.iri_len != 0);
|
|
|
|
|
|
|
|
/* will advance the cidx on the corresponding free lists */
|
|
|
|
m = iflib_rxd_pkt_get(rxq, &ri);
|
2018-07-22 17:45:44 +00:00
|
|
|
avail--;
|
|
|
|
budget_left--;
|
2016-05-18 04:35:58 +00:00
|
|
|
if (avail == 0 && budget_left)
|
2016-08-12 21:29:44 +00:00
|
|
|
avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (__predict_false(m == NULL)) {
|
|
|
|
DBG_COUNTER_INC(rx_mbuf_null);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* imm_pkt: -- cxgb */
|
|
|
|
if (mh == NULL)
|
|
|
|
mh = mt = m;
|
|
|
|
else {
|
|
|
|
mt->m_nextpkt = m;
|
|
|
|
mt = m;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* make sure that we can refill faster than drain */
|
|
|
|
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
|
2017-09-16 02:41:38 +00:00
|
|
|
__iflib_fl_refill_lt(ctx, fl, budget + 8);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
|
2017-12-05 20:43:24 +00:00
|
|
|
if (lro_enabled)
|
|
|
|
iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
|
2017-09-23 01:35:14 +00:00
|
|
|
mt = mf = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
while (mh != NULL) {
|
|
|
|
m = mh;
|
|
|
|
mh = mh->m_nextpkt;
|
|
|
|
m->m_nextpkt = NULL;
|
2017-03-13 22:53:06 +00:00
|
|
|
#ifndef __NO_STRICT_ALIGNMENT
|
|
|
|
if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
|
|
|
|
continue;
|
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
rx_bytes += m->m_pkthdr.len;
|
|
|
|
rx_pkts++;
|
2016-05-18 14:18:03 +00:00
|
|
|
#if defined(INET6) || defined(INET)
|
2017-11-06 16:23:21 +00:00
|
|
|
if (lro_enabled) {
|
|
|
|
if (!lro_possible) {
|
2017-12-05 20:43:24 +00:00
|
|
|
lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
|
2017-11-06 16:23:21 +00:00
|
|
|
if (lro_possible && mf != NULL) {
|
|
|
|
ifp->if_input(ifp, mf);
|
|
|
|
DBG_COUNTER_INC(rx_if_input);
|
|
|
|
mt = mf = NULL;
|
|
|
|
}
|
|
|
|
}
|
2017-12-21 01:22:36 +00:00
|
|
|
if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) ==
|
|
|
|
(CSUM_L4_CALC|CSUM_L4_VALID)) {
|
|
|
|
if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
|
2017-12-27 19:12:32 +00:00
|
|
|
continue;
|
2017-12-21 01:22:36 +00:00
|
|
|
}
|
2017-09-23 01:35:14 +00:00
|
|
|
}
|
2016-05-18 14:18:03 +00:00
|
|
|
#endif
|
2017-11-06 16:23:21 +00:00
|
|
|
if (lro_possible) {
|
|
|
|
ifp->if_input(ifp, m);
|
|
|
|
DBG_COUNTER_INC(rx_if_input);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mf == NULL)
|
|
|
|
mf = m;
|
2017-09-23 01:35:14 +00:00
|
|
|
if (mt != NULL)
|
|
|
|
mt->m_nextpkt = m;
|
|
|
|
mt = m;
|
|
|
|
}
|
|
|
|
if (mf != NULL) {
|
|
|
|
ifp->if_input(ifp, mf);
|
2016-05-18 04:35:58 +00:00
|
|
|
DBG_COUNTER_INC(rx_if_input);
|
|
|
|
}
|
2016-08-12 21:29:44 +00:00
|
|
|
|
2017-09-16 02:41:38 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
|
|
|
|
if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush any outstanding LRO work
|
|
|
|
*/
|
2016-05-18 14:18:03 +00:00
|
|
|
#if defined(INET6) || defined(INET)
|
2016-08-12 21:29:44 +00:00
|
|
|
tcp_lro_flush_all(&rxq->ifr_lc);
|
2016-05-18 14:18:03 +00:00
|
|
|
#endif
|
2017-09-16 02:41:38 +00:00
|
|
|
if (avail)
|
|
|
|
return true;
|
|
|
|
return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
|
2017-03-13 22:53:06 +00:00
|
|
|
err:
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
2017-09-16 02:41:38 +00:00
|
|
|
ctx->ifc_flags |= IFC_DO_RESET;
|
2018-10-23 04:37:29 +00:00
|
|
|
iflib_admin_intr_deferred(ctx);
|
2018-10-23 17:06:36 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2017-03-13 22:53:06 +00:00
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
|
|
|
|
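/*
 * Doorbell moderation: with minthresh = ring size / 8,
 * txq_max_db_deferred() allows the full TXD_NOTIFY_COUNT of doorbell
 * writes to be deferred above 4*minthresh in-use descriptors, half of
 * that above 2*minthresh, an eighth above minthresh, and none below.
 */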
static inline qidx_t
|
|
|
|
txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
|
|
|
|
{
|
|
|
|
qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
|
|
|
|
qidx_t minthresh = txq->ift_size / 8;
|
|
|
|
if (in_use > 4*minthresh)
|
|
|
|
return (notify_count);
|
|
|
|
if (in_use > 2*minthresh)
|
|
|
|
return (notify_count >> 1);
|
|
|
|
if (in_use > minthresh)
|
|
|
|
return (notify_count >> 3);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline qidx_t
|
|
|
|
txq_max_rs_deferred(iflib_txq_t txq)
|
|
|
|
{
|
|
|
|
qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
|
|
|
|
qidx_t minthresh = txq->ift_size / 8;
|
|
|
|
if (txq->ift_in_use > 4*minthresh)
|
|
|
|
return (notify_count);
|
|
|
|
if (txq->ift_in_use > 2*minthresh)
|
|
|
|
return (notify_count >> 1);
|
|
|
|
if (txq->ift_in_use > minthresh)
|
|
|
|
return (notify_count >> 2);
|
2017-03-30 16:54:01 +00:00
|
|
|
return (2);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
|
|
|
|
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
|
2017-03-13 22:53:06 +00:00
|
|
|
|
|
|
|
#define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
|
|
|
|
#define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
|
2016-08-12 21:29:44 +00:00
|
|
|
#define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
/* forward compatibility for cxgb */
|
|
|
|
#define FIRST_QSET(ctx) 0
|
|
|
|
#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
|
|
|
|
#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
|
|
|
|
#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
|
|
|
|
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
|
|
|
|
|
|
|
|
/* XXX we should be setting this to something other than zero */
|
|
|
|
#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
|
Use the maximum of isc_tx_{nsegments,tso_segments_max} for MAX_TX_DESC.
Since r336313, TSO support for LEM-class devices is removed again as it
was before the conversion of {l,}em(4) to iflib(4) in r311849 and as a
result, isc_tx_tso_segments_max is 0 for LEM-class devices now. Thus,
inappropriate watermarks were used for this class.
This is really only a band-aid, though, because so far iflib(9) doesn't
fully take into account that DMA engines can support different maxima
of segments for transfers of TSO and non-TSO packets. For example, the
DESC_RECLAIMABLE macro is based on isc_tx_nsegments while MAX_TX_DESC
used isc_tx_tso_segments_max only. For most in-tree consumers that
doesn't make a difference as the maxima are the same for both kinds of
transfers (that is, apart from the fact that TSO may require up to 2
sentinel descriptors but also not with every MAC supported). However,
isc_tx_nsegments is 8 but isc_tx_tso_segments_max is 85 by default
with ixl(4).
2018-07-22 17:51:11 +00:00
|
|
|
#define MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
|
|
|
|
(ctx)->ifc_softc_ctx.isc_tx_nsegments)
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
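/*
 * iflib_txd_db_check() - ring the TX doorbell if "ring" is set or if
 * enough descriptors have accumulated since the last write, syncing
 * the descriptor ring for the device first.  Returns whether the
 * doorbell was written.
 */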
static inline bool
|
|
|
|
iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
|
|
|
|
{
|
|
|
|
qidx_t dbval, max;
|
|
|
|
bool rang;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
rang = false;
|
|
|
|
max = TXQ_MAX_DB_DEFERRED(txq, in_use);
|
|
|
|
if (ring || txq->ift_db_pending >= max) {
|
2016-05-18 04:35:58 +00:00
|
|
|
dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
|
2019-02-12 21:08:44 +00:00
|
|
|
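/* Sync the TX descriptor ring before handing the descriptors to the hardware. */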
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
2016-05-18 04:35:58 +00:00
|
|
|
ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
|
|
|
|
txq->ift_db_pending = txq->ift_npending = 0;
|
2017-03-13 22:53:06 +00:00
|
|
|
rang = true;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
return (rang);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef PKT_DEBUG
|
|
|
|
static void
|
|
|
|
print_pkt(if_pkt_info_t pi)
|
|
|
|
{
|
|
|
|
printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
|
|
|
|
pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
|
|
|
|
printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n",
|
|
|
|
pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag);
|
|
|
|
printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
|
|
|
|
pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
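/* Classify which TX checksum/TSO offloads an mbuf's csum_flags request. */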
#define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO)
|
2018-06-07 13:03:07 +00:00
|
|
|
#define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO))
|
2016-05-18 04:35:58 +00:00
|
|
|
#define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO)
|
2018-06-07 13:03:07 +00:00
|
|
|
#define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO))
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
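/*
 * Parse the Ethernet, IP{,v6} and TCP headers of *mp and fill in the
 * fields of "pi" needed for checksum and TSO offload, pulling up mbuf
 * data as required.  The mbuf chain may be replaced, in which case the
 * new chain is returned in *mp.
 */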
static int
|
|
|
|
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
|
|
|
|
{
|
2017-09-16 02:41:38 +00:00
|
|
|
if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct ether_vlan_header *eh;
|
2018-07-24 23:40:27 +00:00
|
|
|
struct mbuf *m;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-07-22 17:45:44 +00:00
|
|
|
m = *mp;
|
2017-09-16 02:41:38 +00:00
|
|
|
if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
|
|
|
|
M_WRITABLE(m) == 0) {
|
|
|
|
if ((m = m_dup(m, M_NOWAIT)) == NULL) {
|
|
|
|
return (ENOMEM);
|
|
|
|
} else {
|
|
|
|
m_freem(*mp);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
2018-07-22 17:45:44 +00:00
|
|
|
*mp = m;
|
2017-09-16 02:41:38 +00:00
|
|
|
}
|
|
|
|
}
|
2017-01-02 00:56:33 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
|
|
|
* Determine where frame payload starts.
|
|
|
|
* Jump over vlan headers if already present,
|
|
|
|
* helpful for QinQ too.
|
|
|
|
*/
|
|
|
|
if (__predict_false(m->m_len < sizeof(*eh))) {
|
|
|
|
txq->ift_pullups++;
|
|
|
|
if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL))
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
eh = mtod(m, struct ether_vlan_header *);
|
|
|
|
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
|
|
|
|
pi->ipi_etype = ntohs(eh->evl_proto);
|
|
|
|
pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
|
|
|
|
} else {
|
|
|
|
pi->ipi_etype = ntohs(eh->evl_encap_proto);
|
|
|
|
pi->ipi_ehdrlen = ETHER_HDR_LEN;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (pi->ipi_etype) {
|
|
|
|
#ifdef INET
|
|
|
|
case ETHERTYPE_IP:
|
|
|
|
{
|
2018-07-24 23:40:27 +00:00
|
|
|
struct mbuf *n;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct ip *ip = NULL;
|
|
|
|
struct tcphdr *th = NULL;
|
|
|
|
int minthlen;
|
|
|
|
|
|
|
|
minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
|
|
|
|
if (__predict_false(m->m_len < minthlen)) {
|
|
|
|
/*
|
|
|
|
* if this code bloat is causing too much of a hit
|
|
|
|
* move it to a separate function and mark it noinline
|
|
|
|
*/
|
|
|
|
if (m->m_len == pi->ipi_ehdrlen) {
|
|
|
|
n = m->m_next;
|
|
|
|
MPASS(n);
|
|
|
|
if (n->m_len >= sizeof(*ip)) {
|
|
|
|
ip = (struct ip *)n->m_data;
|
|
|
|
if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th))
|
|
|
|
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
|
|
|
|
} else {
|
|
|
|
txq->ift_pullups++;
|
|
|
|
if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
|
|
|
|
return (ENOMEM);
|
|
|
|
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
txq->ift_pullups++;
|
|
|
|
if (__predict_false((m = m_pullup(m, minthlen)) == NULL))
|
|
|
|
return (ENOMEM);
|
|
|
|
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
|
|
|
|
if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
|
|
|
|
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen);
|
|
|
|
if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th))
|
|
|
|
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
|
|
|
|
}
|
|
|
|
pi->ipi_ip_hlen = ip->ip_hl << 2;
|
|
|
|
pi->ipi_ipproto = ip->ip_p;
|
|
|
|
pi->ipi_flags |= IPI_TX_IPV4;
|
|
|
|
|
2018-06-07 13:03:07 +00:00
|
|
|
/* TCP checksum offload may require TCP header length */
|
|
|
|
if (IS_TX_OFFLOAD4(pi)) {
|
|
|
|
if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) {
|
2017-09-23 01:33:20 +00:00
|
|
|
if (__predict_false(th == NULL)) {
|
|
|
|
txq->ift_pullups++;
|
|
|
|
if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL))
|
|
|
|
return (ENOMEM);
|
|
|
|
th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen);
|
|
|
|
}
|
|
|
|
pi->ipi_tcp_hflags = th->th_flags;
|
|
|
|
pi->ipi_tcp_hlen = th->th_off << 2;
|
|
|
|
pi->ipi_tcp_seq = th->th_seq;
|
|
|
|
}
|
2018-06-07 13:03:07 +00:00
|
|
|
if (IS_TSO4(pi)) {
|
|
|
|
if (__predict_false(ip->ip_p != IPPROTO_TCP))
|
|
|
|
return (ENXIO);
|
2018-11-14 15:23:39 +00:00
|
|
|
/*
|
|
|
|
* TSO always requires hardware checksum offload.
|
|
|
|
*/
|
|
|
|
pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP);
|
2018-06-07 13:03:07 +00:00
|
|
|
th->th_sum = in_pseudo(ip->ip_src.s_addr,
|
|
|
|
ip->ip_dst.s_addr, htons(IPPROTO_TCP));
|
|
|
|
pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
|
|
|
|
if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
|
|
|
|
ip->ip_sum = 0;
|
|
|
|
ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
|
|
|
|
}
|
2017-01-02 00:56:33 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2018-11-14 15:23:39 +00:00
|
|
|
if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP))
|
|
|
|
ip->ip_sum = 0;
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifdef INET6
|
|
|
|
case ETHERTYPE_IPV6:
|
|
|
|
{
|
|
|
|
struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
|
|
|
|
struct tcphdr *th;
|
|
|
|
pi->ipi_ip_hlen = sizeof(struct ip6_hdr);
|
|
|
|
|
|
|
|
if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) {
|
2018-09-06 18:51:52 +00:00
|
|
|
txq->ift_pullups++;
|
2016-05-18 04:35:58 +00:00
|
|
|
if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL))
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
|
|
|
|
|
|
|
|
/* XXX-BZ this will go badly in case of ext hdrs. */
|
|
|
|
pi->ipi_ipproto = ip6->ip6_nxt;
|
|
|
|
pi->ipi_flags |= IPI_TX_IPV6;
|
|
|
|
|
2018-06-07 13:03:07 +00:00
|
|
|
/* TCP checksum offload may require TCP header length */
|
|
|
|
if (IS_TX_OFFLOAD6(pi)) {
|
2017-09-23 01:33:20 +00:00
|
|
|
if (pi->ipi_ipproto == IPPROTO_TCP) {
|
|
|
|
if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
|
2018-06-07 13:03:07 +00:00
|
|
|
txq->ift_pullups++;
|
2017-09-23 01:33:20 +00:00
|
|
|
if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
pi->ipi_tcp_hflags = th->th_flags;
|
|
|
|
pi->ipi_tcp_hlen = th->th_off << 2;
|
2018-06-07 13:03:07 +00:00
|
|
|
pi->ipi_tcp_seq = th->th_seq;
|
|
|
|
}
|
|
|
|
if (IS_TSO6(pi)) {
|
|
|
|
if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
|
|
|
|
return (ENXIO);
|
|
|
|
/*
|
2018-11-14 15:23:39 +00:00
|
|
|
* TSO always requires hardware checksum offload.
|
2018-06-07 13:03:07 +00:00
|
|
|
*/
|
|
|
|
pi->ipi_csum_flags |= CSUM_IP6_TCP;
|
|
|
|
th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
|
|
|
|
pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
|
2017-09-23 01:33:20 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
|
|
|
|
pi->ipi_ip_hlen = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
*mp = m;
|
2017-01-02 00:56:33 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If dodgy hardware rejects the scatter gather chain we've handed it
|
2016-08-12 21:29:44 +00:00
|
|
|
* we'll need to remove the mbuf chain from ifsd_m[] before we can add the
|
|
|
|
* m_defrag'd mbufs
|
2016-05-18 04:35:58 +00:00
|
|
|
*/
|
|
|
|
static __noinline struct mbuf *
|
2016-08-12 21:29:44 +00:00
|
|
|
iflib_remove_mbuf(iflib_txq_t txq)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
2018-11-27 20:01:05 +00:00
|
|
|
int ntxd, pidx;
|
|
|
|
struct mbuf *m, **ifsd_m;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
ifsd_m = txq->ift_sds.ifsd_m;
|
2016-08-12 21:29:44 +00:00
|
|
|
ntxd = txq->ift_size;
|
2018-11-27 20:01:05 +00:00
|
|
|
pidx = txq->ift_pidx & (ntxd - 1);
|
|
|
|
ifsd_m = txq->ift_sds.ifsd_m;
|
|
|
|
m = ifsd_m[pidx];
|
2016-05-18 04:35:58 +00:00
|
|
|
ifsd_m[pidx] = NULL;
|
o As illustrated by e. g. figure 7-14 of the Intel 82599 10 GbE
controller datasheet revision 3.3, in the context of Ethernet
MACs the control data describing the packet buffers typically
are named "descriptors". Each of these descriptors references
one buffer, multiple of which a packet can be composed of.
By contrast, in comments, messages and the names of structure
members, iflib(4) refers to DMA resources employed for RX and
TX buffers (rather than control data) as "desc(riptors)".
This odd naming convention of iflib(4) made reviewing r343085
and identifying wrong and missing bus_dmamap_sync(9) calls in
particular way harder than it already is. This convention may
also explain why the netmap(4) part of iflib(4) pairs the DMA
tags for control data with DMA maps of buffers and vice versa
in calls to bus_dma(9) functions.
Therefore, change iflib(4) to refer to buf(fers) when buffers
and not the usual understanding of descriptors is meant. This
change does not include corrections to the DMA resources used
in the netmap(4) parts. However, it revises error messages to
state which kind of allocation/creation failed. Specifically,
the "Unable to allocate tx_buffer (map) memory" copy & pasted
inappropriately on several occasions was replaced with proper
messages.
o Enhance some other error messages to indicate which half - RX
or TX - they apply to instead of using identical text in both
cases and generally canonicalize them.
o Correct the descriptions of iflib_{r,t}xsd_alloc() to reflect
reality; current code doesn't use {r,t}x_buffer structures.
o In iflib_queues_alloc():
- Remove redundant BUS_DMA_NOWAIT of iflib_dma_alloc() calls,
- change the M_WAITOK from malloc(9) calls into M_NOWAIT. The
return values are already checked, deferred DMA allocations
not being an option at this point, BUS_DMA_NOWAIT has to be
used anyway and prior malloc(9) calls in this function also
specify M_NOWAIT.
Reviewed by: shurd
Differential Revision: https://reviews.freebsd.org/D19067
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
|
2019-01-16 05:44:14 +00:00
|
|
|
if (txq->ift_sds.ifsd_tso_map != NULL)
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(txq->ift_tso_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_tso_map[pidx]);
|
2016-05-18 04:35:58 +00:00
|
|
|
#if MEMORY_LOGGING
|
|
|
|
txq->ift_dequeued++;
|
|
|
|
#endif
|
2018-11-27 20:01:05 +00:00
|
|
|
return (m);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
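/*
 * Return the address of the next cache line within the descriptor ring
 * after the descriptor at "cidx", wrapping to the start of the ring.
 * Used to prefetch the upcoming TX descriptors.
 */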
static inline caddr_t
|
|
|
|
calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
|
|
|
|
{
|
|
|
|
qidx_t size;
|
|
|
|
int ntxd;
|
|
|
|
caddr_t start, end, cur, next;
|
|
|
|
|
|
|
|
ntxd = txq->ift_size;
|
|
|
|
size = txq->ift_txd_size[qid];
|
|
|
|
start = txq->ift_ifdi[qid].idi_vaddr;
|
|
|
|
|
|
|
|
if (__predict_false(size == 0))
|
|
|
|
return (start);
|
|
|
|
cur = start + size*cidx;
|
|
|
|
end = start + size*ntxd;
|
|
|
|
next = CACHE_PTR_NEXT(cur);
|
|
|
|
return (next < end ? next : start);
|
|
|
|
}
|
|
|
|
|
2017-12-05 21:00:31 +00:00
|
|
|
/*
|
|
|
|
* Pad an mbuf to ensure a minimum ethernet frame size.
|
|
|
|
* min_frame_size is the frame size (less CRC) to pad the mbuf to
|
|
|
|
*/
|
|
|
|
static __noinline int
|
2017-12-08 18:43:31 +00:00
|
|
|
iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
|
2017-12-05 21:00:31 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* 18 is enough bytes to pad an ARP packet to 46 bytes, and
|
|
|
|
* an ARP message is the smallest common payload I can think of
|
|
|
|
*/
|
|
|
|
static char pad[18]; /* just zeros */
|
|
|
|
int n;
|
2017-12-08 18:43:31 +00:00
|
|
|
struct mbuf *new_head;
|
2017-12-05 21:00:31 +00:00
|
|
|
|
2017-12-08 18:43:31 +00:00
|
|
|
if (!M_WRITABLE(*m_head)) {
|
|
|
|
new_head = m_dup(*m_head, M_NOWAIT);
|
|
|
|
if (new_head == NULL) {
|
2017-12-08 19:50:06 +00:00
|
|
|
m_freem(*m_head);
|
2017-12-08 18:43:31 +00:00
|
|
|
device_printf(dev, "cannot pad short frame, m_dup() failed");
|
2017-12-11 20:01:28 +00:00
|
|
|
DBG_COUNTER_INC(encap_pad_mbuf_fail);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
2017-12-08 18:43:31 +00:00
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
m_freem(*m_head);
|
|
|
|
*m_head = new_head;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (n = min_frame_size - (*m_head)->m_pkthdr.len;
|
2017-12-05 21:00:31 +00:00
|
|
|
n > 0; n -= sizeof(pad))
|
2017-12-08 18:43:31 +00:00
|
|
|
if (!m_append(*m_head, min(n, sizeof(pad)), pad))
|
2017-12-05 21:00:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
if (n > 0) {
|
2017-12-08 18:43:31 +00:00
|
|
|
m_freem(*m_head);
|
2017-12-05 21:00:31 +00:00
|
|
|
device_printf(dev, "cannot pad short frame\n");
|
|
|
|
DBG_COUNTER_INC(encap_pad_mbuf_fail);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
2017-12-05 21:00:31 +00:00
|
|
|
return (ENOBUFS);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
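/*
 * Map an mbuf chain for DMA and hand it to the driver's ift_txd_encap
 * method.  On EFBIG the chain is collapsed and, failing that, defragmented
 * before retrying; on success the queue's producer state is advanced.
 */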
static int
|
|
|
|
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx;
|
|
|
|
if_shared_ctx_t sctx;
|
|
|
|
if_softc_ctx_t scctx;
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dma_tag_t buf_tag;
|
2016-05-18 04:35:58 +00:00
|
|
|
bus_dma_segment_t *segs;
|
2018-11-27 20:01:05 +00:00
|
|
|
struct mbuf *m_head, **ifsd_m;
|
2017-03-13 22:53:06 +00:00
|
|
|
void *next_txd;
|
2016-05-18 04:35:58 +00:00
|
|
|
bus_dmamap_t map;
|
|
|
|
struct if_pkt_info pi;
|
|
|
|
int remap = 0;
|
|
|
|
int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
|
|
|
|
|
|
|
|
ctx = txq->ift_ctx;
|
|
|
|
sctx = ctx->ifc_sctx;
|
|
|
|
scctx = &ctx->ifc_softc_ctx;
|
|
|
|
segs = txq->ift_segs;
|
2016-08-12 21:29:44 +00:00
|
|
|
ntxd = txq->ift_size;
|
2016-05-18 04:35:58 +00:00
|
|
|
m_head = *m_headp;
|
|
|
|
map = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're doing TSO the next descriptor to clean may be quite far ahead
|
|
|
|
*/
|
|
|
|
cidx = txq->ift_cidx;
|
|
|
|
pidx = txq->ift_pidx;
|
2017-03-13 22:53:06 +00:00
|
|
|
if (ctx->ifc_flags & IFC_PREFETCH) {
|
|
|
|
next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
|
|
|
|
if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
|
|
|
|
next_txd = calc_next_txd(txq, cidx, 0);
|
|
|
|
prefetch(next_txd);
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
/* prefetch the next cache line of mbuf pointers and flags */
|
|
|
|
prefetch(&txq->ift_sds.ifsd_m[next]);
|
2018-11-27 20:01:05 +00:00
|
|
|
prefetch(&txq->ift_sds.ifsd_map[next]);
|
|
|
|
next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
|
|
|
|
}
|
|
|
|
map = txq->ift_sds.ifsd_map[pidx];
|
|
|
|
ifsd_m = txq->ift_sds.ifsd_m;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
|
2019-02-04 20:46:57 +00:00
|
|
|
buf_tag = txq->ift_tso_buf_tag;
|
2016-05-18 04:35:58 +00:00
|
|
|
max_segs = scctx->isc_tx_tso_segments_max;
|
2019-01-16 05:44:14 +00:00
|
|
|
map = txq->ift_sds.ifsd_tso_map[pidx];
|
2019-02-04 20:46:57 +00:00
|
|
|
MPASS(buf_tag != NULL);
|
Assorted TSO fixes for em(4)/iflib(9) and dead code removal:
- Ever since the workaround for the silicon bug of TSO4 causing MAC hangs
was committed in r295133, CSUM_TSO always got disabled unconditionally
by em(4) on the first invocation of em_init_locked(). However, even with
that problem fixed, it turned out that for at least e. g. 82579 not all
necessary TSO workarounds are in place, still causing MAC hangs even at
Gigabit speed. Thus, for stable/11, TSO usage was deliberately disabled
in r323292 (r323293 for stable/10) for the EM-class by default, allowing
users to turn it on if it happens to work with their particular EM MAC
in a Gigabit-only environment.
In head, the TSO workaround for speeds other than Gigabit was lost with
the conversion to iflib(9) in r311849 (possibly along with another one
or two TSO workarounds). Yet at the same time, for EM-class MACs TSO4
got enabled by default again, causing device hangs. Therefore, change the
default for this hardware class back to have TSO4 off, allowing users
to turn it on manually if it happens to work in their environment as
we do in stable/{10,11}. An alternative would be to add a whitelist of
EM-class devices where TSO4 actually is reliable with the workarounds in
place, but given that the advantage of TSO at Gigabit speed is rather
limited - especially with the overhead of these workarounds -, that's
really not worth it. [1]
This change includes the addition of an isc_capabilities to struct
if_softc_ctx so iflib(9) can also handle interface capabilities that
shouldn't be enabled by default which is used to handle the default-off
capabilities of e1000 as suggested by shurd@ and moving their handling
from em_setup_interface() to em_if_attach_pre() accordingly.
- Although 82543 support TSO4 in theory, the former lem(4) didn't have
support for TSO4, presumably because TSO4 is even more broken in the
LEM-class of MACs than the later EM ones. Still, TSO4 for LEM-class
devices was enabled as part of the conversion to iflib(9) in r311849,
causing device hangs. So revert back to the pre-r311849 behavior of
not supporting TSO4 for LEM-class at all, which includes not creating
a TSO DMA tag in iflib(9) for devices not having IFCAP_TSO4 set. [2]
- In fact, the FreeBSD TCP stack can handle a TSO size of IP_MAXPACKET
(65535) rather than FREEBSD_TSO_SIZE_MAX (65518). However, the TSO
DMA must have a maxsize of the maximum TSO size plus the size of a
VLAN header for software VLAN tagging. The iflib(9) converted em(4),
thus, first correctly sets scctx->isc_tx_tso_size_max to EM_TSO_SIZE
in em_if_attach_pre(), but later on overrides it with IP_MAXPACKET
in em_setup_interface() (apparently, left-over from pre-iflib(9)
times). So remove the later and correct iflib(9) to correctly cap
the maximum TSO size reported to the stack at IP_MAXPACKET. While at
it, let iflib(9) use if_sethwtsomax*().
This change includes the addition of isc_tso_max{seg,}size DMA engine
constraints for the TSO DMA tag to struct if_shared_ctx and letting
iflib_txsd_alloc() automatically adjust the maxsize of that tag in case
IFCAP_VLAN_MTU is supported as requested by shurd@.
- Move the if_setifheaderlen(9) call for adjusting the maximum Ethernet
header length from {ixgbe,ixl,ixlv,ixv,em}_setup_interface() to iflib(9)
so adjustment is automatically done in case IFCAP_VLAN_MTU is supported.
As a consequence, this adjustment now is also done in case of bnxt(4)
which missed it previously.
- Move the reduction of the maximum TSO segment count reported to the
stack by the number of m_pullup(9) calls (which in the worst case,
can add another mbuf and, thus, the requirement for another DMA
segment each) in the transmit path for performance reasons from
em_setup_interface() to iflib_txsd_alloc() as these pull-ups are now
done in iflib_parse_header() rather than in the no longer existing
em_xmit(). Moreover, this optimization applies to all drivers using
iflib(9) and not just em(4); all in-tree iflib(9) consumers still
have enough room to handle full size TSO packets. Also, reduce the
adjustment to the maximum number of m_pullup(9)'s now performed in
iflib_parse_header().
- Prior to the conversion of em(4)/igb(4)/lem(4) and ixl(4) to iflib(9)
in r311849 and r335338 respectively, these drivers didn't enable
IFCAP_VLAN_HWFILTER by default due to VLAN events not being passed
through by lagg(4). With iflib(9), IFCAP_VLAN_HWFILTER was turned on
by default but also lagg(4) was fixed in that regard in r203548. So
just remove the now redundant and defunct IFCAP_VLAN_HWFILTER handling
in {em,ixl,ixlv}_setup_interface().
- Nuke other redundant IFCAP_* setting in {em,ixl,ixlv}_setup_interface()
which is (more completely) already done in {em,ixl,ixlv}_if_attach_pre()
now.
- Remove some redundant/dead setting of scctx->isc_tx_csum_flags in
em_if_attach_pre().
- Remove some IFCAP_* duplicated either directly or indirectly (e. g.
via IFCAP_HWCSUM) in {EM,IGB,IXL}_CAPS.
- Don't bother to fiddle with IFCAP_HWSTATS in ixgbe(4)/ixgbev(4) as
iflib(9) adds that capability unconditionally.
- Remove some unused macros from em(4).
- Bump __FreeBSD_version as some of the above changes require the modules
of drivers using iflib(9) to be recompiled.
Okayed by: sbruno@ at 201806 DevSummit Transport Working Group [1]
Reviewed by: sbruno (earlier version), erj
PR: 219428 (part of; comment #10) [1], 220997 (part of; comment #3) [2]
Differential Revision: https://reviews.freebsd.org/D15720
2018-07-15 19:04:23 +00:00
|
|
|
MPASS(max_segs > 0);
|
2016-05-18 04:35:58 +00:00
|
|
|
} else {
|
2019-02-04 20:46:57 +00:00
|
|
|
buf_tag = txq->ift_buf_tag;
|
2016-05-18 04:35:58 +00:00
|
|
|
max_segs = scctx->isc_tx_nsegments;
|
2019-01-16 05:44:14 +00:00
|
|
|
map = txq->ift_sds.ifsd_map[pidx];
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2017-12-05 21:00:31 +00:00
|
|
|
if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
|
|
|
|
__predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
|
2017-12-08 18:43:31 +00:00
|
|
|
err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
|
2018-09-06 18:51:52 +00:00
|
|
|
if (err) {
|
|
|
|
DBG_COUNTER_INC(encap_txd_encap_fail);
|
2017-12-05 21:00:31 +00:00
|
|
|
return err;
|
2018-09-06 18:51:52 +00:00
|
|
|
}
|
2017-12-05 21:00:31 +00:00
|
|
|
}
|
2017-12-08 18:43:31 +00:00
|
|
|
m_head = *m_headp;
|
2017-03-13 22:53:06 +00:00
|
|
|
|
|
|
|
pkt_info_zero(&pi);
|
2017-09-16 02:41:38 +00:00
|
|
|
pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
|
|
|
|
pi.ipi_pidx = pidx;
|
|
|
|
pi.ipi_qsidx = txq->ift_id;
|
2017-10-23 20:50:08 +00:00
|
|
|
pi.ipi_len = m_head->m_pkthdr.len;
|
|
|
|
pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
|
|
|
|
pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/* deliberate bitwise OR to make one condition */
|
|
|
|
if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) {
|
2018-09-06 18:51:52 +00:00
|
|
|
if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) {
|
|
|
|
DBG_COUNTER_INC(encap_txd_encap_fail);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (err);
|
2018-09-06 18:51:52 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
m_head = *m_headp;
|
|
|
|
}
|
|
|
|
|
|
|
|
retry:
|
2019-02-04 20:46:57 +00:00
|
|
|
err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
|
2018-11-27 20:01:05 +00:00
|
|
|
BUS_DMA_NOWAIT);
|
2016-05-18 04:35:58 +00:00
|
|
|
defrag:
|
|
|
|
if (__predict_false(err)) {
|
|
|
|
switch (err) {
|
|
|
|
case EFBIG:
|
|
|
|
/* try collapse once and defrag once */
|
2018-04-30 23:53:27 +00:00
|
|
|
if (remap == 0) {
|
2016-05-18 04:35:58 +00:00
|
|
|
m_head = m_collapse(*m_headp, M_NOWAIT, max_segs);
|
2018-04-30 23:53:27 +00:00
|
|
|
/* try defrag if collapsing fails */
|
|
|
|
if (m_head == NULL)
|
|
|
|
remap++;
|
|
|
|
}
|
2018-09-06 18:51:52 +00:00
|
|
|
if (remap == 1) {
|
|
|
|
txq->ift_mbuf_defrag++;
|
2016-05-18 04:35:58 +00:00
|
|
|
m_head = m_defrag(*m_headp, M_NOWAIT);
|
2018-09-06 18:51:52 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
remap++;
|
|
|
|
if (__predict_false(m_head == NULL))
|
|
|
|
goto defrag_failed;
|
|
|
|
*m_headp = m_head;
|
|
|
|
goto retry;
|
|
|
|
break;
|
|
|
|
case ENOMEM:
|
|
|
|
txq->ift_no_tx_dma_setup++;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
txq->ift_no_tx_dma_setup++;
|
|
|
|
m_freem(*m_headp);
|
|
|
|
DBG_COUNTER_INC(tx_frees);
|
|
|
|
*m_headp = NULL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
txq->ift_map_failed++;
|
|
|
|
DBG_COUNTER_INC(encap_load_mbuf_fail);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(encap_txd_encap_fail);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (err);
|
|
|
|
}
|
2018-11-27 20:01:05 +00:00
|
|
|
ifsd_m[pidx] = m_head;
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
|
|
|
* XXX assumes a 1 to 1 relationship between segments and
|
|
|
|
* descriptors - this does not hold true on all drivers, e.g.
|
|
|
|
* cxgb
|
|
|
|
*/
|
|
|
|
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
|
|
|
|
txq->ift_no_desc_avail++;
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(buf_tag, map);
|
2016-05-18 04:35:58 +00:00
|
|
|
DBG_COUNTER_INC(encap_txq_avail_fail);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(encap_txd_encap_fail);
|
2016-08-12 21:29:44 +00:00
|
|
|
if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
|
2016-05-18 04:35:58 +00:00
|
|
|
GROUPTASK_ENQUEUE(&txq->ift_task);
|
|
|
|
return (ENOBUFS);
|
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
/*
|
|
|
|
* On Intel cards we can greatly reduce the number of TX interrupts
|
|
|
|
* we see by only setting report status on every Nth descriptor.
|
|
|
|
* However, this also means that the driver will need to keep track
|
|
|
|
* of the descriptors that RS was set on to check them for the DD bit.
|
|
|
|
*/
|
|
|
|
txq->ift_rs_pending += nsegs + 1;
|
|
|
|
if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
|
2018-05-07 18:11:22 +00:00
|
|
|
iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
|
2017-03-13 22:53:06 +00:00
|
|
|
pi.ipi_flags |= IPI_TX_INTR;
|
|
|
|
txq->ift_rs_pending = 0;
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
pi.ipi_segs = segs;
|
|
|
|
pi.ipi_nsegs = nsegs;
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
MPASS(pidx >= 0 && pidx < txq->ift_size);
|
2016-05-18 04:35:58 +00:00
|
|
|
#ifdef PKT_DEBUG
|
|
|
|
print_pkt(&pi);
|
|
|
|
#endif
|
|
|
|
if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
|
2019-02-12 21:08:44 +00:00
|
|
|
bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
|
2016-05-18 04:35:58 +00:00
|
|
|
DBG_COUNTER_INC(tx_encap);
|
2017-03-13 22:53:06 +00:00
|
|
|
MPASS(pi.ipi_new_pidx < txq->ift_size);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
|
|
|
|
if (pi.ipi_new_pidx < pi.ipi_pidx) {
|
2016-08-12 21:29:44 +00:00
|
|
|
ndesc += txq->ift_size;
|
2016-05-18 04:35:58 +00:00
|
|
|
txq->ift_gen = 1;
|
|
|
|
}
|
2017-01-02 00:56:33 +00:00
|
|
|
/*
|
|
|
|
* drivers may need as many as
|
|
|
|
* two sentinels
|
|
|
|
*/
|
|
|
|
MPASS(ndesc <= pi.ipi_nsegs + 2);
|
2016-05-18 04:35:58 +00:00
|
|
|
MPASS(pi.ipi_new_pidx != pidx);
|
|
|
|
MPASS(ndesc > 0);
|
|
|
|
txq->ift_in_use += ndesc;
|
2017-03-13 22:53:06 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
|
|
|
* We update the last software descriptor again here because there may
|
|
|
|
* be a sentinel and/or there may be more mbufs than segments
|
|
|
|
*/
|
|
|
|
txq->ift_pidx = pi.ipi_new_pidx;
|
|
|
|
txq->ift_npending += pi.ipi_ndescs;
|
2018-04-30 23:53:27 +00:00
|
|
|
} else {
|
2016-08-12 21:29:44 +00:00
|
|
|
*m_headp = m_head = iflib_remove_mbuf(txq);
|
2018-04-30 23:53:27 +00:00
|
|
|
if (err == EFBIG) {
|
|
|
|
txq->ift_txd_encap_efbig++;
|
|
|
|
if (remap < 2) {
|
|
|
|
remap = 1;
|
|
|
|
goto defrag;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
goto defrag_failed;
|
|
|
|
}
|
2018-09-06 18:51:52 +00:00
|
|
|
/*
|
|
|
|
* err can't possibly be non-zero here, so we don't need to test it
|
|
|
|
* to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
|
|
|
|
*/
|
2016-05-18 04:35:58 +00:00
|
|
|
return (err);
|
|
|
|
|
|
|
|
defrag_failed:
|
|
|
|
txq->ift_mbuf_defrag_failed++;
|
|
|
|
txq->ift_map_failed++;
|
|
|
|
m_freem(*m_headp);
|
|
|
|
DBG_COUNTER_INC(tx_frees);
|
|
|
|
*m_headp = NULL;
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(encap_txd_encap_fail);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
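/*
 * Free "n" completed TX slots starting at the consumer index: sync and
 * unload the corresponding DMA maps, free the mbufs and advance ift_cidx.
 */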
static void
|
|
|
|
iflib_tx_desc_free(iflib_txq_t txq, int n)
|
|
|
|
{
|
|
|
|
uint32_t qsize, cidx, mask, gen;
|
|
|
|
struct mbuf *m, **ifsd_m;
|
2017-03-13 22:53:06 +00:00
|
|
|
bool do_prefetch;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
cidx = txq->ift_cidx;
|
|
|
|
gen = txq->ift_gen;
|
2016-08-12 21:29:44 +00:00
|
|
|
qsize = txq->ift_size;
|
2016-05-18 04:35:58 +00:00
|
|
|
mask = qsize-1;
|
|
|
|
ifsd_m = txq->ift_sds.ifsd_m;
|
2017-03-13 22:53:06 +00:00
|
|
|
do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-05-06 00:57:52 +00:00
|
|
|
while (n-- > 0) {
|
2017-03-13 22:53:06 +00:00
|
|
|
if (do_prefetch) {
|
|
|
|
prefetch(ifsd_m[(cidx + 3) & mask]);
|
|
|
|
prefetch(ifsd_m[(cidx + 4) & mask]);
|
|
|
|
}
|
2018-11-27 20:01:05 +00:00
|
|
|
if ((m = ifsd_m[cidx]) != NULL) {
|
2016-05-18 04:35:58 +00:00
|
|
|
prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
|
2019-01-16 05:44:14 +00:00
|
|
|
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(txq->ift_tso_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_tso_map[cidx],
|
|
|
|
BUS_DMASYNC_POSTWRITE);
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(txq->ift_tso_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_tso_map[cidx]);
|
|
|
|
} else {
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_sync(txq->ift_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_map[cidx],
|
|
|
|
BUS_DMASYNC_POSTWRITE);
|
2019-02-04 20:46:57 +00:00
|
|
|
bus_dmamap_unload(txq->ift_buf_tag,
|
2019-01-16 05:44:14 +00:00
|
|
|
txq->ift_sds.ifsd_map[cidx]);
|
|
|
|
}
|
2018-11-27 20:01:05 +00:00
|
|
|
/* XXX we don't support any drivers that batch packets yet */
|
|
|
|
MPASS(m->m_nextpkt == NULL);
|
|
|
|
m_freem(m);
|
|
|
|
ifsd_m[cidx] = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
#if MEMORY_LOGGING
|
2018-11-27 20:01:05 +00:00
|
|
|
txq->ift_dequeued++;
|
2016-05-18 04:35:58 +00:00
|
|
|
#endif
|
2018-11-27 20:01:05 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
if (__predict_false(++cidx == qsize)) {
|
|
|
|
cidx = 0;
|
|
|
|
gen = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
txq->ift_cidx = cidx;
|
|
|
|
txq->ift_gen = gen;
|
|
|
|
}
|
|
|
|
|
|
|
|
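/*
 * Query the driver for newly completed descriptors and, if more than
 * "thresh" of them are reclaimable, free them.  Returns the number of
 * descriptors reclaimed, or 0 if still below the threshold.
 */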
static __inline int
|
|
|
|
iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
|
|
|
|
{
|
|
|
|
int reclaim;
|
|
|
|
if_ctx_t ctx = txq->ift_ctx;
|
|
|
|
|
|
|
|
KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
|
|
|
|
MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Need a rate-limiting check so that this isn't called every time
|
|
|
|
*/
|
|
|
|
iflib_tx_credits_update(ctx, txq);
|
|
|
|
reclaim = DESC_RECLAIMABLE(txq);
|
|
|
|
|
|
|
|
if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (iflib_verbose_debug) {
|
|
|
|
printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__,
|
|
|
|
txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
|
|
|
|
reclaim, thresh);
|
|
|
|
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
iflib_tx_desc_free(txq, reclaim);
|
|
|
|
txq->ift_cleaned += reclaim;
|
|
|
|
txq->ift_in_use -= reclaim;
|
|
|
|
|
|
|
|
return (reclaim);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct mbuf **
|
2017-03-13 22:53:06 +00:00
|
|
|
_ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
2017-03-13 22:53:06 +00:00
|
|
|
int next, size;
|
|
|
|
struct mbuf **items;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
size = r->size;
|
|
|
|
next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
|
|
|
|
items = __DEVOLATILE(struct mbuf **, &r->items[0]);
|
|
|
|
|
|
|
|
prefetch(items[(cidx + offset) & (size-1)]);
|
|
|
|
if (remaining > 1) {
|
2017-10-23 20:50:08 +00:00
|
|
|
prefetch2cachelines(&items[next]);
|
|
|
|
prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
|
|
|
|
prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
|
|
|
|
prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
|
2017-03-13 22:53:06 +00:00
|
|
|
}
|
|
|
|
return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_txq_check_drain(iflib_txq_t txq, int budget)
|
|
|
|
{
|
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
ifmp_ring_check_drainage(txq->ift_br, budget);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t
|
|
|
|
iflib_txq_can_drain(struct ifmp_ring *r)
|
|
|
|
{
|
|
|
|
iflib_txq_t txq = r->cookie;
|
|
|
|
if_ctx_t ctx = txq->ift_ctx;
|
|
|
|
|
Further correct and optimize the bus_dma(9) usage of iflib(4):
o Correct the obvious bugs in the netmap(4) parts:
- No longer check for the existence of DMA maps as bus_dma(9)
is used unconditionally in iflib(4) since r341095.
- Supply the correct DMA tag and map pairs to bus_dma(9)
functions (see also the commit message of r343753).
- In iflib_netmap_timer_adjust(), add synchronization of the
TX descriptors before calling the ift_txd_credits_update
method as the latter evaluates the TX descriptors possibly
updated by the MAC.
- In _task_fn_tx(), wrap the netmap(4)-specific bits in
#ifdef DEV_NETMAP just as done in _task_fn_admin() and
_task_fn_rx() respectively.
o In iflib_fast_intr_rxtx(), synchronize the TX rather than
the RX descriptors before calling the ift_txd_credits_update
method (see also above).
o There's no need to synchronize an RX buffer that is going to
be recycled in iflib_rxd_pkt_get(), yet; it's sufficient to
do that as late as passing RX buffers to the MAC via the
ift_rxd_refill method. Hence, combine that synchronization
with the synchronization of new buffers into a common spot
in _iflib_fl_refill().
o There's no need to synchronize the RX descriptors of a free
list in preparation of the MAC updating their statuses with
every invocation of rxd_frag_to_sd(); it's enough to do this
once before handing control over to the MAC, i. e. before
calling ift_rxd_flush method in _iflib_fl_refill(), which
already performs the necessary synchronization.
o Given that the ift_rxd_available method evaluates the RX
descriptors which possibly have been altered by the MAC,
synchronize as appropriate beforehand. Most notably this
is now done in iflib_rxd_avail(), which in turn means that
we don't need to issue the same synchronization yet again
before calling the ift_rxd_pkt_get method in iflib_rxeof().
o In iflib_txd_db_check(), synchronize the TX descriptors
before handing them over to the MAC for transmission via
the ift_txd_flush method.
o In iflib_encap(), move the TX buffer synchronization after
the invocation of the ift_txd_encap() method. If the MAC
driver fails to encapsulate the packet and we retry with
a defragmented mbuf chain or finally fail, the cycles for
TX buffer synchronization have been wasted. Synchronizing
afterwards matches what non-iflib(4) drivers typically do
and is sufficient as the MAC will not actually start with
the transmission before - in this case - the ift_txd_flush
method is called.
Moreover, for the latter reason the synchronization of the
TX descriptors in iflib_encap() can go as it's enough to
synchronize them before passing control over to the MAC by
issuing the ift_txd_flush() method (see above).
o In iflib_txq_can_drain(), only synchronize TX descriptors
if the ift_txd_credits_update method accessing these is
actually called.
Differential Revision: https://reviews.freebsd.org/D19081
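The TX-side ordering described in the last two bullets can be sketched as
follows (simplified; err, pi, map and pidx are placeholders, and the method
signatures are assumed from their use in this file, so treat this as an
outline rather than the literal code):

/* iflib_encap(): program the descriptors first; only a successful
 * encapsulation makes flushing the CPU's view of the payload worthwhile. */
err = ctx->isc_txd_encap(ctx->ifc_softc, &pi);
if (err == 0)
    bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_PREWRITE);

/* iflib_txd_db_check(): publish the descriptor ring and only then let
 * the MAC start fetching via the flush method. */
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, pidx);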
2019-02-12 21:08:44 +00:00
|
|
|
if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2)
|
|
|
|
return (1);
|
2019-01-16 05:44:14 +00:00
|
|
|
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
|
|
|
|
BUS_DMASYNC_POSTREAD);
|
2019-02-12 21:08:44 +00:00
|
|
|
return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id,
|
|
|
|
false));
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t
|
|
|
|
iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
|
|
|
|
{
|
|
|
|
iflib_txq_t txq = r->cookie;
|
|
|
|
if_ctx_t ctx = txq->ift_ctx;
|
2017-03-13 22:53:06 +00:00
|
|
|
struct ifnet *ifp = ctx->ifc_ifp;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct mbuf **mp, *m;
|
2017-03-13 22:53:06 +00:00
|
|
|
int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail;
|
|
|
|
int reclaimed, err, in_use_prev, desc_used;
|
|
|
|
bool do_prefetch, ring, rang;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) ||
|
|
|
|
!LINK_ACTIVE(ctx))) {
|
|
|
|
DBG_COUNTER_INC(txq_drain_notready);
|
|
|
|
return (0);
|
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
|
|
|
|
rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
|
2016-05-18 04:35:58 +00:00
|
|
|
avail = IDXDIFF(pidx, cidx, r->size);
|
|
|
|
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
|
|
|
|
DBG_COUNTER_INC(txq_drain_flushing);
|
|
|
|
for (i = 0; i < avail; i++) {
|
2018-08-29 16:21:34 +00:00
|
|
|
if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq))
|
2018-08-29 15:55:25 +00:00
|
|
|
m_free(r->items[(cidx + i) & (r->size-1)]);
|
2016-05-18 04:35:58 +00:00
|
|
|
r->items[(cidx + i) & (r->size-1)] = NULL;
|
|
|
|
}
|
|
|
|
return (avail);
|
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
|
|
|
|
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
|
|
|
|
CALLOUT_LOCK(txq);
|
|
|
|
callout_stop(&txq->ift_timer);
|
|
|
|
CALLOUT_UNLOCK(txq);
|
|
|
|
DBG_COUNTER_INC(txq_drain_oactive);
|
|
|
|
return (0);
|
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
if (reclaimed)
|
|
|
|
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
|
2016-05-18 04:35:58 +00:00
|
|
|
consumed = mcast_sent = bytes_sent = pkt_sent = 0;
|
|
|
|
count = MIN(avail, TX_BATCH_SIZE);
|
2016-11-18 04:19:21 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (iflib_verbose_debug)
|
|
|
|
printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
|
|
|
|
avail, ctx->ifc_flags, TXQ_AVAIL(txq));
|
|
|
|
#endif
|
2017-03-13 22:53:06 +00:00
|
|
|
do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
|
|
|
|
avail = TXQ_AVAIL(txq);
|
2018-05-04 18:57:05 +00:00
|
|
|
err = 0;
|
2017-03-13 22:53:06 +00:00
|
|
|
for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
|
2018-05-04 18:57:05 +00:00
|
|
|
int rem = do_prefetch ? count - i : 0;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
mp = _ring_peek_one(r, cidx, i, rem);
|
2016-11-18 04:19:21 +00:00
|
|
|
MPASS(mp != NULL && *mp != NULL);
|
2017-03-13 22:53:06 +00:00
|
|
|
if (__predict_false(*mp == (struct mbuf *)txq)) {
|
|
|
|
consumed++;
|
|
|
|
reclaimed++;
|
|
|
|
continue;
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
in_use_prev = txq->ift_in_use;
|
2017-03-13 22:53:06 +00:00
|
|
|
err = iflib_encap(txq, mp);
|
|
|
|
if (__predict_false(err)) {
|
2016-11-18 04:19:21 +00:00
|
|
|
/* no room - bail out */
|
2017-03-13 22:53:06 +00:00
|
|
|
if (err == ENOBUFS)
|
|
|
|
break;
|
|
|
|
consumed++;
|
2016-11-18 04:19:21 +00:00
|
|
|
/* we can't send this packet - skip it */
|
2016-05-18 04:35:58 +00:00
|
|
|
continue;
|
2016-11-18 04:19:21 +00:00
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
consumed++;
|
2016-05-18 04:35:58 +00:00
|
|
|
pkt_sent++;
|
|
|
|
m = *mp;
|
|
|
|
DBG_COUNTER_INC(tx_sent);
|
|
|
|
bytes_sent += m->m_pkthdr.len;
|
2017-03-13 22:53:06 +00:00
|
|
|
mcast_sent += !!(m->m_flags & M_MCAST);
|
|
|
|
avail = TXQ_AVAIL(txq);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
|
|
|
|
desc_used += (txq->ift_in_use - in_use_prev);
|
|
|
|
ETHER_BPF_MTAP(ifp, m);
|
2017-03-13 22:53:06 +00:00
|
|
|
if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
2017-03-13 22:53:06 +00:00
|
|
|
rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
/* deliberate use of bitwise or to avoid gratuitous short-circuit */
|
|
|
|
ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
|
|
|
|
iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
|
2016-05-18 04:35:58 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
|
|
|
|
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
|
|
|
|
if (mcast_sent)
|
|
|
|
if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
|
2016-11-18 04:19:21 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (iflib_verbose_debug)
|
|
|
|
printf("consumed=%d\n", consumed);
|
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
return (consumed);
|
|
|
|
}
|
|
|
|
|
2016-11-18 04:19:21 +00:00
|
|
|
static uint32_t
|
|
|
|
iflib_txq_drain_always(struct ifmp_ring *r)
|
|
|
|
{
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t
|
|
|
|
iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
|
|
|
|
{
|
|
|
|
int i, avail;
|
|
|
|
struct mbuf **mp;
|
|
|
|
iflib_txq_t txq;
|
|
|
|
|
|
|
|
txq = r->cookie;
|
|
|
|
|
|
|
|
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
|
|
|
|
CALLOUT_LOCK(txq);
|
|
|
|
callout_stop(&txq->ift_timer);
|
|
|
|
CALLOUT_UNLOCK(txq);
|
|
|
|
|
|
|
|
avail = IDXDIFF(pidx, cidx, r->size);
|
|
|
|
for (i = 0; i < avail; i++) {
|
2017-03-13 22:53:06 +00:00
|
|
|
mp = _ring_peek_one(r, cidx, i, avail - i);
|
|
|
|
if (__predict_false(*mp == (struct mbuf *)txq))
|
|
|
|
continue;
|
2016-11-18 04:19:21 +00:00
|
|
|
m_freem(*mp);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
2016-11-18 04:19:21 +00:00
|
|
|
}
|
|
|
|
MPASS(ifmp_ring_is_stalled(r) == 0);
|
|
|
|
return (avail);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_ifmp_purge(iflib_txq_t txq)
|
|
|
|
{
|
|
|
|
struct ifmp_ring *r;
|
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
r = txq->ift_br;
|
2016-11-18 04:19:21 +00:00
|
|
|
r->drain = iflib_txq_drain_free;
|
|
|
|
r->can_drain = iflib_txq_drain_always;
|
|
|
|
|
|
|
|
ifmp_ring_check_drainage(r, r->size);
|
|
|
|
|
|
|
|
r->drain = iflib_txq_drain;
|
|
|
|
r->can_drain = iflib_txq_can_drain;
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
static void
|
2016-08-12 21:29:44 +00:00
|
|
|
_task_fn_tx(void *context)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
iflib_txq_t txq = context;
|
|
|
|
if_ctx_t ctx = txq->ift_ctx;
|
2019-02-12 22:33:17 +00:00
|
|
|
#if defined(ALTQ) || defined(DEV_NETMAP)
|
|
|
|
if_t ifp = ctx->ifc_ifp;
|
|
|
|
#endif
|
2018-07-20 17:45:26 +00:00
|
|
|
int abdicate = ctx->ifc_sysctl_tx_abdicate;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-01-02 00:56:33 +00:00
|
|
|
#ifdef IFLIB_DIAGNOSTICS
|
|
|
|
txq->ift_cpu_exec_count[curcpu]++;
|
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
|
|
|
|
return;
|
2019-02-12 21:08:44 +00:00
|
|
|
#ifdef DEV_NETMAP
|
2019-02-12 22:33:17 +00:00
|
|
|
if (if_getcapenable(ifp) & IFCAP_NETMAP) {
|
2019-01-16 05:44:14 +00:00
|
|
|
bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
|
|
|
|
BUS_DMASYNC_POSTREAD);
|
2017-03-13 22:53:06 +00:00
|
|
|
if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
|
2019-02-12 22:33:17 +00:00
|
|
|
netmap_tx_irq(ifp, txq->ift_id);
|
2017-03-13 22:53:06 +00:00
|
|
|
IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
|
|
|
|
return;
|
|
|
|
}
|
2019-02-12 21:08:44 +00:00
|
|
|
#endif
|
2018-07-25 22:46:36 +00:00
|
|
|
#ifdef ALTQ
|
|
|
|
if (ALTQ_IS_ENABLED(&ifp->if_snd))
|
|
|
|
iflib_altq_if_start(ifp);
|
|
|
|
#endif
|
2017-03-13 22:53:06 +00:00
|
|
|
if (txq->ift_db_pending)
|
2018-07-20 17:45:26 +00:00
|
|
|
ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
|
|
|
|
else if (!abdicate)
|
|
|
|
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
|
|
|
|
/*
|
|
|
|
* When abdicating, we always need to check drainage, not just when we don't enqueue
|
|
|
|
*/
|
|
|
|
if (abdicate)
|
|
|
|
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
|
2017-03-13 22:53:06 +00:00
|
|
|
if (ctx->ifc_flags & IFC_LEGACY)
|
|
|
|
IFDI_INTR_ENABLE(ctx);
|
|
|
|
else {
|
2018-05-04 18:57:05 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
int rc =
|
|
|
|
#endif
|
|
|
|
IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
|
|
|
|
KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
|
2017-03-13 22:53:06 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-08-12 21:29:44 +00:00
|
|
|
_task_fn_rx(void *context)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
iflib_rxq_t rxq = context;
|
|
|
|
if_ctx_t ctx = rxq->ifr_ctx;
|
|
|
|
bool more;
|
2017-09-23 01:37:01 +00:00
|
|
|
uint16_t budget;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-01-02 00:56:33 +00:00
|
|
|
#ifdef IFLIB_DIAGNOSTICS
|
|
|
|
rxq->ifr_cpu_exec_count[curcpu]++;
|
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
DBG_COUNTER_INC(task_fn_rxs);
|
|
|
|
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
|
|
|
|
return;
|
2017-09-20 20:40:49 +00:00
|
|
|
more = true;
|
|
|
|
#ifdef DEV_NETMAP
|
|
|
|
if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
|
|
|
|
u_int work = 0;
|
|
|
|
if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
|
|
|
|
more = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2017-09-23 01:37:01 +00:00
|
|
|
budget = ctx->ifc_sysctl_rx_budget;
|
|
|
|
if (budget == 0)
|
|
|
|
budget = 16; /* XXX */
|
|
|
|
if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
|
2016-05-18 04:35:58 +00:00
|
|
|
if (ctx->ifc_flags & IFC_LEGACY)
|
|
|
|
IFDI_INTR_ENABLE(ctx);
|
|
|
|
else {
|
2018-05-04 18:57:05 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
int rc =
|
|
|
|
#endif
|
|
|
|
IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
|
2016-08-12 21:29:44 +00:00
|
|
|
KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
|
2018-05-04 18:57:05 +00:00
|
|
|
DBG_COUNTER_INC(rx_intr_enables);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
|
|
|
|
return;
|
|
|
|
if (more)
|
|
|
|
GROUPTASK_ENQUEUE(&rxq->ifr_task);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-08-12 21:29:44 +00:00
|
|
|
_task_fn_admin(void *context)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
if_ctx_t ctx = context;
|
|
|
|
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
|
|
|
|
iflib_txq_t txq;
|
2017-09-16 02:41:38 +00:00
|
|
|
int i;
|
2018-10-12 22:40:54 +00:00
|
|
|
bool oactive, running, do_reset, do_watchdog, in_detach;
|
2018-07-20 17:24:45 +00:00
|
|
|
uint32_t reset_on = hz / 2;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
|
|
|
running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
|
|
|
|
oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
|
|
|
|
do_reset = (ctx->ifc_flags & IFC_DO_RESET);
|
|
|
|
do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
|
2018-10-12 22:40:54 +00:00
|
|
|
in_detach = (ctx->ifc_flags & IFC_IN_DETACH);
|
2018-04-12 14:35:37 +00:00
|
|
|
ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
|
|
|
|
STATE_UNLOCK(ctx);
|
|
|
|
|
2018-10-12 22:40:54 +00:00
|
|
|
if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
|
|
|
|
return;
|
|
|
|
if (in_detach)
|
2018-04-12 14:35:37 +00:00
|
|
|
return;
|
2017-09-13 01:18:42 +00:00
|
|
|
|
2017-09-16 02:41:38 +00:00
|
|
|
CTX_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
|
|
|
|
CALLOUT_LOCK(txq);
|
|
|
|
callout_stop(&txq->ift_timer);
|
|
|
|
CALLOUT_UNLOCK(txq);
|
|
|
|
}
|
2018-04-12 14:35:37 +00:00
|
|
|
if (do_watchdog) {
|
|
|
|
ctx->ifc_watchdog_events++;
|
|
|
|
IFDI_WATCHDOG_RESET(ctx);
|
|
|
|
}
|
2017-09-16 02:41:38 +00:00
|
|
|
IFDI_UPDATE_ADMIN_STATUS(ctx);
|
2018-07-20 17:24:45 +00:00
|
|
|
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
|
|
|
|
#ifdef DEV_NETMAP
|
|
|
|
reset_on = hz / 2;
|
|
|
|
if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
|
2019-02-12 21:08:44 +00:00
|
|
|
iflib_netmap_timer_adjust(ctx, txq, &reset_on);
|
2018-07-20 17:24:45 +00:00
|
|
|
#endif
|
|
|
|
callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
|
|
|
|
}
|
2017-09-16 02:41:38 +00:00
|
|
|
IFDI_LINK_INTR_ENABLE(ctx);
|
2018-04-12 14:35:37 +00:00
|
|
|
if (do_reset)
|
2017-09-16 02:41:38 +00:00
|
|
|
iflib_if_init_locked(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
|
2017-09-16 02:41:38 +00:00
|
|
|
if (LINK_ACTIVE(ctx) == 0)
|
2016-05-18 04:35:58 +00:00
|
|
|
return;
|
|
|
|
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
|
|
|
|
iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2016-08-12 21:29:44 +00:00
|
|
|
_task_fn_iov(void *context)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
if_ctx_t ctx = context;
|
|
|
|
|
2018-10-12 22:40:54 +00:00
|
|
|
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) &&
|
|
|
|
!(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
|
2016-05-18 04:35:58 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_VFLR_HANDLE(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
if_int_delay_info_t info;
|
|
|
|
if_ctx_t ctx;
|
|
|
|
|
|
|
|
info = (if_int_delay_info_t)arg1;
|
|
|
|
ctx = info->iidi_ctx;
|
|
|
|
info->iidi_req = req;
|
|
|
|
info->iidi_oidp = oidp;
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
err = IFDI_SYSCTL_INT_DELAY(ctx, info);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* IFNET FUNCTIONS
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_if_init_locked(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
iflib_stop(ctx);
|
|
|
|
iflib_init_locked(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_if_init(void *arg)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = arg;
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
iflib_if_init_locked(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_if_transmit(if_t ifp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
|
|
|
|
iflib_txq_t txq;
|
2016-08-12 21:29:44 +00:00
|
|
|
int err, qidx;
|
2018-07-20 17:45:26 +00:00
|
|
|
int abdicate = ctx->ifc_sysctl_tx_abdicate;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
|
|
|
|
DBG_COUNTER_INC(tx_frees);
|
|
|
|
m_freem(m);
|
2016-11-18 04:19:21 +00:00
|
|
|
return (ENOBUFS);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
MPASS(m->m_nextpkt == NULL);
|
2018-07-25 22:46:36 +00:00
|
|
|
/* ALTQ-enabled interfaces always use queue 0. */
|
2016-05-18 04:35:58 +00:00
|
|
|
qidx = 0;
|
2018-07-25 22:46:36 +00:00
|
|
|
if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd))
|
2016-05-18 04:35:58 +00:00
|
|
|
qidx = QIDX(ctx, m);
|
|
|
|
/*
|
|
|
|
* XXX calculate buf_ring based on flowid (divvy up bits?)
|
|
|
|
*/
|
|
|
|
txq = &ctx->ifc_txqs[qidx];
|
|
|
|
|
|
|
|
#ifdef DRIVER_BACKPRESSURE
|
|
|
|
if (txq->ift_closed) {
|
|
|
|
while (m != NULL) {
|
|
|
|
next = m->m_nextpkt;
|
|
|
|
m->m_nextpkt = NULL;
|
|
|
|
m_freem(m);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
2016-05-18 04:35:58 +00:00
|
|
|
m = next;
|
|
|
|
}
|
|
|
|
return (ENOBUFS);
|
|
|
|
}
|
|
|
|
#endif
|
2016-08-12 21:29:44 +00:00
|
|
|
#ifdef notyet
|
2016-05-18 04:35:58 +00:00
|
|
|
qidx = count = 0;
|
|
|
|
mp = marr;
|
|
|
|
next = m;
|
|
|
|
do {
|
|
|
|
count++;
|
|
|
|
next = next->m_nextpkt;
|
|
|
|
} while (next != NULL);
|
|
|
|
|
2016-06-07 19:49:08 +00:00
|
|
|
if (count > nitems(marr))
|
2016-05-18 04:35:58 +00:00
|
|
|
if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
|
|
|
|
/* XXX check nextpkt */
|
|
|
|
m_freem(m);
|
|
|
|
/* XXX simplify for now */
|
|
|
|
DBG_COUNTER_INC(tx_frees);
|
|
|
|
return (ENOBUFS);
|
|
|
|
}
|
|
|
|
for (next = m, i = 0; next != NULL; i++) {
|
|
|
|
mp[i] = next;
|
|
|
|
next = next->m_nextpkt;
|
|
|
|
mp[i]->m_nextpkt = NULL;
|
|
|
|
}
|
2016-08-12 21:29:44 +00:00
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
DBG_COUNTER_INC(tx_seen);
|
2018-07-20 17:45:26 +00:00
|
|
|
err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-07-20 17:45:26 +00:00
|
|
|
if (abdicate)
|
|
|
|
GROUPTASK_ENQUEUE(&txq->ift_task);
|
|
|
|
if (err) {
|
|
|
|
if (!abdicate)
|
|
|
|
GROUPTASK_ENQUEUE(&txq->ift_task);
|
2016-05-18 04:35:58 +00:00
|
|
|
/* support forthcoming later */
|
|
|
|
#ifdef DRIVER_BACKPRESSURE
|
|
|
|
txq->ift_closed = TRUE;
|
|
|
|
#endif
|
2017-03-13 22:53:06 +00:00
|
|
|
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
|
2016-08-12 21:29:44 +00:00
|
|
|
m_freem(m);
|
2018-09-06 18:51:52 +00:00
|
|
|
DBG_COUNTER_INC(tx_frees);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
2018-07-25 22:46:36 +00:00
|
|
|
#ifdef ALTQ
|
|
|
|
/*
|
|
|
|
* The overall approach to integrating iflib with ALTQ is to continue to use
|
|
|
|
* the iflib mp_ring machinery between the ALTQ queue(s) and the hardware
|
|
|
|
* ring. Technically, when using ALTQ, queueing to an intermediate mp_ring
|
|
|
|
* is redundant/unnecessary, but doing so minimizes the amount of
|
|
|
|
* ALTQ-specific code required in iflib. It is assumed that the overhead of
|
|
|
|
* redundantly queueing to an intermediate mp_ring is swamped by the
|
|
|
|
* performance limitations inherent in using ALTQ.
|
|
|
|
*
|
|
|
|
* When ALTQ support is compiled in, all iflib drivers will use a transmit
|
|
|
|
* routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
|
|
|
|
* given interface. If ALTQ is enabled for an interface, then all
|
|
|
|
* transmitted packets for that interface will be submitted to the ALTQ
|
|
|
|
* subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit()
|
|
|
|
* implementation because it uses IFQ_HANDOFF(), which will duplicatively
|
|
|
|
 * update stats that the iflib machinery handles, and which is sensitive to
|
|
|
|
* the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start()
|
|
|
|
* will be installed as the start routine for use by ALTQ facilities that
|
|
|
|
* need to trigger queue drains on a scheduled basis.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
iflib_altq_if_start(if_t ifp)
|
|
|
|
{
|
|
|
|
struct ifaltq *ifq = &ifp->if_snd;
|
|
|
|
struct mbuf *m;
|
|
|
|
|
|
|
|
IFQ_LOCK(ifq);
|
|
|
|
IFQ_DEQUEUE_NOLOCK(ifq, m);
|
|
|
|
while (m != NULL) {
|
|
|
|
iflib_if_transmit(ifp, m);
|
|
|
|
IFQ_DEQUEUE_NOLOCK(ifq, m);
|
|
|
|
}
|
|
|
|
IFQ_UNLOCK(ifq);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
|
|
|
|
IFQ_ENQUEUE(&ifp->if_snd, m, err);
|
|
|
|
if (err == 0)
|
|
|
|
iflib_altq_if_start(ifp);
|
|
|
|
} else
|
|
|
|
err = iflib_if_transmit(ifp, m);
|
|
|
|
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
#endif /* ALTQ */
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
static void
|
|
|
|
iflib_if_qflush(if_t ifp)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
iflib_txq_t txq = ctx->ifc_txqs;
|
|
|
|
int i;
|
|
|
|
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
ctx->ifc_flags |= IFC_QFLUSH;
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
for (i = 0; i < NTXQSETS(ctx); i++, txq++)
|
2017-03-13 22:53:06 +00:00
|
|
|
while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_txq_check_drain(txq, 0);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
ctx->ifc_flags &= ~IFC_QFLUSH;
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-07-25 22:46:36 +00:00
|
|
|
/*
|
|
|
|
* When ALTQ is enabled, this will also take care of purging the
|
|
|
|
* ALTQ queue(s).
|
|
|
|
*/
|
2016-05-18 04:35:58 +00:00
|
|
|
if_qflush(ifp);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-09-20 19:35:35 +00:00
|
|
|
#define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
|
|
|
|
IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
|
|
|
|
IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
|
|
|
|
IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM)
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
struct ifreq *ifr = (struct ifreq *)data;
|
|
|
|
#if defined(INET) || defined(INET6)
|
|
|
|
struct ifaddr *ifa = (struct ifaddr *)data;
|
|
|
|
#endif
|
|
|
|
bool avoid_reset = FALSE;
|
|
|
|
int err = 0, reinit = 0, bits;
|
|
|
|
|
|
|
|
switch (command) {
|
|
|
|
case SIOCSIFADDR:
|
|
|
|
#ifdef INET
|
|
|
|
if (ifa->ifa_addr->sa_family == AF_INET)
|
|
|
|
avoid_reset = TRUE;
|
|
|
|
#endif
|
|
|
|
#ifdef INET6
|
|
|
|
if (ifa->ifa_addr->sa_family == AF_INET6)
|
|
|
|
avoid_reset = TRUE;
|
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
** Calling init results in link renegotiation,
|
|
|
|
** so we avoid doing it when possible.
|
|
|
|
*/
|
|
|
|
if (avoid_reset) {
|
|
|
|
if_setflagbits(ifp, IFF_UP,0);
|
2018-06-18 17:27:43 +00:00
|
|
|
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
|
2016-05-18 04:35:58 +00:00
|
|
|
reinit = 1;
|
|
|
|
#ifdef INET
|
|
|
|
if (!(if_getflags(ifp) & IFF_NOARP))
|
|
|
|
arp_ifinit(ifp, ifa);
|
|
|
|
#endif
|
|
|
|
} else
|
|
|
|
err = ether_ioctl(ifp, command, data);
|
|
|
|
break;
|
|
|
|
case SIOCSIFMTU:
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
if (ifr->ifr_mtu == if_getmtu(ifp)) {
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
bits = if_getdrvflags(ifp);
|
|
|
|
/* stop the driver and free any clusters before proceeding */
|
|
|
|
iflib_stop(ctx);
|
|
|
|
|
|
|
|
if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
|
|
|
|
ctx->ifc_flags |= IFC_MULTISEG;
|
|
|
|
else
|
|
|
|
ctx->ifc_flags &= ~IFC_MULTISEG;
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
err = if_setmtu(ifp, ifr->ifr_mtu);
|
|
|
|
}
|
|
|
|
iflib_init_locked(ctx);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
if_setdrvflags(ifp, bits);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
break;
|
|
|
|
case SIOCSIFFLAGS:
|
2017-09-16 02:41:38 +00:00
|
|
|
CTX_LOCK(ctx);
|
|
|
|
if (if_getflags(ifp) & IFF_UP) {
|
|
|
|
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
|
|
|
|
if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
|
|
|
|
(IFF_PROMISC | IFF_ALLMULTI)) {
|
|
|
|
err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
reinit = 1;
|
|
|
|
} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
|
|
|
|
iflib_stop(ctx);
|
|
|
|
}
|
|
|
|
ctx->ifc_if_flags = if_getflags(ifp);
|
|
|
|
CTX_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
|
|
|
case SIOCADDMULTI:
|
|
|
|
case SIOCDELMULTI:
|
|
|
|
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
|
2017-09-16 02:41:38 +00:00
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_INTR_DISABLE(ctx);
|
|
|
|
IFDI_MULTI_SET(ctx);
|
|
|
|
IFDI_INTR_ENABLE(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SIOCSIFMEDIA:
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_MEDIA_SET(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
/* falls thru */
|
|
|
|
case SIOCGIFMEDIA:
|
2017-12-01 17:58:20 +00:00
|
|
|
case SIOCGIFXMEDIA:
|
2016-05-18 04:35:58 +00:00
|
|
|
err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
|
|
|
|
break;
|
|
|
|
case SIOCGI2C:
|
|
|
|
{
|
|
|
|
struct ifi2creq i2c;
|
|
|
|
|
2018-03-30 18:50:13 +00:00
|
|
|
err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
|
2016-05-18 04:35:58 +00:00
|
|
|
if (err != 0)
|
|
|
|
break;
|
|
|
|
if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
|
|
|
|
err = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i2c.len > sizeof(i2c.data)) {
|
|
|
|
err = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
|
2018-03-30 18:50:13 +00:00
|
|
|
err = copyout(&i2c, ifr_data_get_ptr(ifr),
|
|
|
|
sizeof(i2c));
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case SIOCSIFCAP:
|
|
|
|
{
|
2018-09-20 19:35:35 +00:00
|
|
|
int mask, setmask, oldmask;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-09-20 19:35:35 +00:00
|
|
|
oldmask = if_getcapenable(ifp);
|
|
|
|
mask = ifr->ifr_reqcap ^ oldmask;
|
|
|
|
mask &= ctx->ifc_softc_ctx.isc_capabilities;
|
2016-05-18 04:35:58 +00:00
|
|
|
setmask = 0;
|
|
|
|
#ifdef TCP_OFFLOAD
|
|
|
|
setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
|
|
|
|
#endif
|
|
|
|
setmask |= (mask & IFCAP_FLAGS);
|
2018-09-20 19:35:35 +00:00
|
|
|
setmask |= (mask & IFCAP_WOL);
|
|
|
|
|
|
|
|
/*
|
2018-11-07 19:31:48 +00:00
|
|
|
* If any RX csum has changed, change all the ones that
|
|
|
|
* are supported by the driver.
|
2018-09-20 19:35:35 +00:00
|
|
|
*/
|
2018-11-07 19:31:48 +00:00
|
|
|
if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
|
|
|
|
setmask |= ctx->ifc_softc_ctx.isc_capabilities &
|
|
|
|
(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
 * We want to ensure that traffic has stopped before we change any of the flags
|
|
|
|
*/
|
|
|
|
if (setmask) {
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
bits = if_getdrvflags(ifp);
|
2018-09-20 19:35:35 +00:00
|
|
|
if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_stop(ctx);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
if_togglecapenable(ifp, setmask);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2018-09-20 19:35:35 +00:00
|
|
|
if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL)
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_init_locked(ctx);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
if_setdrvflags(ifp, bits);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
2018-09-20 19:35:35 +00:00
|
|
|
if_vlancap(ifp);
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
2018-06-18 17:27:43 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
case SIOCGPRIVATE_0:
|
|
|
|
case SIOCSDRVSPEC:
|
|
|
|
case SIOCGDRVSPEC:
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
err = IFDI_PRIV_IOCTL(ctx, command, data);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
err = ether_ioctl(ifp, command, data);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (reinit)
|
|
|
|
iflib_if_init(ctx);
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t
|
|
|
|
iflib_if_get_counter(if_t ifp, ift_counter cnt)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
|
|
|
|
return (IFDI_GET_COUNTER(ctx, cnt));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* OTHER FUNCTIONS EXPORTED TO THE STACK
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
|
|
|
|
if ((void *)ctx != arg)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if ((vtag == 0) || (vtag > 4095))
|
|
|
|
return;
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_VLAN_REGISTER(ctx, vtag);
|
|
|
|
/* Re-init to load the changes */
|
|
|
|
if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
|
2017-08-23 21:49:56 +00:00
|
|
|
iflib_if_init_locked(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = if_getsoftc(ifp);
|
|
|
|
|
|
|
|
if ((void *)ctx != arg)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if ((vtag == 0) || (vtag > 4095))
|
|
|
|
return;
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_VLAN_UNREGISTER(ctx, vtag);
|
|
|
|
/* Re-init to load the changes */
|
|
|
|
if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
|
2017-08-23 21:49:56 +00:00
|
|
|
iflib_if_init_locked(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_led_func(void *arg, int onoff)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = arg;
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_LED_FUNC(ctx, onoff);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* BUS FUNCTION DEFINITIONS
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_probe(device_t dev)
|
|
|
|
{
|
|
|
|
pci_vendor_info_t *ent;
|
|
|
|
|
|
|
|
uint16_t pci_vendor_id, pci_device_id;
|
|
|
|
uint16_t pci_subvendor_id, pci_subdevice_id;
|
|
|
|
uint16_t pci_rev_id;
|
|
|
|
if_shared_ctx_t sctx;
|
|
|
|
|
|
|
|
if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
|
|
|
|
return (ENOTSUP);
|
|
|
|
|
|
|
|
pci_vendor_id = pci_get_vendor(dev);
|
|
|
|
pci_device_id = pci_get_device(dev);
|
|
|
|
pci_subvendor_id = pci_get_subvendor(dev);
|
|
|
|
pci_subdevice_id = pci_get_subdevice(dev);
|
|
|
|
pci_rev_id = pci_get_revid(dev);
|
|
|
|
if (sctx->isc_parse_devinfo != NULL)
|
|
|
|
sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
|
|
|
|
|
|
|
|
ent = sctx->isc_vendor_info;
|
|
|
|
while (ent->pvi_vendor_id != 0) {
|
|
|
|
if (pci_vendor_id != ent->pvi_vendor_id) {
|
|
|
|
ent++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if ((pci_device_id == ent->pvi_device_id) &&
|
|
|
|
((pci_subvendor_id == ent->pvi_subvendor_id) ||
|
|
|
|
(ent->pvi_subvendor_id == 0)) &&
|
|
|
|
((pci_subdevice_id == ent->pvi_subdevice_id) ||
|
|
|
|
(ent->pvi_subdevice_id == 0)) &&
|
|
|
|
((pci_rev_id == ent->pvi_rev_id) ||
|
|
|
|
(ent->pvi_rev_id == 0))) {
|
|
|
|
|
|
|
|
device_set_desc_copy(dev, ent->pvi_name);
|
|
|
|
/* this needs to be changed to zero if the bus probing code
|
|
|
|
* ever stops re-probing on best match because the sctx
|
|
|
|
	 * may have its values overwritten by register calls
|
|
|
|
* in subsequent probes
|
|
|
|
*/
|
|
|
|
return (BUS_PROBE_DEFAULT);
|
|
|
|
}
|
|
|
|
ent++;
|
|
|
|
}
|
|
|
|
return (ENXIO);
|
|
|
|
}
|
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
static void
|
|
|
|
iflib_reset_qvalues(if_ctx_t ctx)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
2018-05-11 20:08:28 +00:00
|
|
|
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
|
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
|
|
|
device_t dev = ctx->ifc_dev;
|
2018-05-19 05:27:49 +00:00
|
|
|
int i;
|
2017-01-02 00:56:33 +00:00
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
|
|
|
|
scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
|
2016-08-12 21:29:44 +00:00
|
|
|
/*
|
|
|
|
* XXX sanity check that ntxd & nrxd are a power of 2
|
|
|
|
*/
|
|
|
|
if (ctx->ifc_sysctl_ntxqs != 0)
|
|
|
|
scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
|
|
|
|
if (ctx->ifc_sysctl_nrxqs != 0)
|
|
|
|
scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
|
|
|
|
|
|
|
|
for (i = 0; i < sctx->isc_ntxqs; i++) {
|
|
|
|
if (ctx->ifc_sysctl_ntxds[i] != 0)
|
|
|
|
scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
|
|
|
|
else
|
|
|
|
scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < sctx->isc_nrxqs; i++) {
|
|
|
|
if (ctx->ifc_sysctl_nrxds[i] != 0)
|
|
|
|
scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
|
|
|
|
else
|
|
|
|
scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < sctx->isc_nrxqs; i++) {
|
|
|
|
if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
|
|
|
|
device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
|
|
|
|
i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
|
|
|
|
scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
|
|
|
|
}
|
|
|
|
if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
|
|
|
|
device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
|
|
|
|
i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
|
|
|
|
scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < sctx->isc_ntxqs; i++) {
|
|
|
|
if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
|
|
|
|
device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
|
|
|
|
i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
|
|
|
|
scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
|
|
|
|
}
|
|
|
|
if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
|
|
|
|
device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
|
|
|
|
i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
|
|
|
|
scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
|
|
|
|
}
|
|
|
|
}
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
|
|
|
|
{
|
|
|
|
int err, rid, msix;
|
|
|
|
if_ctx_t ctx;
|
|
|
|
if_t ifp;
|
|
|
|
if_softc_ctx_t scctx;
|
|
|
|
int i;
|
|
|
|
uint16_t main_txq;
|
|
|
|
uint16_t main_rxq;
|
|
|
|
|
|
|
|
|
|
|
|
ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
|
|
|
|
|
|
|
|
if (sc == NULL) {
|
|
|
|
sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
|
|
|
|
device_set_softc(dev, ctx);
|
|
|
|
ctx->ifc_flags |= IFC_SC_ALLOCATED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->ifc_sctx = sctx;
|
|
|
|
ctx->ifc_dev = dev;
|
|
|
|
ctx->ifc_softc = sc;
|
|
|
|
|
|
|
|
if ((err = iflib_register(ctx)) != 0) {
|
|
|
|
device_printf(dev, "iflib_register failed %d\n", err);
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_ctx_free;
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
iflib_add_device_sysctl_pre(ctx);
|
|
|
|
|
|
|
|
scctx = &ctx->ifc_softc_ctx;
|
|
|
|
ifp = ctx->ifc_ifp;
|
2017-09-16 02:41:38 +00:00
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
iflib_reset_qvalues(ctx);
|
2018-05-03 17:02:31 +00:00
|
|
|
CTX_LOCK(ctx);
|
2017-09-16 02:41:38 +00:00
|
|
|
if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_unlock;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2017-01-02 00:56:33 +00:00
|
|
|
_iflib_pre_assert(scctx);
|
|
|
|
ctx->ifc_txrx = *scctx->isc_txrx;
|
|
|
|
|
|
|
|
#ifdef INVARIANTS
|
Assorted TSO fixes for em(4)/iflib(9) and dead code removal:
- Ever since the workaround for the silicon bug of TSO4 causing MAC hangs
was committed in r295133, CSUM_TSO always got disabled unconditionally
by em(4) on the first invocation of em_init_locked(). However, even with
that problem fixed, it turned out that for at least e. g. 82579 not all
necessary TSO workarounds are in place, still causing MAC hangs even at
Gigabit speed. Thus, for stable/11, TSO usage was deliberately disabled
in r323292 (r323293 for stable/10) for the EM-class by default, allowing
users to turn it on if it happens to work with their particular EM MAC
in a Gigabit-only environment.
In head, the TSO workaround for speeds other than Gigabit was lost with
the conversion to iflib(9) in r311849 (possibly along with another one
or two TSO workarounds). Yet at the same time, for EM-class MACs TSO4
got enabled by default again, causing device hangs. Therefore, change the
default for this hardware class back to have TSO4 off, allowing users
to turn it on manually if it happens to work in their environment as
we do in stable/{10,11}. An alternative would be to add a whitelist of
EM-class devices where TSO4 actually is reliable with the workarounds in
place, but given that the advantage of TSO at Gigabit speed is rather
limited - especially with the overhead of these workarounds -, that's
really not worth it. [1]
This change includes the addition of an isc_capabilities to struct
if_softc_ctx so iflib(9) can also handle interface capabilities that
shouldn't be enabled by default which is used to handle the default-off
capabilities of e1000 as suggested by shurd@ and moving their handling
from em_setup_interface() to em_if_attach_pre() accordingly.
- Although 82543 supports TSO4 in theory, the former lem(4) didn't have
support for TSO4, presumably because TSO4 is even more broken in the
LEM-class of MACs than the later EM ones. Still, TSO4 for LEM-class
devices was enabled as part of the conversion to iflib(9) in r311849,
causing device hangs. So revert back to the pre-r311849 behavior of
not supporting TSO4 for LEM-class at all, which includes not creating
a TSO DMA tag in iflib(9) for devices not having IFCAP_TSO4 set. [2]
- In fact, the FreeBSD TCP stack can handle a TSO size of IP_MAXPACKET
(65535) rather than FREEBSD_TSO_SIZE_MAX (65518). However, the TSO
DMA must have a maxsize of the maximum TSO size plus the size of a
VLAN header for software VLAN tagging. The iflib(9) converted em(4),
thus, first correctly sets scctx->isc_tx_tso_size_max to EM_TSO_SIZE
in em_if_attach_pre(), but later on overrides it with IP_MAXPACKET
in em_setup_interface() (apparently, left-over from pre-iflib(9)
  times). So remove the latter and correct iflib(9) to correctly cap
the maximum TSO size reported to the stack at IP_MAXPACKET. While at
it, let iflib(9) use if_sethwtsomax*().
This change includes the addition of isc_tso_max{seg,}size DMA engine
constraints for the TSO DMA tag to struct if_shared_ctx and letting
iflib_txsd_alloc() automatically adjust the maxsize of that tag in case
IFCAP_VLAN_MTU is supported as requested by shurd@.
- Move the if_setifheaderlen(9) call for adjusting the maximum Ethernet
header length from {ixgbe,ixl,ixlv,ixv,em}_setup_interface() to iflib(9)
so adjustment is automatically done in case IFCAP_VLAN_MTU is supported.
As a consequence, this adjustment now is also done in case of bnxt(4)
which missed it previously.
- Move the reduction of the maximum TSO segment count reported to the
stack by the number of m_pullup(9) calls (which in the worst case,
can add another mbuf and, thus, the requirement for another DMA
segment each) in the transmit path for performance reasons from
em_setup_interface() to iflib_txsd_alloc() as these pull-ups are now
done in iflib_parse_header() rather than in the no longer existing
em_xmit(). Moreover, this optimization applies to all drivers using
iflib(9) and not just em(4); all in-tree iflib(9) consumers still
have enough room to handle full size TSO packets. Also, reduce the
adjustment to the maximum number of m_pullup(9)'s now performed in
iflib_parse_header().
- Prior to the conversion of em(4)/igb(4)/lem(4) and ixl(4) to iflib(9)
in r311849 and r335338 respectively, these drivers didn't enable
IFCAP_VLAN_HWFILTER by default due to VLAN events not being passed
through by lagg(4). With iflib(9), IFCAP_VLAN_HWFILTER was turned on
by default but also lagg(4) was fixed in that regard in r203548. So
just remove the now redundant and defunct IFCAP_VLAN_HWFILTER handling
in {em,ixl,ixlv}_setup_interface().
- Nuke other redundant IFCAP_* setting in {em,ixl,ixlv}_setup_interface()
which is (more completely) already done in {em,ixl,ixlv}_if_attach_pre()
now.
- Remove some redundant/dead setting of scctx->isc_tx_csum_flags in
em_if_attach_pre().
- Remove some IFCAP_* duplicated either directly or indirectly (e. g.
via IFCAP_HWCSUM) in {EM,IGB,IXL}_CAPS.
- Don't bother to fiddle with IFCAP_HWSTATS in ixgbe(4)/ixgbev(4) as
iflib(9) adds that capability unconditionally.
- Remove some unused macros from em(4).
- Bump __FreeBSD_version as some of the above changes require the modules
of drivers using iflib(9) to be recompiled.
Okayed by: sbruno@ at 201806 DevSummit Transport Working Group [1]
Reviewed by: sbruno (earlier version), erj
PR: 219428 (part of; comment #10) [1], 220997 (part of; comment #3) [2]
Differential Revision: https://reviews.freebsd.org/D15720
2018-07-15 19:04:23 +00:00
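As a rough illustration of the isc_capabilities/isc_capenable split described
above, a driver's attach_pre routine could look like the following sketch.
This is hypothetical code, not the actual em(4) implementation: the function
name example_if_attach_pre and the chosen capability set are made up for the
example.

/*
 * Sketch only: advertise everything the hardware can do in
 * isc_capabilities, but leave default-off features (here TSO4)
 * out of isc_capenable.
 */
static int
example_if_attach_pre(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	scctx->isc_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_TSO4;
	scctx->isc_capenable = scctx->isc_capabilities & ~IFCAP_TSO4;
	scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP;
	return (0);
}

iflib(9) then uses isc_capenable as the initial if_capenable while still
exposing TSO4 via if_capabilities, so the user can switch it on with
ifconfig if it happens to work on the hardware at hand.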
|
|
|
MPASS(scctx->isc_capabilities);
|
|
|
|
if (scctx->isc_capabilities & IFCAP_TXCSUM)
|
2017-01-02 00:56:33 +00:00
|
|
|
MPASS(scctx->isc_tx_csum_flags);
|
|
|
|
#endif
|
|
|
|
|
2018-07-15 19:04:23 +00:00
|
|
|
if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
|
2017-08-10 03:11:05 +00:00
|
|
|
if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);
|
2017-01-02 00:56:33 +00:00
|
|
|
|
|
|
|
if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
|
|
|
|
scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
|
|
|
|
if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
|
|
|
|
scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
|
2016-08-12 21:29:44 +00:00
|
|
|
|
2017-03-13 22:53:06 +00:00
|
|
|
main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
|
|
|
|
main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
|
2016-08-12 21:29:44 +00:00
|
|
|
|
|
|
|
/* XXX change for per-queue sizes */
|
2019-01-30 13:21:26 +00:00
|
|
|
device_printf(dev, "Using %d tx descriptors and %d rx descriptors\n",
|
|
|
|
scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
|
2016-08-12 21:29:44 +00:00
|
|
|
for (i = 0; i < sctx->isc_nrxqs; i++) {
|
|
|
|
if (!powerof2(scctx->isc_nrxd[i])) {
|
|
|
|
/* round down instead? */
|
|
|
|
device_printf(dev, "# rx descriptors must be a power of 2\n");
|
|
|
|
err = EINVAL;
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_iflib_detach;
|
2016-08-12 21:29:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
for (i = 0; i < sctx->isc_ntxqs; i++) {
|
|
|
|
if (!powerof2(scctx->isc_ntxd[i])) {
|
|
|
|
device_printf(dev,
|
|
|
|
"# tx descriptors must be a power of 2");
|
|
|
|
err = EINVAL;
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_iflib_detach;
|
2016-08-12 21:29:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
|
|
|
|
MAX_SINGLE_PACKET_FRACTION)
|
|
|
|
scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
|
|
|
|
MAX_SINGLE_PACKET_FRACTION);
|
|
|
|
if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
|
|
|
|
MAX_SINGLE_PACKET_FRACTION)
|
|
|
|
scctx->isc_tx_tso_segments_max = max(1,
|
|
|
|
scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
|
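/*
 * Illustrative arithmetic, not part of the original source: assuming
 * MAX_SINGLE_PACKET_FRACTION is defined as 12, a main TX ring of 1024
 * descriptors limits any single packet - TSO or not - to at most
 * 1024 / 12 = 85 DMA segments, so one oversized packet cannot consume
 * the whole ring.
 */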
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
|
2018-07-15 19:04:23 +00:00
|
|
|
if (if_getcapabilities(ifp) & IFCAP_TSO) {
|
|
|
|
/*
|
|
|
|
* The stack can't handle a TSO size larger than IP_MAXPACKET,
|
|
|
|
* but some MACs do.
|
|
|
|
*/
|
|
|
|
if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
|
|
|
|
IP_MAXPACKET));
|
|
|
|
/*
|
|
|
|
* Take maximum number of m_pullup(9)'s in iflib_parse_header()
|
|
|
|
* into account. In the worst case, each of these calls will
|
|
|
|
* add another mbuf and, thus, the requirement for another DMA
|
|
|
|
* segment. So for best performance, it doesn't make sense to
|
|
|
|
* advertise a maximum of TSO segments that typically will
|
|
|
|
* require defragmentation in iflib_encap().
|
|
|
|
*/
|
|
|
|
if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
|
|
|
|
if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
|
|
|
|
}
|
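/*
 * Illustrative example with hypothetical numbers: a driver reporting
 * isc_tx_tso_segments_max = 40 results in the stack being told it may
 * use at most 40 - 3 = 37 segments per TSO packet, keeping headroom
 * for up to three m_pullup(9) calls in iflib_parse_header(), each of
 * which may prepend an mbuf and therefore cost an extra DMA segment.
 */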
2016-05-18 04:35:58 +00:00
|
|
|
if (scctx->isc_rss_table_size == 0)
|
|
|
|
scctx->isc_rss_table_size = 64;
|
2016-08-12 21:29:44 +00:00
|
|
|
scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
|
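/*
 * Illustrative note, not part of the original source: the mask only
 * acts as a cheap modulo because isc_rss_table_size is a power of two
 * (the default of 64 is), so an RSS bucket can be picked with a single
 * AND, e.g.:
 *
 *	bucket = rss_hash & scctx->isc_rss_table_mask;
 *
 * With the default table size of 64 the mask is 0x3f.
 */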
2016-11-18 04:19:21 +00:00
|
|
|
|
|
|
|
GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
|
|
|
|
/* XXX format name */
|
Make taskqgroup_attach{,_cpu}(9) work across architectures
So far, intr_{g,s}etaffinity(9) take a single int for identifying
a device interrupt. This approach doesn't work on all architectures
supported, as a single int isn't sufficient to globally specify a
device interrupt. In particular, with multiple interrupt controllers
in one system as found on e. g. arm and arm64 machines, an interrupt
number as returned by rman_get_start(9) may be only unique relative
to the bus and, thus, interrupt controller, a certain device hangs
off from.
In turn, this makes taskqgroup_attach{,_cpu}(9) and - internal to
the gtaskqueue implementation - taskqgroup_attach_deferred{,_cpu}()
not work across architectures. Yet in turn, iflib(4) as gtaskqueue
consumer so far doesn't fit architectures where interrupt numbers
aren't globally unique.
However, at least for intr_setaffinity(..., CPU_WHICH_IRQ, ...) as
employed by the gtaskqueue implementation to bind an interrupt to a
particular CPU, using bus_bind_intr(9) instead is equivalent from
a functional point of view, with bus_bind_intr(9) taking the device
and interrupt resource arguments required for uniquely specifying a
device interrupt.
Thus, change the gtaskqueue implementation to employ bus_bind_intr(9)
instead and intr_{g,s}etaffinity(9) to take the device and interrupt
resource arguments required respectively. This change also moves
struct grouptask from <sys/_task.h> to <sys/gtaskqueue.h> and wraps
struct gtask along with the gtask_fn_t typedef into #ifdef _KERNEL
as userland likes to include <sys/_task.h> or indirectly drags it
in - for better or worse also with _KERNEL defined -, which with
device_t and struct resource dependencies otherwise is no longer
as easily possible now.
The userland inclusion problem probably can be improved a bit by
introducing a _WANT_TASK (as well as a _WANT_MOUNT) akin to the
existing _WANT_PRISON etc., which is orthogonal to this change,
though, and likely needs an exp-run.
While at it:
- Change the gt_cpu member in the grouptask structure to be of type
int as used elsewhere for specifying CPUs (an int16_t may be too
narrow sooner or later),
- move the gtaskqueue_enqueue_fn typedef from <sys/gtaskqueue.h> to
the gtaskqueue implementation as it's only used and needed there,
- change the GTASK_INIT macro to use "gtask" rather than "task" as
argument given that it actually operates on a struct gtask rather
than a struct task, and
- let subr_gtaskqueue.c consistently use __func__ to print function
names.
Reported by: mmel
Reviewed by: mmel
Differential Revision: https://reviews.freebsd.org/D19139
2019-02-12 21:23:59 +00:00
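A minimal sketch of the bus_bind_intr(9)-based binding described in the
message above; example_bind_irq and its parameters are hypothetical, the
real code lives in the gtaskqueue implementation.

/*
 * Bind a device interrupt to a CPU. bus_bind_intr(9) identifies the
 * interrupt by device and interrupt resource rather than by a bare
 * IRQ number, so it also works on systems with multiple interrupt
 * controllers where IRQ numbers aren't globally unique.
 */
static int
example_bind_irq(device_t dev, struct resource *irq_res, int cpu_id)
{
	int error;

	error = bus_bind_intr(dev, irq_res, cpu_id);
	if (error != 0)
		device_printf(dev, "failed to bind interrupt to CPU %d: %d\n",
		    cpu_id, error);
	return (error);
}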
|
|
|
taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
|
|
|
|
NULL, NULL, "admin");
|
2017-11-29 18:14:57 +00:00
|
|
|
|
2017-11-29 18:21:17 +00:00
|
|
|
/* Set up cpu set. If it fails, use the set of all CPUs. */
|
2017-11-29 18:14:57 +00:00
|
|
|
if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
|
|
|
|
device_printf(dev, "Unable to fetch CPU list\n");
|
|
|
|
CPU_COPY(&all_cpus, &ctx->ifc_cpus);
|
|
|
|
}
|
|
|
|
MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
/*
|
2019-01-30 13:21:26 +00:00
|
|
|
** Now set up MSI or MSI-X, should return us the number of supported
|
|
|
|
** vectors (will be 1 for a legacy interrupt and MSI).
|
2016-05-18 04:35:58 +00:00
|
|
|
*/
|
|
|
|
if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
|
|
|
|
msix = scctx->isc_vectors;
|
|
|
|
} else if (scctx->isc_msix_bar != 0)
|
2017-01-25 14:37:05 +00:00
|
|
|
/*
|
|
|
|
* The simple fact that isc_msix_bar is not 0 does not mean we
|
|
|
|
* have a good value there that is known to work.
|
|
|
|
*/
|
2016-05-18 04:35:58 +00:00
|
|
|
msix = iflib_msix_init(ctx);
|
|
|
|
else {
|
|
|
|
scctx->isc_vectors = 1;
|
|
|
|
scctx->isc_ntxqsets = 1;
|
|
|
|
scctx->isc_nrxqsets = 1;
|
|
|
|
scctx->isc_intr = IFLIB_INTR_LEGACY;
|
|
|
|
msix = 0;
|
|
|
|
}
|
|
|
|
/* Get memory for the station queues */
|
|
|
|
if ((err = iflib_queues_alloc(ctx))) {
|
|
|
|
device_printf(dev, "Unable to allocate queue memory\n");
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_intr_free;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
2018-05-08 17:15:10 +00:00
|
|
|
if ((err = iflib_qset_structures_setup(ctx)))
|
2016-05-18 04:35:58 +00:00
|
|
|
goto fail_queues;
|
2017-01-26 13:50:09 +00:00
|
|
|
|
2017-01-24 16:05:42 +00:00
|
|
|
/*
|
|
|
|
* Group taskqueues aren't properly set up until SMP is started,
|
|
|
|
* so we disable interrupts until we can handle them post
|
|
|
|
* SI_SUB_SMP.
|
|
|
|
*
|
|
|
|
* XXX: disabling interrupts doesn't actually work, at least for
|
|
|
|
* the non-MSI case. When they occur before SI_SUB_SMP completes,
|
|
|
|
* we do null handling and depend on this not causing too large an
|
|
|
|
* interrupt storm.
|
|
|
|
*/
|
2017-01-02 00:56:33 +00:00
|
|
|
IFDI_INTR_DISABLE(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
|
|
|
|
device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_queues;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
if (msix <= 1) {
|
|
|
|
rid = 0;
|
|
|
|
if (scctx->isc_intr == IFLIB_INTR_MSI) {
|
|
|
|
MPASS(msix == 1);
|
|
|
|
rid = 1;
|
|
|
|
}
|
2016-08-12 21:29:44 +00:00
|
|
|
if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(dev, "iflib_legacy_setup failed %d\n", err);
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_queues;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
}
|
2018-07-15 19:04:23 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
|
2018-07-15 19:04:23 +00:00
|
|
|
|
2017-09-16 02:41:38 +00:00
|
|
|
if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
|
|
|
|
goto fail_detach;
|
|
|
|
}
|
2018-07-15 19:04:23 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
|
|
|
|
* This must appear after the call to ether_ifattach() because
|
|
|
|
* ether_ifattach() sets if_hdrlen to the default value.
|
|
|
|
*/
|
|
|
|
if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
|
|
|
|
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
if ((err = iflib_netmap_attach(ctx))) {
|
|
|
|
device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
|
|
|
|
goto fail_detach;
|
|
|
|
}
|
|
|
|
*ctxp = ctx;
|
|
|
|
|
2018-05-06 00:57:52 +00:00
|
|
|
NETDUMP_SET(ctx->ifc_ifp, iflib);
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_add_device_sysctl_post(ctx);
|
2017-01-15 00:50:10 +00:00
|
|
|
ctx->ifc_flags |= IFC_INIT_DONE;
|
2018-05-03 17:02:31 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (0);
|
2018-10-12 22:40:54 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
fail_detach:
|
|
|
|
ether_ifdetach(ctx->ifc_ifp);
|
|
|
|
fail_intr_free:
|
2019-01-22 00:56:44 +00:00
|
|
|
iflib_free_intr_mem(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
fail_queues:
|
2018-05-08 16:56:02 +00:00
|
|
|
iflib_tx_structures_free(ctx);
|
|
|
|
iflib_rx_structures_free(ctx);
|
2019-01-22 00:56:44 +00:00
|
|
|
fail_iflib_detach:
|
2016-05-18 04:35:58 +00:00
|
|
|
IFDI_DETACH(ctx);
|
2019-01-22 00:56:44 +00:00
|
|
|
fail_unlock:
|
2018-05-03 17:02:31 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
2019-01-22 00:56:44 +00:00
|
|
|
fail_ctx_free:
|
|
|
|
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
|
|
|
|
free(ctx->ifc_softc, M_IFLIB);
|
|
|
|
free(ctx, M_IFLIB);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
int
|
|
|
|
iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
|
|
|
|
struct iflib_cloneattach_ctx *clctx)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
if_ctx_t ctx;
|
|
|
|
if_t ifp;
|
|
|
|
if_softc_ctx_t scctx;
|
|
|
|
int i;
|
|
|
|
void *sc;
|
|
|
|
uint16_t main_txq;
|
|
|
|
uint16_t main_rxq;
|
|
|
|
|
|
|
|
ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
|
|
|
|
sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
|
|
|
|
ctx->ifc_flags |= IFC_SC_ALLOCATED;
|
|
|
|
if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
|
|
|
|
ctx->ifc_flags |= IFC_PSEUDO;
|
|
|
|
|
|
|
|
ctx->ifc_sctx = sctx;
|
|
|
|
ctx->ifc_softc = sc;
|
|
|
|
ctx->ifc_dev = dev;
|
|
|
|
|
|
|
|
if ((err = iflib_register(ctx)) != 0) {
|
|
|
|
device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_ctx_free;
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
iflib_add_device_sysctl_pre(ctx);
|
|
|
|
|
|
|
|
scctx = &ctx->ifc_softc_ctx;
|
|
|
|
ifp = ctx->ifc_ifp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX sanity check that ntxd & nrxd are a power of 2
|
|
|
|
*/
|
|
|
|
iflib_reset_qvalues(ctx);
|
|
|
|
|
|
|
|
if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
|
|
|
|
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_ctx_free;
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
if (sctx->isc_flags & IFLIB_GEN_MAC)
|
|
|
|
iflib_gen_mac(ctx);
|
|
|
|
if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
|
|
|
|
clctx->cc_params)) != 0) {
|
|
|
|
device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_ctx_free;
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
|
|
|
|
ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
|
|
|
|
ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);
|
|
|
|
|
|
|
|
#ifdef INVARIANTS
|
2018-07-15 19:04:23 +00:00
|
|
|
MPASS(scctx->isc_capabilities);
|
|
|
|
if (scctx->isc_capabilities & IFCAP_TXCSUM)
|
2018-05-11 20:08:28 +00:00
|
|
|
MPASS(scctx->isc_tx_csum_flags);
|
|
|
|
#endif
|
|
|
|
|
2018-07-15 19:04:23 +00:00
|
|
|
if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
|
2018-05-11 20:08:28 +00:00
|
|
|
if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);
|
|
|
|
|
|
|
|
ifp->if_flags |= IFF_NOGROUP;
|
|
|
|
if (sctx->isc_flags & IFLIB_PSEUDO) {
|
|
|
|
ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
|
|
|
|
|
|
|
|
if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
|
|
|
|
device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
|
|
|
|
goto fail_detach;
|
|
|
|
}
|
|
|
|
*ctxp = ctx;
|
|
|
|
|
2018-07-15 19:04:23 +00:00
|
|
|
/*
|
|
|
|
* Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
|
|
|
|
* This must appear after the call to ether_ifattach() because
|
|
|
|
* ether_ifattach() sets if_hdrlen to the default value.
|
|
|
|
*/
|
|
|
|
if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
|
|
|
|
if_setifheaderlen(ifp,
|
|
|
|
sizeof(struct ether_vlan_header));
|
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
|
|
|
|
iflib_add_device_sysctl_post(ctx);
|
|
|
|
ctx->ifc_flags |= IFC_INIT_DONE;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
_iflib_pre_assert(scctx);
|
|
|
|
ctx->ifc_txrx = *scctx->isc_txrx;
|
|
|
|
|
|
|
|
if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
|
|
|
|
scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
|
|
|
|
if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
|
|
|
|
scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
|
|
|
|
|
|
|
|
main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
|
|
|
|
main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
|
|
|
|
|
|
|
|
/* XXX change for per-queue sizes */
|
2019-01-30 13:21:26 +00:00
|
|
|
device_printf(dev, "Using %d tx descriptors and %d rx descriptors\n",
|
|
|
|
scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
|
2018-05-11 20:08:28 +00:00
|
|
|
for (i = 0; i < sctx->isc_nrxqs; i++) {
|
|
|
|
if (!powerof2(scctx->isc_nrxd[i])) {
|
|
|
|
/* round down instead? */
|
|
|
|
device_printf(dev, "# rx descriptors must be a power of 2\n");
|
|
|
|
err = EINVAL;
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_iflib_detach;
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
for (i = 0; i < sctx->isc_ntxqs; i++) {
|
|
|
|
if (!powerof2(scctx->isc_ntxd[i])) {
|
|
|
|
device_printf(dev,
|
|
|
|
"# tx descriptors must be a power of 2");
|
|
|
|
err = EINVAL;
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_iflib_detach;
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
|
|
|
|
MAX_SINGLE_PACKET_FRACTION)
|
|
|
|
scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
|
|
|
|
MAX_SINGLE_PACKET_FRACTION);
|
|
|
|
if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
|
|
|
|
MAX_SINGLE_PACKET_FRACTION)
|
|
|
|
scctx->isc_tx_tso_segments_max = max(1,
|
|
|
|
scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
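/*
 * Worked example (added for illustration, not part of the original code):
 * assuming MAX_SINGLE_PACKET_FRACTION is 12, as it has historically been
 * in iflib, a 1024-entry TX ring yields 1024 / 12 = 85, so neither a plain
 * packet nor a TSO packet may claim more than 85 descriptors.  The
 * max(1, ...) keeps the limit usable for very small rings, where the
 * division could otherwise round down to 0.
 */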
|
|
|
|
|
|
|
|
/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
|
2018-07-15 19:04:23 +00:00
|
|
|
if (if_getcapabilities(ifp) & IFCAP_TSO) {
|
|
|
|
/*
|
|
|
|
* The stack can't handle a TSO size larger than IP_MAXPACKET,
|
|
|
|
* but some MACs do.
|
|
|
|
*/
|
|
|
|
if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
|
|
|
|
IP_MAXPACKET));
|
|
|
|
/*
|
|
|
|
* Take maximum number of m_pullup(9)'s in iflib_parse_header()
|
|
|
|
* into account. In the worst case, each of these calls will
|
|
|
|
* add another mbuf and, thus, the requirement for another DMA
|
|
|
|
* segment. So for best performance, it doesn't make sense to
|
|
|
|
* advertise a maximum of TSO segments that typically will
|
|
|
|
* require defragmentation in iflib_encap().
|
|
|
|
*/
|
|
|
|
if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
|
|
|
|
if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
|
|
|
|
}
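/*
 * Worked example (illustrative, not part of the original code): a driver
 * reporting isc_tx_tso_size_max = 65535 and isc_tx_tso_segments_max = 85
 * ends up advertising if_hw_tsomax = 65535 (already at IP_MAXPACKET) and
 * if_hw_tsomaxsegcount = 82; three segments are held in reserve for the
 * extra mbufs that m_pullup(9) may add in iflib_parse_header().
 */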
|
2018-05-11 20:08:28 +00:00
|
|
|
if (scctx->isc_rss_table_size == 0)
|
|
|
|
scctx->isc_rss_table_size = 64;
|
|
|
|
scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
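/*
 * Note (illustrative): the mask computed above assumes the table size is
 * a power of two; with the default of 64 the mask is 0x3f, so an RSS hash
 * can be folded into a table index with a single AND.
 */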
|
|
|
|
|
|
|
|
GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
|
|
|
|
/* XXX format name */
|
Make taskqgroup_attach{,_cpu}(9) work across architectures
So far, intr_{g,s}etaffinity(9) take a single int for identifying
a device interrupt. This approach doesn't work on all architectures
supported, as a single int isn't sufficient to globally specify a
device interrupt. In particular, with multiple interrupt controllers
in one system as found on e. g. arm and arm64 machines, an interrupt
number as returned by rman_get_start(9) may be only unique relative
to the bus and, thus, interrupt controller, a certain device hangs
off from.
In turn, this makes taskqgroup_attach{,_cpu}(9) and - internal to
the gtaskqueue implementation - taskqgroup_attach_deferred{,_cpu}()
not work across architectures. Yet in turn, iflib(4) as gtaskqueue
consumer so far doesn't fit architectures where interrupt numbers
aren't globally unique.
However, at least for intr_setaffinity(..., CPU_WHICH_IRQ, ...) as
employed by the gtaskqueue implementation to bind an interrupt to a
particular CPU, using bus_bind_intr(9) instead is equivalent from
a functional point of view, with bus_bind_intr(9) taking the device
and interrupt resource arguments required for uniquely specifying a
device interrupt.
Thus, change the gtaskqueue implementation to employ bus_bind_intr(9)
instead and intr_{g,s}etaffinity(9) to take the device and interrupt
resource arguments required respectively. This change also moves
struct grouptask from <sys/_task.h> to <sys/gtaskqueue.h> and wraps
struct gtask along with the gtask_fn_t typedef into #ifdef _KERNEL
as userland likes to include <sys/_task.h> or indirectly drags it
in - for better or worse also with _KERNEL defined -, which with
device_t and struct resource dependencies otherwise is no longer
as easily possible now.
The userland inclusion problem probably can be improved a bit by
introducing a _WANT_TASK (as well as a _WANT_MOUNT) akin to the
existing _WANT_PRISON etc., which is orthogonal to this change,
though, and likely needs an exp-run.
While at it:
- Change the gt_cpu member in the grouptask structure to be of type
int as used elsewhere for specifying CPUs (an int16_t may be too
narrow sooner or later),
- move the gtaskqueue_enqueue_fn typedef from <sys/gtaskqueue.h> to
the gtaskqueue implementation as it's only used and needed there,
- change the GTASK_INIT macro to use "gtask" rather than "task" as
argument given that it actually operates on a struct gtask rather
than a struct task, and
- let subr_gtaskqueue.c consistently use __func__ to print functions
names.
Reported by: mmel
Reviewed by: mmel
Differential Revision: https://reviews.freebsd.org/D19139
2019-02-12 21:23:59 +00:00
|
|
|
taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
|
|
|
|
NULL, NULL, "admin");
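/*
 * Illustrative sketch (not part of this file), assuming the six-argument
 * form used above, where the fourth and fifth arguments are the device
 * and its interrupt resource: a caller that wants the task bound to the
 * CPU of a particular interrupt passes real values instead of NULL, so
 * that the gtaskqueue code can use bus_bind_intr(9) internally.  The
 * structure and names below are placeholders, not iflib definitions.
 */
struct example_softc {
	device_t	 dev;		/* saved at device attach time */
	struct resource	*irq_res;	/* from bus_alloc_resource_any(9) */
	struct grouptask rx_task;	/* initialized with GROUPTASK_INIT() */
};

static void
example_attach_rx_task(struct example_softc *sc)
{

	taskqgroup_attach(qgroup_if_io_tqg, &sc->rx_task, sc, sc->dev,
	    sc->irq_res, "example rx");
}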
|
2018-05-11 20:08:28 +00:00
|
|
|
|
|
|
|
/* XXX --- can support > 1 -- but keep it simple for now */
|
|
|
|
scctx->isc_intr = IFLIB_INTR_LEGACY;
|
|
|
|
|
|
|
|
/* Get memory for the station queues */
|
|
|
|
if ((err = iflib_queues_alloc(ctx))) {
|
|
|
|
device_printf(dev, "Unable to allocate queue memory\n");
|
2019-01-22 00:56:44 +00:00
|
|
|
goto fail_iflib_detach;
|
2018-05-11 20:08:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((err = iflib_qset_structures_setup(ctx))) {
|
|
|
|
device_printf(dev, "qset structure setup failed %d\n", err);
|
|
|
|
goto fail_queues;
|
|
|
|
}
|
2018-07-15 19:04:23 +00:00
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
/*
|
|
|
|
* XXX What if anything do we want to do about interrupts?
|
|
|
|
*/
|
|
|
|
ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
|
|
|
|
if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
|
|
|
|
device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
|
|
|
|
goto fail_detach;
|
|
|
|
}
|
2018-07-15 19:04:23 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
|
|
|
|
* This must appear after the call to ether_ifattach() because
|
|
|
|
* ether_ifattach() sets if_hdrlen to the default value.
|
|
|
|
*/
|
|
|
|
if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
|
|
|
|
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
|
|
|
|
|
2018-05-11 20:08:28 +00:00
|
|
|
/* XXX handle more than one queue */
|
|
|
|
for (i = 0; i < scctx->isc_nrxqsets; i++)
|
|
|
|
IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);
|
|
|
|
|
|
|
|
*ctxp = ctx;
|
|
|
|
|
|
|
|
if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
|
|
|
|
iflib_add_device_sysctl_post(ctx);
|
|
|
|
ctx->ifc_flags |= IFC_INIT_DONE;
|
|
|
|
return (0);
|
|
|
|
fail_detach:
|
|
|
|
ether_ifdetach(ctx->ifc_ifp);
|
|
|
|
fail_queues:
|
|
|
|
iflib_tx_structures_free(ctx);
|
|
|
|
iflib_rx_structures_free(ctx);
|
2019-01-22 00:56:44 +00:00
|
|
|
fail_iflib_detach:
|
2018-05-11 20:08:28 +00:00
|
|
|
IFDI_DETACH(ctx);
|
2019-01-22 00:56:44 +00:00
|
|
|
fail_ctx_free:
|
|
|
|
free(ctx->ifc_softc, M_IFLIB);
|
|
|
|
free(ctx, M_IFLIB);
|
2018-05-11 20:08:28 +00:00
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_pseudo_deregister(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
if_t ifp = ctx->ifc_ifp;
|
|
|
|
iflib_txq_t txq;
|
|
|
|
iflib_rxq_t rxq;
|
|
|
|
int i, j;
|
|
|
|
struct taskqgroup *tqg;
|
|
|
|
iflib_fl_t fl;
|
|
|
|
|
|
|
|
/* Unregister VLAN events */
|
|
|
|
if (ctx->ifc_vlan_attach_event != NULL)
|
|
|
|
EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
|
|
|
|
if (ctx->ifc_vlan_detach_event != NULL)
|
|
|
|
EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
|
|
|
|
|
|
|
|
ether_ifdetach(ifp);
|
|
|
|
/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
|
|
|
|
CTX_LOCK_DESTROY(ctx);
|
|
|
|
/* XXX drain any dependent tasks */
|
|
|
|
tqg = qgroup_if_io_tqg;
|
|
|
|
for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
|
|
|
|
callout_drain(&txq->ift_timer);
|
|
|
|
if (txq->ift_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &txq->ift_task);
|
|
|
|
}
|
|
|
|
for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
|
|
|
|
if (rxq->ifr_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &rxq->ifr_task);
|
|
|
|
|
|
|
|
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
|
|
|
|
free(fl->ifl_rx_bitmap, M_IFLIB);
|
|
|
|
}
|
|
|
|
tqg = qgroup_if_config_tqg;
|
|
|
|
if (ctx->ifc_admin_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &ctx->ifc_admin_task);
|
|
|
|
if (ctx->ifc_vflr_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
|
|
|
|
|
|
|
|
if_free(ifp);
|
|
|
|
|
|
|
|
iflib_tx_structures_free(ctx);
|
|
|
|
iflib_rx_structures_free(ctx);
|
|
|
|
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
|
|
|
|
free(ctx->ifc_softc, M_IFLIB);
|
|
|
|
free(ctx, M_IFLIB);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
int
|
|
|
|
iflib_device_attach(device_t dev)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx;
|
|
|
|
if_shared_ctx_t sctx;
|
|
|
|
|
|
|
|
if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
|
|
|
|
return (ENOTSUP);
|
|
|
|
|
|
|
|
pci_enable_busmaster(dev);
|
|
|
|
|
|
|
|
return (iflib_device_register(dev, NULL, sctx, &ctx));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_deregister(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
if_t ifp = ctx->ifc_ifp;
|
|
|
|
iflib_txq_t txq;
|
|
|
|
iflib_rxq_t rxq;
|
|
|
|
device_t dev = ctx->ifc_dev;
|
2017-07-03 18:23:35 +00:00
|
|
|
int i, j;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct taskqgroup *tqg;
|
2017-07-03 18:23:35 +00:00
|
|
|
iflib_fl_t fl;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/* Make sure VLANS are not using driver */
|
|
|
|
if (if_vlantrunkinuse(ifp)) {
|
2018-10-12 22:40:54 +00:00
|
|
|
device_printf(dev, "Vlan in use, detach first\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
return (EBUSY);
|
|
|
|
}
|
2018-10-12 22:40:54 +00:00
|
|
|
#ifdef PCI_IOV
|
|
|
|
if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) {
|
|
|
|
device_printf(dev, "SR-IOV in use; detach first.\n");
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
STATE_LOCK(ctx);
|
|
|
|
ctx->ifc_flags |= IFC_IN_DETACH;
|
|
|
|
STATE_UNLOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
iflib_stop(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
|
|
|
|
/* Unregister VLAN events */
|
|
|
|
if (ctx->ifc_vlan_attach_event != NULL)
|
|
|
|
EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
|
|
|
|
if (ctx->ifc_vlan_detach_event != NULL)
|
|
|
|
EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
|
|
|
|
|
|
|
|
iflib_netmap_detach(ifp);
|
|
|
|
ether_ifdetach(ifp);
|
|
|
|
if (ctx->ifc_led_dev != NULL)
|
|
|
|
led_destroy(ctx->ifc_led_dev);
|
|
|
|
/* XXX drain any dependent tasks */
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_io_tqg;
|
2016-08-12 21:29:44 +00:00
|
|
|
for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
|
2016-05-18 04:35:58 +00:00
|
|
|
callout_drain(&txq->ift_timer);
|
|
|
|
if (txq->ift_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &txq->ift_task);
|
|
|
|
}
|
|
|
|
for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
|
|
|
|
if (rxq->ifr_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &rxq->ifr_task);
|
2017-07-03 18:23:35 +00:00
|
|
|
|
|
|
|
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
|
|
|
|
free(fl->ifl_rx_bitmap, M_IFLIB);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_config_tqg;
|
2016-05-18 04:35:58 +00:00
|
|
|
if (ctx->ifc_admin_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &ctx->ifc_admin_task);
|
|
|
|
if (ctx->ifc_vflr_task.gt_uniq != NULL)
|
|
|
|
taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
|
2018-05-29 18:03:43 +00:00
|
|
|
CTX_LOCK(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
IFDI_DETACH(ctx);
|
2018-05-29 18:03:43 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
|
|
|
|
/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
|
|
|
|
CTX_LOCK_DESTROY(ctx);
|
2016-08-12 21:29:44 +00:00
|
|
|
device_set_softc(ctx->ifc_dev, NULL);
|
2018-10-12 22:40:54 +00:00
|
|
|
iflib_free_intr_mem(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
bus_generic_detach(dev);
|
|
|
|
if_free(ifp);
|
|
|
|
|
|
|
|
iflib_tx_structures_free(ctx);
|
|
|
|
iflib_rx_structures_free(ctx);
|
2016-08-12 21:29:44 +00:00
|
|
|
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
|
|
|
|
free(ctx->ifc_softc, M_IFLIB);
|
2018-10-12 22:40:54 +00:00
|
|
|
STATE_LOCK_DESTROY(ctx);
|
2016-08-12 21:29:44 +00:00
|
|
|
free(ctx, M_IFLIB);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2018-10-12 22:40:54 +00:00
|
|
|
static void
|
|
|
|
iflib_free_intr_mem(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
|
|
|
|
iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
|
|
|
|
}
|
2019-01-30 13:21:26 +00:00
|
|
|
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
|
|
|
|
pci_release_msi(ctx->ifc_dev);
|
|
|
|
}
|
2018-10-12 22:40:54 +00:00
|
|
|
if (ctx->ifc_msix_mem != NULL) {
|
|
|
|
bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
|
2019-01-30 13:21:26 +00:00
|
|
|
rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem);
|
2018-10-12 22:40:54 +00:00
|
|
|
ctx->ifc_msix_mem = NULL;
|
|
|
|
}
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_detach(device_t dev)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = device_get_softc(dev);
|
|
|
|
|
|
|
|
return (iflib_device_deregister(ctx));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_suspend(device_t dev)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = device_get_softc(dev);
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_SUSPEND(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
|
|
|
|
return bus_generic_suspend(dev);
|
|
|
|
}
|
|
|
|
int
|
|
|
|
iflib_device_shutdown(device_t dev)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = device_get_softc(dev);
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_SHUTDOWN(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
|
|
|
|
return bus_generic_suspend(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_resume(device_t dev)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = device_get_softc(dev);
|
|
|
|
iflib_txq_t txq = ctx->ifc_txqs;
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_RESUME(ctx);
|
2019-01-07 23:46:54 +00:00
|
|
|
iflib_if_init_locked(ctx);
|
2016-05-18 04:35:58 +00:00
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
|
|
|
|
iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
|
|
|
|
|
|
|
|
return (bus_generic_resume(dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
if_ctx_t ctx = device_get_softc(dev);
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
error = IFDI_IOV_INIT(ctx, num_vfs, params);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
iflib_device_iov_uninit(device_t dev)
|
|
|
|
{
|
|
|
|
if_ctx_t ctx = device_get_softc(dev);
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
IFDI_IOV_UNINIT(ctx);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
if_ctx_t ctx = device_get_softc(dev);
|
|
|
|
|
|
|
|
CTX_LOCK(ctx);
|
|
|
|
error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
|
|
|
|
CTX_UNLOCK(ctx);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* MODULE FUNCTION DEFINITIONS
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
2017-09-16 02:41:38 +00:00
|
|
|
/*
|
|
|
|
* - Start a fast taskqueue thread for each core
|
|
|
|
* - Start a taskqueue for control operations
|
|
|
|
*/
|
2016-05-18 04:35:58 +00:00
|
|
|
static int
|
|
|
|
iflib_module_init(void)
|
|
|
|
{
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_module_event_handler(module_t mod, int what, void *arg)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
switch (what) {
|
|
|
|
case MOD_LOAD:
|
|
|
|
if ((err = iflib_module_init()) != 0)
|
|
|
|
return (err);
|
|
|
|
break;
|
|
|
|
case MOD_UNLOAD:
|
|
|
|
return (EBUSY);
|
|
|
|
default:
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* PUBLIC FUNCTION DEFINITIONS
|
|
|
|
* ordered as in iflib.h
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
_iflib_assert(if_shared_ctx_t sctx)
|
|
|
|
{
|
|
|
|
MPASS(sctx->isc_tx_maxsize);
|
|
|
|
MPASS(sctx->isc_tx_maxsegsize);
|
|
|
|
|
|
|
|
MPASS(sctx->isc_rx_maxsize);
|
|
|
|
MPASS(sctx->isc_rx_nsegments);
|
|
|
|
MPASS(sctx->isc_rx_maxsegsize);
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
MPASS(sctx->isc_nrxd_min[0]);
|
|
|
|
MPASS(sctx->isc_nrxd_max[0]);
|
|
|
|
MPASS(sctx->isc_nrxd_default[0]);
|
|
|
|
MPASS(sctx->isc_ntxd_min[0]);
|
|
|
|
MPASS(sctx->isc_ntxd_max[0]);
|
|
|
|
MPASS(sctx->isc_ntxd_default[0]);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
2017-01-02 00:56:33 +00:00
|
|
|
static void
|
|
|
|
_iflib_pre_assert(if_softc_ctx_t scctx)
|
|
|
|
{
|
|
|
|
|
|
|
|
MPASS(scctx->isc_txrx->ift_txd_encap);
|
|
|
|
MPASS(scctx->isc_txrx->ift_txd_flush);
|
|
|
|
MPASS(scctx->isc_txrx->ift_txd_credits_update);
|
|
|
|
MPASS(scctx->isc_txrx->ift_rxd_available);
|
|
|
|
MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
|
|
|
|
MPASS(scctx->isc_txrx->ift_rxd_refill);
|
|
|
|
MPASS(scctx->isc_txrx->ift_rxd_flush);
|
|
|
|
}
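/*
 * Illustrative sketch (not part of this file): the method table a driver
 * hands to iflib through scctx->isc_txrx must provide the callbacks
 * asserted above.  The member names match the MPASS() checks; the exm_*
 * functions stand in for a hypothetical driver's implementations, whose
 * declarations are omitted here for brevity.
 */
static struct if_txrx exm_txrx = {
	.ift_txd_encap = exm_txd_encap,
	.ift_txd_flush = exm_txd_flush,
	.ift_txd_credits_update = exm_txd_credits_update,
	.ift_rxd_available = exm_rxd_available,
	.ift_rxd_pkt_get = exm_rxd_pkt_get,
	.ift_rxd_refill = exm_rxd_refill,
	.ift_rxd_flush = exm_rxd_flush,
};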
|
2016-10-18 14:02:45 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
static int
|
|
|
|
iflib_register(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
|
|
|
driver_t *driver = sctx->isc_driver;
|
|
|
|
device_t dev = ctx->ifc_dev;
|
|
|
|
if_t ifp;
|
|
|
|
|
|
|
|
_iflib_assert(sctx);
|
|
|
|
|
2018-05-03 17:02:31 +00:00
|
|
|
CTX_LOCK_INIT(ctx);
|
2018-04-12 14:35:37 +00:00
|
|
|
STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
|
2018-10-12 22:40:54 +00:00
|
|
|
ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER);
|
2016-05-18 04:35:58 +00:00
|
|
|
if (ifp == NULL) {
|
|
|
|
device_printf(dev, "can not allocate ifnet structure\n");
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize our context's device specific methods
|
|
|
|
*/
|
|
|
|
kobj_init((kobj_t) ctx, (kobj_class_t) driver);
|
|
|
|
kobj_class_compile((kobj_class_t) driver);
|
|
|
|
driver->refs++;
|
|
|
|
|
|
|
|
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
|
|
|
|
if_setsoftc(ifp, ctx);
|
|
|
|
if_setdev(ifp, dev);
|
|
|
|
if_setinitfn(ifp, iflib_if_init);
|
|
|
|
if_setioctlfn(ifp, iflib_if_ioctl);
|
2018-07-25 22:46:36 +00:00
|
|
|
#ifdef ALTQ
|
|
|
|
if_setstartfn(ifp, iflib_altq_if_start);
|
|
|
|
if_settransmitfn(ifp, iflib_altq_if_transmit);
|
2018-08-04 01:45:17 +00:00
|
|
|
if_setsendqready(ifp);
|
2018-07-25 22:46:36 +00:00
|
|
|
#else
|
2016-05-18 04:35:58 +00:00
|
|
|
if_settransmitfn(ifp, iflib_if_transmit);
|
2018-07-25 22:46:36 +00:00
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
if_setqflushfn(ifp, iflib_if_qflush);
|
|
|
|
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
|
|
|
|
|
|
|
|
ctx->ifc_vlan_attach_event =
|
|
|
|
EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
|
|
|
|
EVENTHANDLER_PRI_FIRST);
|
|
|
|
ctx->ifc_vlan_detach_event =
|
|
|
|
EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
|
|
|
|
EVENTHANDLER_PRI_FIRST);
|
|
|
|
|
|
|
|
ifmedia_init(&ctx->ifc_media, IFM_IMASK,
|
|
|
|
iflib_media_change, iflib_media_status);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_queues_alloc(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
2016-08-12 21:29:44 +00:00
|
|
|
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
device_t dev = ctx->ifc_dev;
|
2016-08-12 21:29:44 +00:00
|
|
|
int nrxqsets = scctx->isc_nrxqsets;
|
|
|
|
int ntxqsets = scctx->isc_ntxqsets;
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_txq_t txq;
|
|
|
|
iflib_rxq_t rxq;
|
|
|
|
iflib_fl_t fl = NULL;
|
2016-08-12 21:29:44 +00:00
|
|
|
int i, j, cpu, err, txconf, rxconf;
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_dma_info_t ifdip;
|
2016-08-12 21:29:44 +00:00
|
|
|
uint32_t *rxqsizes = scctx->isc_rxqsizes;
|
|
|
|
uint32_t *txqsizes = scctx->isc_txqsizes;
|
2016-05-18 04:35:58 +00:00
|
|
|
uint8_t nrxqs = sctx->isc_nrxqs;
|
|
|
|
uint8_t ntxqs = sctx->isc_ntxqs;
|
|
|
|
int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
|
|
|
|
caddr_t *vaddrs;
|
|
|
|
uint64_t *paddrs;
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
|
|
|
|
KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-06-18 17:27:43 +00:00
|
|
|
/* Allocate the TX ring struct memory */
|
iflib: fix invalid free during queue allocation failure
In r301567, code was added to cleanup to prevent memory leaks for the
Tx and Rx ring structs. This code carefully tracked txq and rxq, and
made sure to free them properly during cleanup.
Because we assigned the txq and rxq pointers into the ctx->ifc_txqs and
ctx->ifc_rxqs, we carefully reset these pointers to NULL, so that
cleanup code would not accidentally free the memory twice.
This was changed by r304021 ("Update iflib to support more NIC designs"),
which removed this resetting of the pointers to NULL, because it re-used
the txq and rxq pointers as an index into the queue set array.
Unfortunately, the cleanup code was left alone. Thus, if we fail to
allocate DMA or fail to configure the queues using the driver's ifdi
methods, we will attempt to free txq and rxq. These variables would now
incorrectly point to the wrong location, resulting in a page fault.
There are a number of methods to correct this, but ultimately the root
cause was that we reuse the txq and rxq pointers for two different
purposes.
Instead, when allocating, store the returned pointer directly into
ctx->ifc_txqs and ctx->ifc_rxqs. Then, assign this to txq and rxq as
index pointers before starting the loop to allocate each queue.
Drop the cleanup code for txq and rxq, and only use ctx->ifc_txqs and
ctx->ifc_rxqs.
Thus, we no longer need to free txq or rxq under any error flow, and
instead rely solely on the pointers stored in ctx->ifc_txqs and
ctx->ifc_rxqs. This prevents the invalid free(), and ensures that we
still properly cleanup after ourselves as before when failing to
allocate.
Submitted by: Jacob Keller
Reviewed by: gallatin, sbruno
Sponsored by: Intel Corporation
Differential Revision: https://reviews.freebsd.org/D15285
2018-05-04 15:20:34 +00:00
|
|
|
if (!(ctx->ifc_txqs =
|
2018-01-21 15:42:36 +00:00
|
|
|
(iflib_txq_t) malloc(sizeof(struct iflib_txq) *
|
|
|
|
ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(dev, "Unable to allocate TX ring memory\n");
|
|
|
|
err = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Now allocate the RX */
|
2018-05-04 15:20:34 +00:00
|
|
|
if (!(ctx->ifc_rxqs =
|
2018-01-21 15:42:36 +00:00
|
|
|
(iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
|
|
|
|
nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(dev, "Unable to allocate RX ring memory\n");
|
|
|
|
err = ENOMEM;
|
|
|
|
goto rx_fail;
|
|
|
|
}
|
|
|
|
|
2018-05-04 15:20:34 +00:00
|
|
|
txq = ctx->ifc_txqs;
|
|
|
|
rxq = ctx->ifc_rxqs;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX handle allocation failure
|
|
|
|
*/
|
2016-07-06 14:09:49 +00:00
|
|
|
for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
|
2016-05-18 04:35:58 +00:00
|
|
|
/* Set up some basics */
|
|
|
|
|
o As illustrated by e. g. figure 7-14 of the Intel 82599 10 GbE
controller datasheet revision 3.3, in the context of Ethernet
MACs the control data describing the packet buffers typically
are named "descriptors". Each of these descriptors references
one buffer, multiple of which a packet can be composed of.
By contrast, in comments, messages and the names of structure
members, iflib(4) refers to DMA resources employed for RX and
TX buffers (rather than control data) as "desc(riptors)".
This odd naming convention of iflib(4) made reviewing r343085
and identifying wrong and missing bus_dmamap_sync(9) calls in
particular way harder than it already is. This convention may
also explain why the netmap(4) part of iflib(4) pairs the DMA
tags for control data with DMA maps of buffers and vice versa
in calls to bus_dma(9) functions.
Therefore, change iflib(4) to refer to buf(fers) when buffers
and not the usual understanding of descriptors is meant. This
change does not include corrections to the DMA resources used
in the netmap(4) parts. However, it revises error messages to
state which kind of allocation/creation failed. Specifically,
the "Unable to allocate tx_buffer (map) memory" copy & pasted
inappropriately on several occasions was replaced with proper
messages.
o Enhance some other error messages to indicate which half - RX
or TX - they apply to instead of using identical text in both
cases and generally canonicalize them.
o Correct the descriptions of iflib_{r,t}xsd_alloc() to reflect
reality; current code doesn't use {r,t}x_buffer structures.
o In iflib_queues_alloc():
- Remove redundant BUS_DMA_NOWAIT of iflib_dma_alloc() calls,
- change the M_WAITOK from malloc(9) calls into M_NOWAIT. The
return values are already checked, deferred DMA allocations
not being an option at this point, BUS_DMA_NOWAIT has to be
used anyway and prior malloc(9) calls in this function also
specify M_NOWAIT.
Reviewed by: shurd
Differential Revision: https://reviews.freebsd.org/D19067
2019-02-04 20:46:57 +00:00
|
|
|
if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
|
|
|
|
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate TX DMA info memory\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
err = ENOMEM;
|
2016-06-07 20:26:00 +00:00
|
|
|
goto err_tx_desc;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
txq->ift_ifdi = ifdip;
|
|
|
|
for (j = 0; j < ntxqs; j++, ifdip++) {
|
2019-02-04 20:46:57 +00:00
|
|
|
if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate TX descriptors\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto err_tx_desc;
|
|
|
|
}
|
2017-03-13 22:53:06 +00:00
|
|
|
txq->ift_txd_size[j] = scctx->isc_txd_size[j];
|
2016-05-18 04:35:58 +00:00
|
|
|
bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
|
|
|
|
}
|
|
|
|
txq->ift_ctx = ctx;
|
|
|
|
txq->ift_id = i;
|
2016-08-12 21:29:44 +00:00
|
|
|
if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
|
|
|
|
txq->ift_br_offset = 1;
|
|
|
|
} else {
|
|
|
|
txq->ift_br_offset = 0;
|
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
/* XXX fix this */
|
2016-07-06 14:09:49 +00:00
|
|
|
txq->ift_timer.c_cpu = cpu;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
if (iflib_txsd_alloc(txq)) {
|
|
|
|
device_printf(dev, "Critical Failure setting up TX buffers\n");
|
|
|
|
err = ENOMEM;
|
|
|
|
goto err_tx_desc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize the TX lock */
|
|
|
|
snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
|
|
|
|
device_get_nameunit(dev), txq->ift_id);
|
|
|
|
mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
|
|
|
|
callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
|
|
|
|
|
|
|
|
snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
|
|
|
|
device_get_nameunit(dev), txq->ift_id);
|
2017-03-13 22:53:06 +00:00
|
|
|
|
|
|
|
err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
|
|
|
|
iflib_txq_can_drain, M_IFLIB, M_WAITOK);
|
|
|
|
if (err) {
|
|
|
|
/* XXX free any allocated rings */
|
|
|
|
device_printf(dev, "Unable to allocate buf_ring\n");
|
|
|
|
goto err_tx_desc;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
|
|
|
|
/* Set up some basics */
|
|
|
|
|
2019-02-04 20:46:57 +00:00
|
|
|
if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
|
|
|
|
M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RX DMA info memory\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
err = ENOMEM;
|
2016-06-07 20:26:00 +00:00
|
|
|
goto err_tx_desc;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
rxq->ifr_ifdi = ifdip;
|
2017-03-13 22:53:06 +00:00
|
|
|
/* XXX this needs to be changed if #rx queues != #tx queues */
|
|
|
|
rxq->ifr_ntxqirq = 1;
|
|
|
|
rxq->ifr_txqid[0] = i;
|
2016-05-18 04:35:58 +00:00
|
|
|
for (j = 0; j < nrxqs; j++, ifdip++) {
|
2019-02-04 20:46:57 +00:00
|
|
|
if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RX descriptors\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
err = ENOMEM;
|
|
|
|
goto err_tx_desc;
|
|
|
|
}
|
|
|
|
bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
|
|
|
|
}
|
|
|
|
rxq->ifr_ctx = ctx;
|
|
|
|
rxq->ifr_id = i;
|
2016-08-12 21:29:44 +00:00
|
|
|
if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
|
|
|
|
rxq->ifr_fl_offset = 1;
|
2016-05-18 04:35:58 +00:00
|
|
|
} else {
|
2016-08-12 21:29:44 +00:00
|
|
|
rxq->ifr_fl_offset = 0;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
rxq->ifr_nfl = nfree_lists;
|
|
|
|
if (!(fl =
|
2018-01-21 15:42:36 +00:00
|
|
|
(iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(dev, "Unable to allocate free list memory\n");
|
|
|
|
err = ENOMEM;
|
2016-06-07 20:26:00 +00:00
|
|
|
goto err_tx_desc;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
rxq->ifr_fl = fl;
|
|
|
|
for (j = 0; j < nfree_lists; j++) {
|
2017-03-13 22:53:06 +00:00
|
|
|
fl[j].ifl_rxq = rxq;
|
|
|
|
fl[j].ifl_id = j;
|
|
|
|
fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
|
|
|
|
fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2018-10-12 22:40:54 +00:00
|
|
|
/* Allocate receive buffers for the ring */
|
2016-05-18 04:35:58 +00:00
|
|
|
if (iflib_rxsd_alloc(rxq)) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Critical Failure setting up receive buffers\n");
|
|
|
|
err = ENOMEM;
|
|
|
|
goto err_rx_desc;
|
|
|
|
}
|
2017-07-03 18:23:35 +00:00
|
|
|
|
|
|
|
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
|
2019-01-26 21:35:51 +00:00
|
|
|
fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB,
|
|
|
|
M_WAITOK);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* TXQs */
|
|
|
|
vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
|
|
|
|
paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
|
|
|
|
for (i = 0; i < ntxqsets; i++) {
|
|
|
|
iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;
|
|
|
|
|
|
|
|
for (j = 0; j < ntxqs; j++, di++) {
|
|
|
|
vaddrs[i*ntxqs + j] = di->idi_vaddr;
|
|
|
|
paddrs[i*ntxqs + j] = di->idi_paddr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(ctx->ifc_dev,
|
|
|
|
"Unable to allocate device TX queue\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_tx_structures_free(ctx);
|
|
|
|
free(vaddrs, M_IFLIB);
|
|
|
|
free(paddrs, M_IFLIB);
|
|
|
|
goto err_rx_desc;
|
|
|
|
}
|
|
|
|
free(vaddrs, M_IFLIB);
|
|
|
|
free(paddrs, M_IFLIB);
|
|
|
|
|
|
|
|
/* RXQs */
|
|
|
|
vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
|
|
|
|
paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
|
|
|
|
for (i = 0; i < nrxqsets; i++) {
|
|
|
|
iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;
|
|
|
|
|
|
|
|
for (j = 0; j < nrxqs; j++, di++) {
|
|
|
|
vaddrs[i*nrxqs + j] = di->idi_vaddr;
|
|
|
|
paddrs[i*nrxqs + j] = di->idi_paddr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
|
2019-02-04 20:46:57 +00:00
|
|
|
device_printf(ctx->ifc_dev,
|
|
|
|
"Unable to allocate device RX queue\n");
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_tx_structures_free(ctx);
|
|
|
|
free(vaddrs, M_IFLIB);
|
|
|
|
free(paddrs, M_IFLIB);
|
|
|
|
goto err_rx_desc;
|
|
|
|
}
|
|
|
|
free(vaddrs, M_IFLIB);
|
|
|
|
free(paddrs, M_IFLIB);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
/* XXX handle allocation failure changes */
|
|
|
|
err_rx_desc:
|
|
|
|
err_tx_desc:
|
iflib: fix invalid free during queue allocation failure
In r301567, code was added to cleanup to prevent memory leaks for the
Tx and Rx ring structs. This code carefully tracked txq and rxq, and
made sure to free them properly during cleanup.
Because we assigned the txq and rxq pointers into the ctx->ifc_txqs and
ctx->ifc_rxqs, we carefully reset these pointers to NULL, so that
cleanup code would not accidentally free the memory twice.
This was changed by r304021 ("Update iflib to support more NIC designs"),
which removed this resetting of the pointers to NULL, because it re-used
the txq and rxq pointers as an index into the queue set array.
Unfortunately, the cleanup code was left alone. Thus, if we fail to
allocate DMA or fail to configure the queues using the driver's ifdi
methods, we will attempt to free txq and rxq. These variables would now
point to the wrong location, resulting in a page fault.
There are a number of methods to correct this, but ultimately the root
cause was that we reuse the txq and rxq pointers for two different
purposes.
Instead, when allocating, store the returned pointer directly into
ctx->ifc_txqs and ctx->ifc_rxqs. Then, assign this to txq and rxq as
index pointers before starting the loop to allocate each queue.
Drop the cleanup code for txq and rxq, and only use ctx->ifc_txqs and
ctx->ifc_rxqs.
Thus, we no longer need to free txq or rxq under any error flow, and
instead rely solely on the pointers stored in ctx->ifc_txqs and
ctx->ifc_rxqs. This prevents the invalid free() and ensures that we
still properly clean up after ourselves, as before, when failing to
allocate (a simplified sketch of the pattern follows this log entry).
Submitted by: Jacob Keller
Reviewed by: gallatin, sbruno
Sponsored by: Intel Corporation
Differential Revision: https://reviews.freebsd.org/D15285
2018-05-04 15:20:34 +00:00
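A compressed sketch of the allocation pattern this log entry describes
follows; it is not the actual iflib(4) code. if_ctx_t, iflib_txq_t,
struct iflib_txq and M_IFLIB are the real iflib types and malloc type,
while setup_one_txq() is a hypothetical helper standing in for the
per-queue DMA and ifdi setup.

static int
queues_alloc_sketch(if_ctx_t ctx, int ntxqsets)
{
	iflib_txq_t txq;
	int i;

	/* Store the array in the softc right away ... */
	ctx->ifc_txqs = malloc(sizeof(struct iflib_txq) * ntxqsets,
	    M_IFLIB, M_NOWAIT | M_ZERO);
	if (ctx->ifc_txqs == NULL)
		return (ENOMEM);

	/* ... and iterate with a separate cursor. */
	for (txq = ctx->ifc_txqs, i = 0; i < ntxqsets; i++, txq++) {
		if (setup_one_txq(ctx, txq, i) != 0)	/* hypothetical */
			goto fail;
	}
	return (0);
fail:
	/* Free through the softc pointer, never the advanced cursor. */
	free(ctx->ifc_txqs, M_IFLIB);
	ctx->ifc_txqs = NULL;
	return (ENOMEM);
}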
|
|
|
rx_fail:
|
2016-05-18 04:35:58 +00:00
|
|
|
if (ctx->ifc_rxqs != NULL)
|
|
|
|
free(ctx->ifc_rxqs, M_IFLIB);
|
|
|
|
ctx->ifc_rxqs = NULL;
|
|
|
|
if (ctx->ifc_txqs != NULL)
|
|
|
|
free(ctx->ifc_txqs, M_IFLIB);
|
|
|
|
ctx->ifc_txqs = NULL;
|
|
|
|
fail:
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_tx_structures_setup(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
iflib_txq_t txq = ctx->ifc_txqs;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NTXQSETS(ctx); i++, txq++)
|
|
|
|
iflib_txq_setup(txq);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
iflib_tx_structures_free(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
iflib_txq_t txq = ctx->ifc_txqs;
|
2018-11-14 15:16:45 +00:00
|
|
|
if_shared_ctx_t sctx = ctx->ifc_sctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
int i, j;
|
|
|
|
|
|
|
|
for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
|
|
|
|
iflib_txq_destroy(txq);
|
2018-11-14 15:16:45 +00:00
|
|
|
for (j = 0; j < sctx->isc_ntxqs; j++)
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_dma_free(&txq->ift_ifdi[j]);
|
|
|
|
}
|
|
|
|
free(ctx->ifc_txqs, M_IFLIB);
|
|
|
|
ctx->ifc_txqs = NULL;
|
|
|
|
IFDI_QUEUES_FREE(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize all receive rings.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
iflib_rx_structures_setup(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
iflib_rxq_t rxq = ctx->ifc_rxqs;
|
2016-05-18 14:18:03 +00:00
|
|
|
int q;
|
|
|
|
#if defined(INET6) || defined(INET)
|
|
|
|
int i, err;
|
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
|
2016-05-18 14:18:03 +00:00
|
|
|
#if defined(INET6) || defined(INET)
|
2016-05-18 04:35:58 +00:00
|
|
|
tcp_lro_free(&rxq->ifr_lc);
|
2016-08-12 21:29:44 +00:00
|
|
|
if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
|
|
|
|
TCP_LRO_ENTRIES, min(1024,
|
|
|
|
ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
rxq->ifr_lro_enabled = TRUE;
|
2016-05-18 14:18:03 +00:00
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
|
|
|
|
}
|
|
|
|
return (0);
|
2016-05-18 14:18:03 +00:00
|
|
|
#if defined(INET6) || defined(INET)
|
2016-05-18 04:35:58 +00:00
|
|
|
fail:
|
|
|
|
/*
|
|
|
|
* Free RX software descriptors allocated so far; we will only handle
|
|
|
|
* the rings that completed; the failing case will have
|
|
|
|
* cleaned up after itself. 'q' failed, so it's the terminus.
|
|
|
|
*/
|
|
|
|
rxq = ctx->ifc_rxqs;
|
|
|
|
for (i = 0; i < q; ++i, rxq++) {
|
|
|
|
iflib_rx_sds_free(rxq);
|
|
|
|
rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
|
|
|
|
}
|
|
|
|
return (err);
|
2016-05-18 14:18:03 +00:00
|
|
|
#endif
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Free all receive rings.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
iflib_rx_structures_free(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
iflib_rxq_t rxq = ctx->ifc_rxqs;
|
|
|
|
|
2016-08-12 21:29:44 +00:00
|
|
|
for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
|
2016-05-18 04:35:58 +00:00
|
|
|
iflib_rx_sds_free(rxq);
|
|
|
|
}
|
2018-10-12 22:40:54 +00:00
|
|
|
free(ctx->ifc_rxqs, M_IFLIB);
|
|
|
|
ctx->ifc_rxqs = NULL;
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
iflib_qset_structures_setup(if_ctx_t ctx)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2018-05-08 16:56:02 +00:00
|
|
|
/*
|
|
|
|
* It is expected that the caller takes care of freeing queues if this
|
|
|
|
* fails.
|
|
|
|
*/
|
2018-05-08 17:15:10 +00:00
|
|
|
if ((err = iflib_tx_structures_setup(ctx)) != 0) {
|
|
|
|
device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (err);
|
2018-05-08 17:15:10 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2018-05-08 16:56:02 +00:00
|
|
|
if ((err = iflib_rx_structures_setup(ctx)) != 0)
|
2016-05-18 04:35:58 +00:00
|
|
|
device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
|
2018-05-08 16:56:02 +00:00
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
|
2018-05-29 21:56:39 +00:00
|
|
|
driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
|
|
|
|
}
|
|
|
|
|
2017-12-20 01:03:34 +00:00
|
|
|
#ifdef SMP
|
2016-10-18 13:12:19 +00:00
|
|
|
static int
|
2017-12-20 01:03:34 +00:00
|
|
|
find_nth(if_ctx_t ctx, int qid)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
2017-12-20 01:03:34 +00:00
|
|
|
cpuset_t cpus;
|
2016-10-18 13:12:19 +00:00
|
|
|
int i, cpuid, eqid, count;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2017-12-20 01:03:34 +00:00
|
|
|
CPU_COPY(&ctx->ifc_cpus, &cpus);
|
|
|
|
count = CPU_COUNT(&cpus);
|
2016-10-18 13:12:19 +00:00
|
|
|
eqid = qid % count;
|
2016-05-18 04:35:58 +00:00
|
|
|
/* clear up to the qid'th bit */
|
2016-10-18 13:12:19 +00:00
|
|
|
for (i = 0; i < eqid; i++) {
|
2017-12-20 01:03:34 +00:00
|
|
|
cpuid = CPU_FFS(&cpus);
|
2016-10-18 13:12:19 +00:00
|
|
|
MPASS(cpuid != 0);
|
2017-12-20 01:03:34 +00:00
|
|
|
CPU_CLR(cpuid-1, &cpus);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
2017-12-20 01:03:34 +00:00
|
|
|
cpuid = CPU_FFS(&cpus);
|
2016-10-18 13:12:19 +00:00
|
|
|
MPASS(cpuid != 0);
|
|
|
|
return (cpuid-1);
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
2017-12-20 01:03:34 +00:00
|
|
|
#ifdef SCHED_ULE
|
|
|
|
extern struct cpu_group *cpu_top; /* CPU topology */
|
|
|
|
|
|
|
|
static int
|
|
|
|
find_child_with_core(int cpu, struct cpu_group *grp)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (grp->cg_children == 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
MPASS(grp->cg_child);
|
|
|
|
for (i = 0; i < grp->cg_children; i++) {
|
|
|
|
if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2018-04-18 15:34:18 +00:00
|
|
|
* Find the nth "close" core to the specified core
|
|
|
|
* "close" is defined as the deepest level that shares
|
|
|
|
* at least an L2 cache. With threads, this will be
|
|
|
|
* threads on the same core. If the shared cache is L3
|
|
|
|
* or higher, simply returns the same core.
|
2017-12-20 01:03:34 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-04-18 15:34:18 +00:00
|
|
|
find_close_core(int cpu, int core_offset)
|
2017-12-20 01:03:34 +00:00
|
|
|
{
|
|
|
|
struct cpu_group *grp;
|
|
|
|
int i;
|
2018-04-18 15:34:18 +00:00
|
|
|
int fcpu;
|
2017-12-20 01:03:34 +00:00
|
|
|
cpuset_t cs;
|
|
|
|
|
|
|
|
grp = cpu_top;
|
|
|
|
if (grp == NULL)
|
|
|
|
return cpu;
|
|
|
|
i = 0;
|
|
|
|
while ((i = find_child_with_core(cpu, grp)) != -1) {
|
|
|
|
/* If the child only has one cpu, don't descend */
|
|
|
|
if (grp->cg_child[i].cg_count <= 1)
|
|
|
|
break;
|
|
|
|
grp = &grp->cg_child[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If they don't share at least an L2 cache, use the same CPU */
|
|
|
|
if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
|
|
|
|
return cpu;
|
|
|
|
|
|
|
|
/* Now pick one */
|
|
|
|
CPU_COPY(&grp->cg_mask, &cs);
|
2018-04-18 15:34:18 +00:00
|
|
|
|
|
|
|
/* Add the selected CPU offset to core offset. */
|
|
|
|
for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
|
|
|
|
if (fcpu - 1 == cpu)
|
|
|
|
break;
|
|
|
|
CPU_CLR(fcpu - 1, &cs);
|
|
|
|
}
|
|
|
|
MPASS(fcpu);
|
|
|
|
|
|
|
|
core_offset += i;
|
|
|
|
|
|
|
|
CPU_COPY(&grp->cg_mask, &cs);
|
|
|
|
for (i = core_offset % grp->cg_count; i > 0; i--) {
|
2017-12-20 01:03:34 +00:00
|
|
|
MPASS(CPU_FFS(&cs));
|
|
|
|
CPU_CLR(CPU_FFS(&cs) - 1, &cs);
|
|
|
|
}
|
|
|
|
MPASS(CPU_FFS(&cs));
|
|
|
|
return CPU_FFS(&cs) - 1;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static int
|
2018-04-18 15:34:18 +00:00
|
|
|
find_close_core(int cpu, int core_offset __unused)
|
2017-12-20 01:03:34 +00:00
|
|
|
{
|
2017-12-21 23:05:13 +00:00
|
|
|
return cpu;
|
2017-12-20 01:03:34 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static int
|
2018-04-18 15:34:18 +00:00
|
|
|
get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
|
2017-12-20 01:03:34 +00:00
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case IFLIB_INTR_TX:
|
2018-04-18 15:34:18 +00:00
|
|
|
/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
|
|
|
|
/* XXX handle multiple RX threads per core and more than two cores per L2 group */
|
2017-12-20 01:03:34 +00:00
|
|
|
return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
|
|
|
|
case IFLIB_INTR_RX:
|
|
|
|
case IFLIB_INTR_RXTX:
|
2018-04-18 15:34:18 +00:00
|
|
|
/* RX queues get the specified core */
|
2017-12-20 01:03:34 +00:00
|
|
|
return qid / CPU_COUNT(&ctx->ifc_cpus);
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
2018-04-18 15:34:18 +00:00
|
|
|
#define get_core_offset(ctx, type, qid) CPU_FIRST()
|
|
|
|
#define find_close_core(cpuid, tid) CPU_FIRST()
|
2017-12-20 01:03:34 +00:00
|
|
|
#define find_nth(ctx, gid) CPU_FIRST()
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Just to avoid copy/paste */
|
|
|
|
static inline int
|
Make taskqgroup_attach{,_cpu}(9) work across architectures
So far, intr_{g,s}etaffinity(9) take a single int for identifying
a device interrupt. This approach doesn't work on all architectures
supported, as a single int isn't sufficient to globally specify a
device interrupt. In particular, with multiple interrupt controllers
in one system as found on e. g. arm and arm64 machines, an interrupt
number as returned by rman_get_start(9) may be unique only relative
to the bus and, thus, the interrupt controller that a certain device
hangs off of.
In turn, this makes taskqgroup_attach{,_cpu}(9) and - internal to
the gtaskqueue implementation - taskqgroup_attach_deferred{,_cpu}()
not work across architectures. In turn, iflib(4) as a gtaskqueue
consumer so far doesn't work on architectures where interrupt numbers
aren't globally unique.
However, at least for intr_setaffinity(..., CPU_WHICH_IRQ, ...) as
employed by the gtaskqueue implementation to bind an interrupt to a
particular CPU, using bus_bind_intr(9) instead is equivalent from
a functional point of view, with bus_bind_intr(9) taking the device
and interrupt resource arguments required for uniquely specifying a
device interrupt (a minimal usage sketch follows this log entry).
Thus, change the gtaskqueue implementation to employ bus_bind_intr(9)
instead and intr_{g,s}etaffinity(9) to take the device and interrupt
resource arguments required respectively. This change also moves
struct grouptask from <sys/_task.h> to <sys/gtaskqueue.h> and wraps
struct gtask along with the gtask_fn_t typedef into #ifdef _KERNEL
as userland likes to include <sys/_task.h> or indirectly drags it
in - for better or worse also with _KERNEL defined -, which with
device_t and struct resource dependencies otherwise is no longer
as easily possible now.
The userland inclusion problem probably can be improved a bit by
introducing a _WANT_TASK (as well as a _WANT_MOUNT) akin to the
existing _WANT_PRISON etc., which is orthogonal to this change,
though, and likely needs an exp-run.
While at it:
- Change the gt_cpu member in the grouptask structure to be of type
int as used elsewhere for specifying CPUs (an int16_t may be too
narrow sooner or later),
- move the gtaskqueue_enqueue_fn typedef from <sys/gtaskqueue.h> to
the gtaskqueue implementation as it's only used and needed there,
- change the GTASK_INIT macro to use "gtask" rather than "task" as
argument given that it actually operates on a struct gtask rather
than a struct task, and
- let subr_gtaskqueue.c consistently use __func__ to print function
names.
Reported by: mmel
Reviewed by: mmel
Differential Revision: https://reviews.freebsd.org/D19139
2019-02-12 21:23:59 +00:00
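As a minimal sketch of the idea above (illustrative only, error
handling trimmed): bus_bind_intr(9) names an interrupt by the
(device, resource) pair rather than by a bare IRQ number, which is
what makes the binding unambiguous on systems with several interrupt
controllers. The helper name below is hypothetical.

static int
bind_queue_irq_sketch(device_t dev, struct resource *irq_res, int cpu)
{

	/*
	 * The (dev, irq_res) pair uniquely identifies the interrupt,
	 * independent of how the platform numbers its IRQs.
	 */
	return (bus_bind_intr(dev, irq_res, cpu));
}

In the iflib(4) code below, the same information reaches the
gtaskqueue code through the dev and irq->ii_res arguments now passed
to taskqgroup_attach(9) and taskqgroup_attach_cpu(9).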
|
|
|
iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
|
|
|
|
int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
|
|
|
|
const char *name)
|
2017-12-20 01:03:34 +00:00
|
|
|
{
|
2019-02-12 21:23:59 +00:00
|
|
|
device_t dev;
|
|
|
|
int err, cpuid, tid;
|
2017-12-20 01:03:34 +00:00
|
|
|
|
2019-02-12 21:23:59 +00:00
|
|
|
dev = ctx->ifc_dev;
|
2017-12-20 01:03:34 +00:00
|
|
|
cpuid = find_nth(ctx, qid);
|
2018-04-18 15:34:18 +00:00
|
|
|
tid = get_core_offset(ctx, type, qid);
|
2017-12-20 01:03:34 +00:00
|
|
|
MPASS(tid >= 0);
|
2018-04-18 15:34:18 +00:00
|
|
|
cpuid = find_close_core(cpuid, tid);
|
2019-02-12 21:23:59 +00:00
|
|
|
err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
|
|
|
|
name);
|
2017-12-20 01:03:34 +00:00
|
|
|
if (err) {
|
2019-02-12 21:23:59 +00:00
|
|
|
device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
|
2017-12-20 01:03:34 +00:00
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
#ifdef notyet
|
|
|
|
if (cpuid > ctx->ifc_cpuid_highest)
|
|
|
|
ctx->ifc_cpuid_highest = cpuid;
|
|
|
|
#endif
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
int
|
|
|
|
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
|
2018-05-29 21:56:39 +00:00
|
|
|
iflib_intr_type_t type, driver_filter_t *filter,
|
|
|
|
void *filter_arg, int qid, const char *name)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
2019-02-12 21:23:59 +00:00
|
|
|
device_t dev;
|
2016-05-18 04:35:58 +00:00
|
|
|
struct grouptask *gtask;
|
|
|
|
struct taskqgroup *tqg;
|
|
|
|
iflib_filter_info_t info;
|
2016-08-12 21:29:44 +00:00
|
|
|
gtask_fn_t *fn;
|
2017-12-20 01:03:34 +00:00
|
|
|
int tqrid, err;
|
2017-03-13 22:53:06 +00:00
|
|
|
driver_filter_t *intr_fast;
|
2016-05-18 04:35:58 +00:00
|
|
|
void *q;
|
|
|
|
|
|
|
|
info = &ctx->ifc_filter_info;
|
2016-10-18 13:29:30 +00:00
|
|
|
tqrid = rid;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
/* XXX merge tx/rx for netmap? */
|
|
|
|
case IFLIB_INTR_TX:
|
|
|
|
q = &ctx->ifc_txqs[qid];
|
|
|
|
info = &ctx->ifc_txqs[qid].ift_filter_info;
|
|
|
|
gtask = &ctx->ifc_txqs[qid].ift_task;
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_io_tqg;
|
2016-05-18 04:35:58 +00:00
|
|
|
fn = _task_fn_tx;
|
2017-03-13 22:53:06 +00:00
|
|
|
intr_fast = iflib_fast_intr;
|
2016-11-18 04:19:21 +00:00
|
|
|
GROUPTASK_INIT(gtask, 0, fn, q);
|
2018-05-16 21:03:22 +00:00
|
|
|
ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
|
|
|
case IFLIB_INTR_RX:
|
|
|
|
q = &ctx->ifc_rxqs[qid];
|
|
|
|
info = &ctx->ifc_rxqs[qid].ifr_filter_info;
|
|
|
|
gtask = &ctx->ifc_rxqs[qid].ifr_task;
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_io_tqg;
|
2016-05-18 04:35:58 +00:00
|
|
|
fn = _task_fn_rx;
|
2017-09-16 02:41:38 +00:00
|
|
|
intr_fast = iflib_fast_intr;
|
2017-03-13 22:53:06 +00:00
|
|
|
GROUPTASK_INIT(gtask, 0, fn, q);
|
|
|
|
break;
|
|
|
|
case IFLIB_INTR_RXTX:
|
|
|
|
q = &ctx->ifc_rxqs[qid];
|
|
|
|
info = &ctx->ifc_rxqs[qid].ifr_filter_info;
|
|
|
|
gtask = &ctx->ifc_rxqs[qid].ifr_task;
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_io_tqg;
|
2017-03-13 22:53:06 +00:00
|
|
|
fn = _task_fn_rx;
|
|
|
|
intr_fast = iflib_fast_intr_rxtx;
|
2016-11-18 04:19:21 +00:00
|
|
|
GROUPTASK_INIT(gtask, 0, fn, q);
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
|
|
|
case IFLIB_INTR_ADMIN:
|
|
|
|
q = ctx;
|
2016-11-18 04:19:21 +00:00
|
|
|
tqrid = -1;
|
2016-05-18 04:35:58 +00:00
|
|
|
info = &ctx->ifc_filter_info;
|
|
|
|
gtask = &ctx->ifc_admin_task;
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_config_tqg;
|
2016-05-18 04:35:58 +00:00
|
|
|
fn = _task_fn_admin;
|
2017-03-13 22:53:06 +00:00
|
|
|
intr_fast = iflib_fast_intr_ctx;
|
2016-05-18 04:35:58 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
panic("unknown net intr type");
|
|
|
|
}
|
|
|
|
|
|
|
|
info->ifi_filter = filter;
|
|
|
|
info->ifi_filter_arg = filter_arg;
|
|
|
|
info->ifi_task = gtask;
|
2017-03-13 22:53:06 +00:00
|
|
|
info->ifi_ctx = q;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
2019-02-12 21:23:59 +00:00
|
|
|
dev = ctx->ifc_dev;
|
2017-03-13 22:53:06 +00:00
|
|
|
err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
|
2016-11-18 04:19:21 +00:00
|
|
|
if (err != 0) {
|
2019-02-12 21:23:59 +00:00
|
|
|
device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
|
2016-05-18 04:35:58 +00:00
|
|
|
return (err);
|
2016-11-18 04:19:21 +00:00
|
|
|
}
|
|
|
|
if (type == IFLIB_INTR_ADMIN)
|
|
|
|
return (0);
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
if (tqrid != -1) {
|
2019-02-12 21:23:59 +00:00
|
|
|
err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
|
|
|
|
q, name);
|
2017-12-20 01:03:34 +00:00
|
|
|
if (err)
|
|
|
|
return (err);
|
2016-10-18 13:12:19 +00:00
|
|
|
} else {
|
2019-02-12 21:23:59 +00:00
|
|
|
taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
|
2016-10-18 13:12:19 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-10-12 22:40:54 +00:00
|
|
|
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name)
|
2016-05-18 04:35:58 +00:00
|
|
|
{
|
|
|
|
struct grouptask *gtask;
|
|
|
|
struct taskqgroup *tqg;
|
2016-08-12 21:29:44 +00:00
|
|
|
gtask_fn_t *fn;
|
2016-05-18 04:35:58 +00:00
|
|
|
void *q;
|
2017-12-20 01:03:34 +00:00
|
|
|
int err;
|
2016-05-18 04:35:58 +00:00
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case IFLIB_INTR_TX:
|
|
|
|
q = &ctx->ifc_txqs[qid];
|
|
|
|
gtask = &ctx->ifc_txqs[qid].ift_task;
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_io_tqg;
|
2016-05-18 04:35:58 +00:00
|
|
|
fn = _task_fn_tx;
|
|
|
|
break;
|
|
|
|
case IFLIB_INTR_RX:
|
|
|
|
q = &ctx->ifc_rxqs[qid];
|
|
|
|
gtask = &ctx->ifc_rxqs[qid].ifr_task;
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_io_tqg;
|
2016-05-18 04:35:58 +00:00
|
|
|
fn = _task_fn_rx;
|
|
|
|
break;
|
|
|
|
case IFLIB_INTR_IOV:
|
|
|
|
q = ctx;
|
|
|
|
gtask = &ctx->ifc_vflr_task;
|
2017-09-16 02:41:38 +00:00
|
|
|
tqg = qgroup_if_config_tqg;
|
2016-05-18 04:35:58 +00:00
|
|
|
fn = _task_fn_iov;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
panic("unknown net intr type");
|
|
|
|
}
|
|
|
|
GROUPTASK_INIT(gtask, 0, fn, q);
|
2019-02-12 21:23:59 +00:00
|
|
|
if (irq != NULL) {
|
|
|
|
err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
|
|
|
|
q, name);
|
2017-12-20 01:03:34 +00:00
|
|
|
if (err)
|
2019-02-12 21:23:59 +00:00
|
|
|
taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev,
|
|
|
|
irq->ii_res, name);
|
|
|
|
} else {
|
|
|
|
taskqgroup_attach(tqg, gtask, q, NULL, NULL, name);
|
2017-12-20 01:03:34 +00:00
|
|
|
}
|
2016-05-18 04:35:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
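/*
 * Tear down a previously set up device interrupt: detach the handler, if
 * one was installed, and release the underlying IRQ resource.
 */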
void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{

        if (irq->ii_tag)
                bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);
        if (irq->ii_res)
                bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
                    rman_get_rid(irq->ii_res), irq->ii_res);
}

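/*
 * Set up a single legacy (INTx) interrupt shared by all of the driver's
 * work: install the fast interrupt filter on RX queue 0 and attach the RX
 * and TX group tasks to the same vector.
 */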
static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name)
{
        iflib_txq_t txq = ctx->ifc_txqs;
        iflib_rxq_t rxq = ctx->ifc_rxqs;
        if_irq_t irq = &ctx->ifc_legacy_irq;
        iflib_filter_info_t info;
        device_t dev;
        struct grouptask *gtask;
        struct resource *res;
        struct taskqgroup *tqg;
        gtask_fn_t *fn;
        int tqrid;
        void *q;
        int err;

        q = &ctx->ifc_rxqs[0];
        info = &rxq[0].ifr_filter_info;
        gtask = &rxq[0].ifr_task;
        tqg = qgroup_if_io_tqg;
        tqrid = irq->ii_rid = *rid;
        fn = _task_fn_rx;

        ctx->ifc_flags |= IFC_LEGACY;
        info->ifi_filter = filter;
        info->ifi_filter_arg = filter_arg;
        info->ifi_task = gtask;
        info->ifi_ctx = ctx;

        dev = ctx->ifc_dev;
        /* We allocate a single interrupt resource */
        if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
                return (err);
        GROUPTASK_INIT(gtask, 0, fn, q);
        res = irq->ii_res;
        taskqgroup_attach(tqg, gtask, q, dev, res, name);

        GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
        taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
            "tx");
        return (0);
}

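/* Register a led(4) instance for the port, driven via iflib_led_func(). */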
void
iflib_led_create(if_ctx_t ctx)
{

        ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
            device_get_nameunit(ctx->ifc_dev));
}

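/*
 * The *_intr_deferred() helpers let a driver kick the corresponding iflib
 * group task from outside interrupt context.
 */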
void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{

        GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}

void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{

        GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}

void
iflib_admin_intr_deferred(if_ctx_t ctx)
{
#ifdef INVARIANTS
        struct grouptask *gtask;

        gtask = &ctx->ifc_admin_task;
        MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
#endif
        GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}

void
iflib_iov_intr_deferred(if_ctx_t ctx)
{

        GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}

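/* Attach a driver-supplied group task to the shared I/O taskqueue group. */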
void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
{

        taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
            name);
}

void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
    const char *name)
{

        GROUPTASK_INIT(gtask, 0, fn, ctx);
        taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
            name);
}

void
iflib_config_gtask_deinit(struct grouptask *gtask)
{

        taskqgroup_detach(qgroup_if_config_tqg, gtask);
}

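/*
 * Record a link state change reported by the driver: update the interface
 * baudrate, enable descriptor prefetching on fast (>= 10 Gb/s) links, idle
 * the TX queues when the link goes down so the watchdog stays quiet, and
 * propagate the new state to the network stack.
 */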
void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
        if_t ifp = ctx->ifc_ifp;
        iflib_txq_t txq = ctx->ifc_txqs;

        if_setbaudrate(ifp, baudrate);
        if (baudrate >= IF_Gbps(10)) {
                STATE_LOCK(ctx);
                ctx->ifc_flags |= IFC_PREFETCH;
                STATE_UNLOCK(ctx);
        }
        /* If link down, disable watchdog */
        if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
                for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
                        txq->ift_qstatus = IFLIB_QUEUE_IDLE;
        }
        ctx->ifc_link_state = link_state;
        if_link_state_change(ifp, link_state);
}

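/*
 * Ask the driver how many TX descriptors the hardware has completed since
 * the last call and advance the queue's processed indices accordingly.
 */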
static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
        int credits;
#ifdef INVARIANTS
        int credits_pre = txq->ift_cidx_processed;
#endif

        if (ctx->isc_txd_credits_update == NULL)
                return (0);

        bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
            BUS_DMASYNC_POSTREAD);
        if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
                return (0);

        txq->ift_processed += credits;
        txq->ift_cidx_processed += credits;

        MPASS(credits_pre + credits == txq->ift_cidx_processed);
        if (txq->ift_cidx_processed >= txq->ift_size)
                txq->ift_cidx_processed -= txq->ift_size;
        return (credits);
}

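/*
 * Synchronize the free-list descriptor rings and ask the driver how many
 * RX descriptors are ready for processing, up to the given budget.
 */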
static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{
        iflib_fl_t fl;
        u_int i;

        for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
                bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
            budget));
}

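/*
 * Register an interrupt-delay tuning sysctl that is serviced by
 * iflib_sysctl_int_delay().
 */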
void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
    const char *description, if_int_delay_info_t info,
    int offset, int value)
{
        info->iidi_ctx = ctx;
        info->iidi_offset = offset;
        info->iidi_value = value;
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
            info, 0, iflib_sysctl_int_delay, "I", description);
}

struct sx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

        return (&ctx->ifc_ctx_sx);
}

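/*
 * Work out how many MSI-X vectors the device, the CPU count, RSS and the
 * administrator-supplied tunables allow, and try to allocate them.  On
 * failure fall back to a single MSI vector and finally to a legacy (INTx)
 * interrupt.  Returns the number of vectors obtained.
 */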
static int
iflib_msix_init(if_ctx_t ctx)
{
        device_t dev = ctx->ifc_dev;
        if_shared_ctx_t sctx = ctx->ifc_sctx;
        if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
        int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
        int iflib_num_tx_queues, iflib_num_rx_queues;
        int err, admincnt, bar;

        iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
        iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;

        if (bootverbose)
                device_printf(dev, "msix_init qsets capped at %d\n",
                    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));

        bar = ctx->ifc_softc_ctx.isc_msix_bar;
        admincnt = sctx->isc_admin_intrcnt;
        /* Override by tuneable */
        if (scctx->isc_disable_msix)
                goto msi;

        /* First try MSI-X */
        if ((msgs = pci_msix_count(dev)) == 0) {
                if (bootverbose)
                        device_printf(dev, "MSI-X not supported or disabled\n");
                goto msi;
        }
        /*
         * bar == -1 => "trust me I know what I'm doing"
         * Some drivers are for hardware that is so shoddily
         * documented that no one knows which bars are which
         * so the developer has to map all bars.  This hack
         * allows shoddy garbage to use MSI-X in this framework.
         */
        if (bar != -1) {
                ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
                    SYS_RES_MEMORY, &bar, RF_ACTIVE);
                if (ctx->ifc_msix_mem == NULL) {
                        device_printf(dev, "Unable to map MSI-X table\n");
                        goto msi;
                }
        }
#if IFLIB_DEBUG
        /* use only 1 qset in debug mode */
        queuemsgs = min(msgs - admincnt, 1);
#else
        queuemsgs = msgs - admincnt;
#endif
#ifdef RSS
        queues = imin(queuemsgs, rss_getnumbuckets());
#else
        queues = queuemsgs;
#endif
        queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
        if (bootverbose)
                device_printf(dev,
                    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
                    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif
        if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
                rx_queues = iflib_num_rx_queues;
        else
                rx_queues = queues;

        if (rx_queues > scctx->isc_nrxqsets)
                rx_queues = scctx->isc_nrxqsets;

        /*
         * We want this to be all logical CPUs by default
         */
        if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
                tx_queues = iflib_num_tx_queues;
        else
                tx_queues = mp_ncpus;

        if (tx_queues > scctx->isc_ntxqsets)
                tx_queues = scctx->isc_ntxqsets;

        if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
                if (tx_queues != rx_queues)
                        device_printf(dev,
                            "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
                            min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
                tx_queues = min(rx_queues, tx_queues);
                rx_queues = min(rx_queues, tx_queues);
        }

        device_printf(dev, "Using %d rx queues %d tx queues\n",
            rx_queues, tx_queues);

        vectors = rx_queues + admincnt;
        if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
                device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
                    vectors);
                scctx->isc_vectors = vectors;
                scctx->isc_nrxqsets = rx_queues;
                scctx->isc_ntxqsets = tx_queues;
                scctx->isc_intr = IFLIB_INTR_MSIX;

                return (vectors);
        } else {
                device_printf(dev,
                    "failed to allocate %d MSI-X vectors, err: %d - using MSI\n",
                    vectors, err);
                bus_release_resource(dev, SYS_RES_MEMORY, bar,
                    ctx->ifc_msix_mem);
                ctx->ifc_msix_mem = NULL;
        }
msi:
        vectors = pci_msi_count(dev);
        scctx->isc_nrxqsets = 1;
        scctx->isc_ntxqsets = 1;
        scctx->isc_vectors = vectors;
        if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
                device_printf(dev, "Using an MSI interrupt\n");
                scctx->isc_intr = IFLIB_INTR_MSI;
        } else {
                scctx->isc_vectors = 1;
                device_printf(dev, "Using a Legacy interrupt\n");
                scctx->isc_intr = IFLIB_INTR_LEGACY;
        }

        return (vectors);
}

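/*
 * Sysctl handler that renders the per-queue mp_ring indices and consumer
 * state as a human-readable string.
 */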
static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };

static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
        int rc;
        uint16_t *state = ((uint16_t *)oidp->oid_arg1);
        struct sbuf *sb;
        const char *ring_state = "UNKNOWN";

        /* XXX needed ? */
        rc = sysctl_wire_old_buffer(req, 0);
        MPASS(rc == 0);
        if (rc != 0)
                return (rc);
        sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
        MPASS(sb != NULL);
        if (sb == NULL)
                return (ENOMEM);
        if (state[3] <= 3)
                ring_state = ring_states[state[3]];

        sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
            state[0], state[1], state[2], ring_state);
        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        return (rc);
}

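/*
 * Sysctl handler for the override_ntxds/override_nrxds tunables: prints the
 * current per-queue descriptor counts as a comma-separated list and parses
 * a new list on write.
 */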
enum iflib_ndesc_handler {
        IFLIB_NTXD_HANDLER,
        IFLIB_NRXD_HANDLER,
};

static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
        if_ctx_t ctx = (void *)arg1;
        enum iflib_ndesc_handler type = arg2;
        char buf[256] = {0};
        qidx_t *ndesc;
        char *p, *next;
        int nqs, rc, i;

        MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);

        nqs = 8;
        switch(type) {
        case IFLIB_NTXD_HANDLER:
                ndesc = ctx->ifc_sysctl_ntxds;
                if (ctx->ifc_sctx)
                        nqs = ctx->ifc_sctx->isc_ntxqs;
                break;
        case IFLIB_NRXD_HANDLER:
                ndesc = ctx->ifc_sysctl_nrxds;
                if (ctx->ifc_sctx)
                        nqs = ctx->ifc_sctx->isc_nrxqs;
                break;
        default:
                panic("unhandled type");
        }
        if (nqs == 0)
                nqs = 8;

        for (i = 0; i < 8; i++) {
                if (i >= nqs)
                        break;
                if (i)
                        strcat(buf, ",");
                sprintf(strchr(buf, 0), "%d", ndesc[i]);
        }

        rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        if (rc || req->newptr == NULL)
                return (rc);

        for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
            i++, p = strsep(&next, " ,")) {
                ndesc[i] = strtoul(p, NULL, 10);
        }

        return (rc);
}

#define NAME_BUFLEN 32
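/*
 * Create the per-device "iflib" sysctl node and the tunables that have to
 * be in place before the queues are allocated.
 */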
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
        device_t dev = iflib_get_dev(ctx);
        struct sysctl_oid_list *child, *oid_list;
        struct sysctl_ctx_list *ctx_list;
        struct sysctl_oid *node;

        ctx_list = device_get_sysctl_ctx(dev);
        child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
        ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
            CTLFLAG_RD, NULL, "IFLIB fields");
        oid_list = SYSCTL_CHILDREN(node);

        SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
            CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
            "driver version");

        SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
            CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
            "# of txqs to use, 0 => use default #");
        SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
            CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
            "# of rxqs to use, 0 => use default #");
        SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
            CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
            "permit #txq != #rxq");
        SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
            CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
            "disable MSI-X (default 0)");
        SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
            CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
            "set the rx budget");
        SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
            CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
            "cause tx to abdicate instead of running to completion");

        /* XXX change for per-queue sizes */
        SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
            CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
            mp_ndesc_handler, "A",
            "list of # of tx descriptors to use, 0 = use default #");
        SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
            CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
            mp_ndesc_handler, "A",
            "list of # of rx descriptors to use, 0 = use default #");
}

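/*
 * Populate the per-queue statistics nodes under the "iflib" sysctl tree;
 * called once the TX/RX queues and free lists have been allocated.
 */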
static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
        if_shared_ctx_t sctx = ctx->ifc_sctx;
        if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
        device_t dev = iflib_get_dev(ctx);
        struct sysctl_oid_list *child;
        struct sysctl_ctx_list *ctx_list;
        iflib_fl_t fl;
        iflib_txq_t txq;
        iflib_rxq_t rxq;
        int i, j;
        char namebuf[NAME_BUFLEN];
        char *qfmt;
        struct sysctl_oid *queue_node, *fl_node, *node;
        struct sysctl_oid_list *queue_list, *fl_list;
        ctx_list = device_get_sysctl_ctx(dev);

        node = ctx->ifc_sysctl_node;
        child = SYSCTL_CHILDREN(node);

        if (scctx->isc_ntxqsets > 100)
                qfmt = "txq%03d";
        else if (scctx->isc_ntxqsets > 10)
                qfmt = "txq%02d";
        else
                qfmt = "txq%d";
        for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
                snprintf(namebuf, NAME_BUFLEN, qfmt, i);
                queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
                    CTLFLAG_RD, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
                    CTLFLAG_RD,
                    &txq->ift_dequeued, "total mbufs freed");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
                    CTLFLAG_RD,
                    &txq->ift_enqueued, "total mbufs enqueued");
#endif
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
                    CTLFLAG_RD,
                    &txq->ift_mbuf_defrag, "# of times m_defrag was called");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
                    CTLFLAG_RD,
                    &txq->ift_pullups, "# of times m_pullup was called");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
                    CTLFLAG_RD,
                    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
                    CTLFLAG_RD,
                    &txq->ift_no_desc_avail, "# of times no descriptors were available");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
                    CTLFLAG_RD,
                    &txq->ift_map_failed, "# of times dma map failed");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
                    CTLFLAG_RD,
                    &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
                    CTLFLAG_RD,
                    &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
                SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
                    CTLFLAG_RD,
                    &txq->ift_pidx, 1, "Producer Index");
                SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
                    CTLFLAG_RD,
                    &txq->ift_cidx, 1, "Consumer Index");
                SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
                    CTLFLAG_RD,
                    &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
                SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
                    CTLFLAG_RD,
                    &txq->ift_in_use, 1, "descriptors in use");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
                    CTLFLAG_RD,
                    &txq->ift_processed, "descriptors processed for clean");
                SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
                    CTLFLAG_RD,
                    &txq->ift_cleaned, "total cleaned");
                SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
                    CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
                    0, mp_ring_state_handler, "A", "soft ring state");
                SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
                    CTLFLAG_RD, &txq->ift_br->enqueues,
                    "# of enqueues to the mp_ring for this queue");
                SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
                    CTLFLAG_RD, &txq->ift_br->drops,
                    "# of drops in the mp_ring for this queue");
                SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
                    CTLFLAG_RD, &txq->ift_br->starts,
                    "# of normal consumer starts in the mp_ring for this queue");
                SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
                    CTLFLAG_RD, &txq->ift_br->stalls,
                    "# of consumer stalls in the mp_ring for this queue");
                SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
                    CTLFLAG_RD, &txq->ift_br->restarts,
                    "# of consumer restarts in the mp_ring for this queue");
                SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
                    CTLFLAG_RD, &txq->ift_br->abdications,
                    "# of consumer abdications in the mp_ring for this queue");
        }

        if (scctx->isc_nrxqsets > 100)
                qfmt = "rxq%03d";
        else if (scctx->isc_nrxqsets > 10)
                qfmt = "rxq%02d";
        else
                qfmt = "rxq%d";
        for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
                snprintf(namebuf, NAME_BUFLEN, qfmt, i);
                queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
                    CTLFLAG_RD, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);
                if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
                        SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
                            CTLFLAG_RD,
                            &rxq->ifr_cq_pidx, 1, "Producer Index");
                        SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
                            CTLFLAG_RD,
                            &rxq->ifr_cq_cidx, 1, "Consumer Index");
                }

                for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
                        snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
                        fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
                            CTLFLAG_RD, NULL, "freelist Name");
                        fl_list = SYSCTL_CHILDREN(fl_node);
                        SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
                            CTLFLAG_RD,
                            &fl->ifl_pidx, 1, "Producer Index");
                        SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
                            CTLFLAG_RD,
                            &fl->ifl_cidx, 1, "Consumer Index");
                        SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
                            CTLFLAG_RD,
                            &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
                        SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
                            CTLFLAG_RD,
                            &fl->ifl_m_enqueued, "mbufs allocated");
                        SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
                            CTLFLAG_RD,
                            &fl->ifl_m_dequeued, "mbufs freed");
                        SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
                            CTLFLAG_RD,
                            &fl->ifl_cl_enqueued, "clusters allocated");
                        SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
                            CTLFLAG_RD,
                            &fl->ifl_cl_dequeued, "clusters freed");
#endif
                }
        }
}

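/* Flag the interface for reinitialization by the admin task. */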
void
iflib_request_reset(if_ctx_t ctx)
{

        STATE_LOCK(ctx);
        ctx->ifc_flags |= IFC_DO_RESET;
        STATE_UNLOCK(ctx);
}

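/*
 * On strict-alignment architectures, re-lay out a received frame so that
 * the payload behind the Ethernet header is aligned: shift the data up by
 * ETHER_HDR_LEN within the mbuf when there is room, otherwise move the
 * header into a freshly allocated mbuf and chain the payload behind it.
 */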
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
        struct mbuf *n;

        if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
                bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
                m->m_data += ETHER_HDR_LEN;
                n = m;
        } else {
                MGETHDR(n, M_NOWAIT, MT_DATA);
                if (n == NULL) {
                        m_freem(m);
                        return (NULL);
                }
                bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
                m->m_data += ETHER_HDR_LEN;
                m->m_len -= ETHER_HDR_LEN;
                n->m_len = ETHER_HDR_LEN;
                M_MOVE_PKTHDR(n, m);
                n->m_next = m;
        }
        return (n);
}
#endif

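/*
 * netdump(4) glue: these handlers let the kernel dump core over this
 * interface with interrupts disabled, using a polled transmit path and
 * polled RX processing.
 */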
#ifdef NETDUMP
static void
iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
{
        if_ctx_t ctx;

        ctx = if_getsoftc(ifp);
        CTX_LOCK(ctx);
        *nrxr = NRXQSETS(ctx);
        *ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
        *clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
        CTX_UNLOCK(ctx);
}

static void
iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
{
        if_ctx_t ctx;
        if_softc_ctx_t scctx;
        iflib_fl_t fl;
        iflib_rxq_t rxq;
        int i, j;

        ctx = if_getsoftc(ifp);
        scctx = &ctx->ifc_softc_ctx;

        switch (event) {
        case NETDUMP_START:
                for (i = 0; i < scctx->isc_nrxqsets; i++) {
                        rxq = &ctx->ifc_rxqs[i];
                        for (j = 0; j < rxq->ifr_nfl; j++) {
                                fl = rxq->ifr_fl;
                                fl->ifl_zone = m_getzone(fl->ifl_buf_size);
                        }
                }
                iflib_no_tx_batch = 1;
                break;
        default:
                break;
        }
}

static int
iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
{
        if_ctx_t ctx;
        iflib_txq_t txq;
        int error;

        ctx = if_getsoftc(ifp);
        if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return (EBUSY);

        txq = &ctx->ifc_txqs[0];
        error = iflib_encap(txq, &m);
        if (error == 0)
                (void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
        return (error);
}

static int
iflib_netdump_poll(struct ifnet *ifp, int count)
{
        if_ctx_t ctx;
        if_softc_ctx_t scctx;
        iflib_txq_t txq;
        int i;

        ctx = if_getsoftc(ifp);
        scctx = &ctx->ifc_softc_ctx;

        if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return (EBUSY);

        txq = &ctx->ifc_txqs[0];
        (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

        for (i = 0; i < scctx->isc_nrxqsets; i++)
                (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
        return (0);
}
#endif /* NETDUMP */