/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/types.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>	/* TCP_LRO_ENTRIES, used by the lro tunables */
#include <machine/in_cksum.h>
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DEV_NETMAP
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#endif

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif
/*
 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
 * 0-7 are valid values.
 */
static int fl_pktshift = 2;
TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);

/*
 * Pad ethernet payload up to this boundary.
 * -1: driver should figure out a good value.
 *  0: disable padding.
 *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
 */
int fl_pad = -1;
TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);

/*
 * Status page length.
 * -1: driver should figure out a good value.
 *  64 or 128 are the only other valid values.
 */
static int spg_len = -1;
TUNABLE_INT("hw.cxgbe.spg_len", &spg_len);

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int cong_drop = 0;
TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop);

/*
 * Deliver multiple frames in the same free list buffer if they fit.
 * -1: let the driver decide whether to enable buffer packing or not.
 *  0: disable buffer packing.
 *  1: enable buffer packing.
 */
static int buffer_packing = -1;
TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing);

/*
 * Start next frame in a packed buffer at this boundary.
 * -1: driver should figure out a good value.
 * T4: driver will ignore this and use the same value as fl_pad above.
 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
 */
static int fl_pack = -1;
TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack);

/*
 * Allow the driver to create mbuf(s) in a cluster allocated for rx.
 * 0: never; always allocate mbufs from the zone_mbuf UMA zone.
 * 1: ok to create mbuf(s) within a cluster if there is room.
 */
static int allow_mbufs_in_cluster = 1;
TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster);

/*
 * Largest rx cluster size that the driver is allowed to allocate.
 */
static int largest_rx_cluster = MJUM16BYTES;
TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster);

/*
 * Size of cluster allocation that's most likely to succeed.  The driver will
 * fall back to this size if it fails to allocate clusters larger than this.
 */
static int safest_rx_cluster = PAGE_SIZE;
TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster);

/*
 * The interrupt holdoff timers are multiplied by this value on T6+.
 * 1 and 3-17 (both inclusive) are legal values.
 */
static int tscale = 1;
TUNABLE_INT("hw.cxgbe.tscale", &tscale);

/*
 * Number of LRO entries in the lro_ctrl structure per rx queue.
 */
static int lro_entries = TCP_LRO_ENTRIES;
TUNABLE_INT("hw.cxgbe.lro_entries", &lro_entries);

/*
 * This enables presorting of frames before they're fed into tcp_lro_rx.
 */
static int lro_mbufs = 0;
TUNABLE_INT("hw.cxgbe.lro_mbufs", &lro_mbufs);
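/*
 * All of the knobs above are boot-time loader tunables; TUNABLE_INT fetches
 * their values from the kernel environment at module load.  A sketch of how
 * a user might override the defaults (tunable names as declared above,
 * values purely illustrative) in /boot/loader.conf:
 *
 *	hw.cxgbe.fl_pktshift="0"
 *	hw.cxgbe.buffer_packing="1"
 *	hw.cxgbe.largest_rx_cluster="9216"	# i.e. MJUM9BYTES
 */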
struct txpkts {
        u_int wr_type;          /* type 0 or type 1 */
        u_int npkt;             /* # of packets in this work request */
        u_int plen;             /* total payload (sum of all packets) */
        u_int len16;            /* # of 16B pieces used by this work request */
};

/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
        struct sglist sg;
        struct sglist_seg seg[TX_SGL_SEGS];
};
static int service_iq(struct sge_iq *, int);
static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
    uint16_t, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
    int, int);
static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
    struct sge_iq *);
static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid *, struct sge_fl *);
static int alloc_fwq(struct adapter *);
static int free_fwq(struct adapter *);
static int alloc_mgmtq(struct adapter *);
static int free_mgmtq(struct adapter *);
static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
    struct sysctl_oid *);
static int free_rxq(struct vi_info *, struct sge_rxq *);
#ifdef TCP_OFFLOAD
static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
    struct sysctl_oid *);
static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
#endif
#ifdef DEV_NETMAP
static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
    struct sysctl_oid *);
static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
    struct sysctl_oid *);
static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#ifdef TCP_OFFLOAD
static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#endif
static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
static int free_eq(struct adapter *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
    struct sysctl_oid *);
static int free_wrq(struct adapter *, struct sge_wrq *);
static int alloc_txq(struct vi_info *, struct sge_txq *, int,
    struct sysctl_oid *);
static int free_txq(struct vi_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
static void refill_sfl(void *);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct adapter *, struct sge_fl *);
static void find_best_refill_source(struct adapter *, struct sge_fl *, int);
static void find_safe_refill_source(struct adapter *, struct sge_fl *);
static void add_fl_to_sfl(struct adapter *, struct sge_fl *);

static inline void get_pkt_gl(struct mbuf *, struct sglist *);
static inline u_int txpkt_len16(u_int, u_int);
static inline u_int txpkt_vm_len16(u_int, u_int);
static inline u_int txpkts0_len16(u_int);
static inline u_int txpkts1_len16(void);
static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *,
    struct mbuf *, u_int);
static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
    struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int);
static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int);
static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int);
static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *,
    struct mbuf *, const struct txpkts *, u_int);
static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
static inline uint16_t read_hw_cidx(struct sge_eq *);
static inline u_int reclaimable_tx_desc(struct sge_eq *);
static inline u_int total_available_tx_desc(struct sge_eq *);
static u_int reclaim_tx_descs(struct sge_txq *, u_int);
static void tx_reclaim(void *, int);
static __be64 get_flit(struct sglist_seg *, int, int);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
static void wrq_tx_drain(void *, int);
static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);

static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
static int sysctl_tc(SYSCTL_HANDLER_ARGS);

static counter_u64_t extfree_refs;
static counter_u64_t extfree_rels;
an_handler_t t4_an_handler;
fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];

void
t4_register_an_handler(an_handler_t h)
{
        uintptr_t *loc;

        MPASS(h == NULL || t4_an_handler == NULL);

        loc = (uintptr_t *)&t4_an_handler;
        atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
{
        uintptr_t *loc;

        MPASS(type < nitems(t4_fw_msg_handler));
        MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
        /*
         * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
         * handler dispatch table.  Reject any attempt to install a handler for
         * this subtype.
         */
        MPASS(type != FW_TYPE_RSSCPL);
        MPASS(type != FW6_TYPE_RSSCPL);

        loc = (uintptr_t *)&t4_fw_msg_handler[type];
        atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_cpl_handler(int opcode, cpl_handler_t h)
{
        uintptr_t *loc;

        MPASS(opcode < nitems(t4_cpl_handler));
        MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);

        loc = (uintptr_t *)&t4_cpl_handler[opcode];
        atomic_store_rel_ptr(loc, (uintptr_t)h);
}

static int
set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
        u_int tid;
        int cookie;

        MPASS(m == NULL);

        tid = GET_TID(cpl);
        if (is_ftid(iq->adapter, tid)) {
                /*
                 * The return code for filter-write is put in the CPL cookie so
                 * we have to rely on the hardware tid (is_ftid) to determine
                 * that this is a response to a filter.
                 */
                cookie = CPL_COOKIE_FILTER;
        } else {
                cookie = G_COOKIE(cpl->cookie);
        }
        MPASS(cookie > CPL_COOKIE_RESERVED);
        MPASS(cookie < nitems(set_tcb_rpl_handlers));

        return (set_tcb_rpl_handlers[cookie](iq, rss, m));
}

static int
l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
        unsigned int cookie;

        MPASS(m == NULL);

        cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
        return (l2t_write_rpl_handlers[cookie](iq, rss, m));
}

static int
act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
        u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));

        MPASS(m == NULL);
        MPASS(cookie != CPL_COOKIE_RESERVED);

        return (act_open_rpl_handlers[cookie](iq, rss, m));
}

static int
abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        u_int cookie;

        MPASS(m == NULL);
        if (is_hashfilter(sc))
                cookie = CPL_COOKIE_HASHFILTER;
        else
                cookie = CPL_COOKIE_TOM;

        return (abort_rpl_rss_handlers[cookie](iq, rss, m));
}

static void
t4_init_shared_cpl_handlers(void)
{

        t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
        t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
        t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
        t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
}

void
t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
{
        uintptr_t *loc;

        MPASS(opcode < nitems(t4_cpl_handler));
        MPASS(cookie > CPL_COOKIE_RESERVED);
        MPASS(cookie < NUM_CPL_COOKIES);
        MPASS(t4_cpl_handler[opcode] != NULL);

        switch (opcode) {
        case CPL_SET_TCB_RPL:
                loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
                break;
        case CPL_L2T_WRITE_RPL:
                loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
                break;
        case CPL_ACT_OPEN_RPL:
                loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
                break;
        case CPL_ABORT_RPL_RSS:
                loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
                break;
        default:
                MPASS(0);
                return;
        }
        MPASS(h == NULL || *loc == (uintptr_t)NULL);
        atomic_store_rel_ptr(loc, (uintptr_t)h);
}
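/*
 * A sketch of how a consumer such as the TOE module would hook one of the
 * shared opcodes dispatched above.  "my_set_tcb_rpl" is a hypothetical
 * function with the cpl_handler_t signature, not something defined in this
 * file:
 *
 *	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, my_set_tcb_rpl,
 *	    CPL_COOKIE_TOM);
 *
 * After this, set_tcb_rpl_handler() routes any CPL_SET_TCB_RPL whose cookie
 * field decodes to CPL_COOKIE_TOM to my_set_tcb_rpl.
 */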
/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

        if (fl_pktshift < 0 || fl_pktshift > 7) {
                printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
                    " using 2 instead.\n", fl_pktshift);
                fl_pktshift = 2;
        }

        if (spg_len != 64 && spg_len != 128) {
                int len;

#if defined(__i386__) || defined(__amd64__)
                len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
                len = 64;
#endif
                if (spg_len != -1) {
                        printf("Invalid hw.cxgbe.spg_len value (%d),"
                            " using %d instead.\n", spg_len, len);
                }
                spg_len = len;
        }

        if (cong_drop < -1 || cong_drop > 1) {
                printf("Invalid hw.cxgbe.cong_drop value (%d),"
                    " using 0 instead.\n", cong_drop);
                cong_drop = 0;
        }

        if (tscale != 1 && (tscale < 3 || tscale > 17)) {
                printf("Invalid hw.cxgbe.tscale value (%d),"
                    " using 1 instead.\n", tscale);
                tscale = 1;
        }

        extfree_refs = counter_u64_alloc(M_WAITOK);
        extfree_rels = counter_u64_alloc(M_WAITOK);
        counter_u64_zero(extfree_refs);
        counter_u64_zero(extfree_rels);

        t4_init_shared_cpl_handlers();
        t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
        t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
        t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
        t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx);
        t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
        t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}

void
t4_sge_modunload(void)
{

        counter_u64_free(extfree_refs);
        counter_u64_free(extfree_rels);
}

uint64_t
t4_sge_extfree_refs(void)
{
        uint64_t refs, rels;

        rels = counter_u64_fetch(extfree_rels);
        refs = counter_u64_fetch(extfree_refs);

        return (refs - rels);
}
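/*
 * Note on the counter pair above: the rx path bumps extfree_refs for every
 * cluster reference handed to the stack and the ext_free callback bumps
 * extfree_rels when a reference is released, so a nonzero return from
 * t4_sge_extfree_refs() means some rx clusters are still in flight.
 */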
static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
        uint32_t v, m;
        int pad, pack, pad_shift;

        pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
            X_INGPADBOUNDARY_SHIFT;
        pad = fl_pad;
        if (fl_pad < (1 << pad_shift) ||
            fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
            !powerof2(fl_pad)) {
                /*
                 * If there is any chance that we might use buffer packing and
                 * the chip is a T4, then pick 64 as the pad/pack boundary.
                 * Set it to the minimum allowed in all other cases.
                 */
                pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;

                /*
                 * For fl_pad = 0 we'll still write a reasonable value to the
                 * register but all the freelists will opt out of padding.
                 * We'll complain here only if the user tried to set it to a
                 * value greater than 0 that was invalid.
                 */
                if (fl_pad > 0) {
                        device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
                            " (%d), using %d instead.\n", fl_pad, pad);
                }
        }
        m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
        v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
        t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

        if (is_t4(sc)) {
                if (fl_pack != -1 && fl_pack != pad) {
                        /* Complain but carry on. */
                        device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
                            " using %d instead.\n", fl_pack, pad);
                }
                return;
        }

        pack = fl_pack;
        if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
            !powerof2(fl_pack)) {
                pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
                MPASS(powerof2(pack));
                if (pack < 16)
                        pack = 16;
                if (pack == 32)
                        pack = 64;
                if (pack > 4096)
                        pack = 4096;
                if (fl_pack != -1) {
                        device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
                            " (%d), using %d instead.\n", fl_pack, pack);
                }
        }
        m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
        if (pack == 16)
                v = V_INGPACKBOUNDARY(0);
        else
                v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

        MPASS(!is_t4(sc));      /* T4 doesn't have SGE_CONTROL2 */
        t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}
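/*
 * Worked example of the register encoding above (illustration only): on
 * T4/T5 the pad boundary field holds log2(boundary) - 5, so pad = 64 is
 * written as ilog2(64) - 5 = 1 and the hardware pads to 2^(1 + 5) = 64B.
 * The T5+ pack boundary uses the same log2 - 5 encoding, except that the
 * otherwise-unused value 0 selects the special 16B boundary.
 */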
/*
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
        int i;
        uint32_t v, m;
        int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
        int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
        int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
        uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
        static int sge_flbuf_sizes[] = {
                MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
                MJUMPAGESIZE,
                MJUMPAGESIZE - CL_METADATA_SIZE,
                MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE,
#endif
                MJUM9BYTES,
                MJUM16BYTES,
                MCLBYTES - MSIZE - CL_METADATA_SIZE,
                MJUM9BYTES - CL_METADATA_SIZE,
                MJUM16BYTES - CL_METADATA_SIZE,
        };

        KASSERT(sc->flags & MASTER_PF,
            ("%s: trying to change chip settings when not master.", __func__));

        m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
        v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
            V_EGRSTATUSPAGESIZE(spg_len == 128);
        t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

        setup_pad_and_pack_boundaries(sc);

        v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
            V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
            V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
            V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
            V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
            V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
            V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
            V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
        t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

        KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES,
            ("%s: hw buffer size table too big", __func__));
        for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) {
                t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
                    sge_flbuf_sizes[i]);
        }

        v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
            V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
        t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

        KASSERT(intr_timer[0] <= timer_max,
            ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
            timer_max));
        for (i = 1; i < nitems(intr_timer); i++) {
                KASSERT(intr_timer[i] >= intr_timer[i - 1],
                    ("%s: timers not listed in increasing order (%d)",
                    __func__, i));

                while (intr_timer[i] > timer_max) {
                        if (i == nitems(intr_timer) - 1) {
                                intr_timer[i] = timer_max;
                                break;
                        }
                        intr_timer[i] += intr_timer[i - 1];
                        intr_timer[i] /= 2;
                }
        }

        v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
            V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
        t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
        v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
            V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
        t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
        v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
            V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
        t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

        if (chip_id(sc) >= CHELSIO_T6) {
                m = V_TSCALE(M_TSCALE);
                if (tscale == 1)
                        v = 0;
                else
                        v = V_TSCALE(tscale - 2);
                t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);

                if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
                        m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
                            V_WRTHRTHRESH(M_WRTHRTHRESH);
                        t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
                        v &= ~m;
                        v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
                            V_WRTHRTHRESH(16);
                        t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
                }
        }

        /* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
        v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
        t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

        /*
         * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
         * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
         * may have to deal with is MAXPHYS + 1 page.
         */
        v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
        t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);

        /* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
        m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
        t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

        m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
            F_RESETDDPOFFSET;
        v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
        t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}
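/*
 * Worked example of the holdoff timer clamp above (numbers illustrative):
 * if the core clock yields timer_max = 124us, the default table
 * {1, 5, 10, 50, 100, 200} keeps its first five entries and the loop pins
 * the last one at 124, so the timers remain sorted and representable.
 */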
/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16.  If
 * padding is in use, the buffer's start and end need to be aligned to the pad
 * boundary as well.  We'll just make sure that the size is a multiple of the
 * boundary here, it is up to the buffer allocation code to make sure the start
 * of the buffer is aligned as well.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
        int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;

        return (hwsz >= 64 && (hwsz & mask) == 0);
}
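/*
 * For example (illustration only): with padding enabled and a 32B pad
 * boundary the mask is 31, so hwsz_ok() accepts 2048 (2048 & 31 == 0) but
 * rejects 2040; with padding disabled any 16B multiple >= 64 is accepted.
 */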
/*
 * XXX: driver really should be able to deal with unexpected settings.
 */
int
t4_read_chip_settings(struct adapter *sc)
{
        struct sge *s = &sc->sge;
        struct sge_params *sp = &sc->params.sge;
        int i, j, n, rc = 0;
        uint32_t m, v, r;
        uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
        static int sw_buf_sizes[] = {   /* Sorted by size */
                MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
                MJUMPAGESIZE,
#endif
                MJUM9BYTES,
                MJUM16BYTES
        };
        struct sw_zone_info *swz, *safe_swz;
        struct hw_buf_info *hwb;

        m = F_RXPKTCPLMODE;
        v = F_RXPKTCPLMODE;
        r = sc->params.sge.sge_control;
        if ((r & m) != v) {
                device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
                rc = EINVAL;
        }

        /*
         * If this changes then every single use of PAGE_SHIFT in the driver
         * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
         */
        if (sp->page_shift != PAGE_SHIFT) {
                device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
                rc = EINVAL;
        }

        /* Filter out unusable hw buffer sizes entirely (mark with -2). */
        hwb = &s->hw_buf_info[0];
        for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) {
                r = sc->params.sge.sge_fl_buffer_size[i];
                hwb->size = r;
                hwb->zidx = hwsz_ok(sc, r) ? -1 : -2;
                hwb->next = -1;
        }

        /*
         * Create a sorted list in decreasing order of hw buffer sizes (and so
         * increasing order of spare area) for each software zone.
         *
         * If padding is enabled then the start and end of the buffer must align
         * to the pad boundary; if packing is enabled then they must align with
         * the pack boundary as well.  Allocations from the cluster zones are
         * aligned to min(size, 4K), so the buffer starts at that alignment and
         * ends at hwb->size alignment.  If mbuf inlining is allowed the
         * starting alignment will be reduced to MSIZE and the driver will
         * exercise appropriate caution when deciding on the best buffer layout
         * to use.
         */
        n = 0;  /* no usable buffer size to begin with */
        swz = &s->sw_zone_info[0];
        safe_swz = NULL;
        for (i = 0; i < SW_ZONE_SIZES; i++, swz++) {
                int8_t head = -1, tail = -1;

                swz->size = sw_buf_sizes[i];
                swz->zone = m_getzone(swz->size);
                swz->type = m_gettype(swz->size);

                if (swz->size < PAGE_SIZE) {
                        MPASS(powerof2(swz->size));
                        if (fl_pad && (swz->size % sp->pad_boundary != 0))
                                continue;
                }

                if (swz->size == safest_rx_cluster)
                        safe_swz = swz;

                hwb = &s->hw_buf_info[0];
                for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) {
                        if (hwb->zidx != -1 || hwb->size > swz->size)
                                continue;
#ifdef INVARIANTS
                        if (fl_pad)
                                MPASS(hwb->size % sp->pad_boundary == 0);
#endif
                        hwb->zidx = i;
                        if (head == -1)
                                head = tail = j;
                        else if (hwb->size < s->hw_buf_info[tail].size) {
                                s->hw_buf_info[tail].next = j;
                                tail = j;
                        } else {
                                int8_t *cur;
                                struct hw_buf_info *t;

                                for (cur = &head; *cur != -1; cur = &t->next) {
                                        t = &s->hw_buf_info[*cur];
                                        if (hwb->size == t->size) {
                                                hwb->zidx = -2;
                                                break;
                                        }
                                        if (hwb->size > t->size) {
                                                hwb->next = *cur;
                                                *cur = j;
                                                break;
                                        }
                                }
                        }
                }
                swz->head_hwidx = head;
                swz->tail_hwidx = tail;

                if (tail != -1) {
                        n++;
                        if (swz->size - s->hw_buf_info[tail].size >=
                            CL_METADATA_SIZE)
                                sc->flags |= BUF_PACKING_OK;
                }
        }
        if (n == 0) {
                device_printf(sc->dev, "no usable SGE FL buffer size.\n");
                rc = EINVAL;
        }

        s->safe_hwidx1 = -1;
        s->safe_hwidx2 = -1;
        if (safe_swz != NULL) {
                s->safe_hwidx1 = safe_swz->head_hwidx;
                for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) {
                        int spare;

                        hwb = &s->hw_buf_info[i];
#ifdef INVARIANTS
                        if (fl_pad)
                                MPASS(hwb->size % sp->pad_boundary == 0);
#endif
                        spare = safe_swz->size - hwb->size;
                        if (spare >= CL_METADATA_SIZE) {
                                s->safe_hwidx2 = i;
                                break;
                        }
                }
        }

        if (sc->flags & IS_VF)
                return (0);

        v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
        r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
        if (r != v) {
                device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
                rc = EINVAL;
        }

        m = v = F_TDDPTAGTCB;
        r = t4_read_reg(sc, A_ULP_RX_CTL);
        if ((r & m) != v) {
                device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
                rc = EINVAL;
        }

        m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
            F_RESETDDPOFFSET;
        v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
        r = t4_read_reg(sc, A_TP_PARA_REG5);
        if ((r & m) != v) {
                device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
                rc = EINVAL;
        }

        t4_init_tp_params(sc, 1);

        t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
        t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

        return (rc);
}
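/*
 * Illustration of the bookkeeping built by t4_read_chip_settings() above
 * (sizes assume 4K pages and the stock mbuf cluster zones): the MCLBYTES
 * (2048) software zone might end up with the hw buffer sizes 2048 and
 * 2048 - MSIZE - CL_METADATA_SIZE chained head to tail, i.e. in decreasing
 * size and increasing spare area, so the refill code can trade payload
 * space for room to inline mbufs or store cluster metadata.
 */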
int
t4_create_dma_tag(struct adapter *sc)
{
        int rc;

        rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
            BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
            NULL, &sc->dmat);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to create main DMA tag: %d\n", rc);
        }

        return (rc);
}
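/*
 * The tag created above is deliberately permissive; each descriptor ring is
 * allocated from a child tag derived from it (see alloc_ring()), which is
 * where the per-ring size and alignment constraints are applied.
 */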
void
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
        struct sge_params *sp = &sc->params.sge;

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
            CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A",
            "freelist buffer sizes");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
            NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
            NULL, sp->pad_boundary, "payload pad boundary (bytes)");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
            NULL, sp->spg_len, "status page size (bytes)");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
            NULL, cong_drop, "congestion drop setting");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
            NULL, sp->pack_boundary, "payload pack boundary (bytes)");
}
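/*
 * The read-only nodes above appear under the adapter's sysctl tree.  A
 * sketch of inspecting them from userland, assuming a T5 adapter whose
 * nexus attached as t5nex0:
 *
 *	# sysctl dev.t5nex.0.buffer_sizes dev.t5nex.0.fl_pad
 */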
2011-02-18 08:00:26 +00:00
|
|
|
int
|
|
|
|
t4_destroy_dma_tag(struct adapter *sc)
|
|
|
|
{
|
|
|
|
if (sc->dmat)
|
|
|
|
bus_dma_tag_destroy(sc->dmat);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}

/*
 * Allocate and initialize the firmware event queue and the management queue.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	int rc;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	sysctl_ctx_init(&sc->ctx);
	sc->flags |= ADAP_SYSCTL_CTX;

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

	/*
	 * Management queue.  This is just a control queue that uses the fwq as
	 * its associated iq.
	 */
	if (!(sc->flags & IS_VF))
		rc = alloc_mgmtq(sc);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queue */
	if (sc->flags & ADAP_SYSCTL_CTX) {
		sysctl_ctx_free(&sc->ctx);
		sc->flags &= ~ADAP_SYSCTL_CTX;
	}

	free_mgmtq(sc);
	free_fwq(sc);

	return (0);
}

/* Maximum payload that can be delivered with a single iq descriptor */
static inline int
mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
{
	int payload;

#ifdef TCP_OFFLOAD
	if (toe) {
		int rxcs = G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));

		/* Note that COP can set rx_coalesce on/off per connection. */
		payload = max(mtu, rxcs);
	} else {
#endif
		/* large enough even when hw VLAN extraction is disabled */
		payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
		    ETHER_VLAN_ENCAP_LEN + mtu;
#ifdef TCP_OFFLOAD
	}
#endif

	return (payload);
}
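
/*
 * Allocate all the queues that belong to this virtual interface: netmap
 * queues (if enabled), NIC rx/tx queues, TOE rx/tx queues (if enabled), and
 * the control queue for the main VI of a PF.  Rx queues go first because a
 * tx queue needs the cntxt_id of the iq it is paired with.
 */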
int
t4_setup_vi_queues(struct vi_info *vi)
{
	int rc = 0, i, intr_idx, iqidx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	struct sge_wrq *ctrlq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
	int saved_idx;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif
	char name[16];
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
	int maxp, mtu = ifp->if_mtu;

	/* Interrupt vector to start from (when using multiple vectors) */
	intr_idx = vi->first_intr;

#ifdef DEV_NETMAP
	saved_idx = intr_idx;
	if (ifp->if_capabilities & IFCAP_NETMAP) {

		/* netmap is supported with direct interrupts only. */
		MPASS(!forwarding_intr_to_fwq(sc));

		/*
		 * We don't have buffers to back the netmap rx queues
		 * right now so we create the queues in a way that
		 * doesn't set off any congestion signal in the chip.
		 */
		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
		    CTLFLAG_RD, NULL, "rx queues");
		for_each_nm_rxq(vi, i, nm_rxq) {
			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
			if (rc != 0)
				goto done;
			intr_idx++;
		}

		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
		    CTLFLAG_RD, NULL, "tx queues");
		for_each_nm_txq(vi, i, nm_txq) {
			iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
			rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid);
			if (rc != 0)
				goto done;
		}
	}

	/* Normal rx queues and netmap rx queues share the same interrupts. */
	intr_idx = saved_idx;
#endif

	/*
	 * Allocate rx queues first because a default iqid is required when
	 * creating a tx queue.
	 */
	maxp = mtu_to_max_payload(sc, mtu, 0);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
	    CTLFLAG_RD, NULL, "rx queues");
	for_each_rxq(vi, i, rxq) {

		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_rxq(vi, rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
#endif
#ifdef TCP_OFFLOAD
	maxp = mtu_to_max_payload(sc, mtu, 1);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
	    CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections");
	for_each_ofld_rxq(vi, i, ofld_rxq) {

		init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
		    vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_ofld_rxq(vi, ofld_rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#endif

	/*
	 * Now the tx queues.
	 */
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
	    NULL, "tx queues");
	for_each_txq(vi, i, txq) {
		iqidx = vi->first_rxq + (i % vi->nrxq);
		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(vi->dev), i);
		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
		    sc->sge.rxq[iqidx].iq.cntxt_id, name);

		rc = alloc_txq(vi, txq, i, oid);
		if (rc != 0)
			goto done;
	}
#ifdef TCP_OFFLOAD
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
	    CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
	for_each_ofld_txq(vi, i, ofld_txq) {
		struct sysctl_oid *oid2;

		iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
		snprintf(name, sizeof(name), "%s ofld_txq%d",
		    device_get_nameunit(vi->dev), i);
		init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
		    sc->sge.ofld_rxq[iqidx].iq.cntxt_id, name);

		snprintf(name, sizeof(name), "%d", i);
		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    name, CTLFLAG_RD, NULL, "offload tx queue");

		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
		if (rc != 0)
			goto done;
	}
#endif

	/*
	 * Finally, the control queue.
	 */
	if (!IS_MAIN_VI(vi) || sc->flags & IS_VF)
		goto done;
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
	    NULL, "ctrl queue");
	ctrlq = &sc->sge.ctrlq[pi->port_id];
	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan,
	    sc->sge.rxq[vi->first_rxq].iq.cntxt_id, name);
	rc = alloc_wrq(sc, vi, ctrlq, oid);

done:
	if (rc)
		t4_teardown_vi_queues(vi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_vi_queues(struct vi_info *vi)
{
	int i;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif

	/* Do this before freeing the queues */
	if (vi->flags & VI_SYSCTL_CTX) {
		sysctl_ctx_free(&vi->ctx);
		vi->flags &= ~VI_SYSCTL_CTX;
	}

#ifdef DEV_NETMAP
	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
		for_each_nm_txq(vi, i, nm_txq) {
			free_nm_txq(vi, nm_txq);
		}

		for_each_nm_rxq(vi, i, nm_rxq) {
			free_nm_rxq(vi, nm_rxq);
		}
	}
#endif

	/*
	 * Take down all the tx queues first, as they reference the rx queues
	 * (for egress updates, etc.).
	 */
	if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
		free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);

	for_each_txq(vi, i, txq) {
		free_txq(vi, txq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_txq(vi, i, ofld_txq) {
		free_wrq(sc, ofld_txq);
	}
#endif

	/*
	 * Then take down the rx queues.
	 */
	for_each_rxq(vi, i, rxq) {
		free_rxq(vi, rxq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		free_ofld_rxq(vi, ofld_rxq);
	}
#endif

	return (0);
}

/*
 * Deals with errors and the firmware event queue.  All data rx queues forward
 * their interrupt to the firmware event queue.
 */
void
t4_intr_all(void *arg)
{
	struct adapter *sc = arg;
	struct sge_iq *fwq = &sc->sge.fwq;

	t4_intr_err(arg);
	if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(fwq, 0);
		atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE);
	}
}

/* Deals with error interrupts */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;

	t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
	t4_slow_intr_handler(sc);
}
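
/*
 * Services the given ingress queue if no other thread is doing so already.
 * Appears to be used for event queues (e.g. the firmware event queue) that
 * have their own interrupt; descriptive comment, not in the original.
 */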
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(iq, 0);
		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}
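
/*
 * Interrupt handler for an ingress queue that takes a direct interrupt
 * (rather than forwarding it to the fwq); descriptive comment, not in the
 * original.
 */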
void
t4_intr(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(iq, 0);
		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}
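
/*
 * Interrupt handler for a VI's queues.  The vector may be shared between a
 * netmap rx queue and a NIC rx queue, so both are checked here.
 */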
void
t4_vi_intr(void *arg)
{
	struct irq *irq = arg;

#ifdef DEV_NETMAP
	if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) {
		t4_nm_intr(irq->nm_rxq);
		atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON);
	}
#endif
	if (irq->rxq != NULL)
		t4_intr(irq->rxq);
}
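
/*
 * Non-zero if rx should be queued up and handed to LRO in sorted batches
 * (tcp_lro_queue_mbuf) rather than one mbuf at a time (tcp_lro_rx); the lro
 * ctrl has an mbuf sort queue only when lro_mbuf_max is non-zero.
 */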
static inline int
sort_before_lro(struct lro_ctrl *lro)
{

	return (lro->lro_mbuf_max != 0);
}

/*
 * Deals with anything and everything on the given ingress queue.
 */
static int
service_iq(struct sge_iq *iq, int budget)
{
	struct sge_iq *q;
	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */
	struct sge_fl *fl;			/* Use iff IQ_HAS_FL */
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs = 0, limit;
	int rsp_type, refill;
	uint32_t lq;
	uint16_t fl_hw_cidx;
	struct mbuf *m0;
	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
#if defined(INET) || defined(INET6)
	const struct timeval lro_timeout = {0, sc->lro_timeout};
	struct lro_ctrl *lro = &rxq->lro;
#endif

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));

	limit = budget ? budget : iq->qsize / 16;

	if (iq->flags & IQ_HAS_FL) {
		fl = &rxq->fl;
		fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
	} else {
		fl = NULL;
		fl_hw_cidx = 0;			/* to silence gcc warning */
	}

#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_ADJ_CREDIT) {
		MPASS(sort_before_lro(lro));
		iq->flags &= ~IQ_ADJ_CREDIT;
		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
			tcp_lro_flush_all(lro);
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
			    V_INGRESSQID((u32)iq->cntxt_id) |
			    V_SEINTARM(iq->intr_params));
			return (0);
		}
		ndescs = 1;
	}
#else
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
#endif

	/*
	 * We always come back and check the descriptor ring for new indirect
	 * interrupts and other responses after running a single handler.
	 */
	for (;;) {
		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

			rmb();

			refill = 0;
			m0 = NULL;
			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
			lq = be32toh(d->rsp.pldbuflen_qid);

			switch (rsp_type) {
			case X_RSPD_TYPE_FLBUF:

				KASSERT(iq->flags & IQ_HAS_FL,
				    ("%s: data for an iq (%p) with no freelist",
				    __func__, iq));

				m0 = get_fl_payload(sc, fl, lq);
				if (__predict_false(m0 == NULL))
					goto process_iql;
				refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2;
#ifdef T4_PKT_TIMESTAMP
				/*
				 * 60 bit timestamp for the payload is
				 * *(uint64_t *)m0->m_pktdat.  Note that it is
				 * in the leading free-space in the mbuf.  The
				 * kernel can clobber it during a pullup,
				 * m_copymdata, etc.  You need to make sure that
				 * the mbuf reaches you unmolested if you care
				 * about the timestamp.
				 */
				*(uint64_t *)m0->m_pktdat =
				    be64toh(ctrl->u.last_flit) &
				    0xfffffffffffffff;
#endif

				/* fall through */

			case X_RSPD_TYPE_CPL:
				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
				    ("%s: bad opcode %02x.", __func__,
				    d->rss.opcode));
				t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
				break;

			case X_RSPD_TYPE_INTR:

				/*
				 * Interrupts should be forwarded only to queues
				 * that are not forwarding their interrupts.
				 * This means service_iq can recurse but only 1
				 * level deep.
				 */
				KASSERT(budget == 0,
				    ("%s: budget %u, rsp_type %u", __func__,
				    budget, rsp_type));

				/*
				 * There are 1K interrupt-capable queues (qids 0
				 * through 1023).  A response type indicating a
				 * forwarded interrupt with a qid >= 1K is an
				 * iWARP async notification.
				 */
				if (lq >= 1024) {
					t4_an_handler(iq, &d->rsp);
					break;
				}

				q = sc->sge.iqmap[lq - sc->sge.iq_start -
				    sc->sge.iq_base];
				if (atomic_cmpset_int(&q->state, IQS_IDLE,
				    IQS_BUSY)) {
					if (service_iq(q, q->qsize / 16) == 0) {
						atomic_cmpset_int(&q->state,
						    IQS_BUSY, IQS_IDLE);
					} else {
						STAILQ_INSERT_TAIL(&iql, q,
						    link);
					}
				}
				break;

			default:
				KASSERT(0,
				    ("%s: illegal response type %d on iq %p",
				    __func__, rsp_type, iq));
				log(LOG_ERR,
				    "%s: illegal response type %d on iq %p",
				    device_get_nameunit(sc->dev), rsp_type, iq);
				break;
			}

			d++;
			if (__predict_false(++iq->cidx == iq->sidx)) {
				iq->cidx = 0;
				iq->gen ^= F_RSPD_GEN;
				d = &iq->desc[0];
			}
			if (__predict_false(++ndescs == limit)) {
				t4_write_reg(sc, sc->sge_gts_reg,
				    V_CIDXINC(ndescs) |
				    V_INGRESSQID(iq->cntxt_id) |
				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
				ndescs = 0;

#if defined(INET) || defined(INET6)
				if (iq->flags & IQ_LRO_ENABLED &&
				    !sort_before_lro(lro) &&
				    sc->lro_timeout != 0) {
					tcp_lro_flush_inactive(lro,
					    &lro_timeout);
				}
#endif

				if (budget) {
					if (iq->flags & IQ_HAS_FL) {
						FL_LOCK(fl);
						refill_fl(sc, fl, 32);
						FL_UNLOCK(fl);
					}
					return (EINPROGRESS);
				}
			}
			if (refill) {
				FL_LOCK(fl);
				refill_fl(sc, fl, 32);
				FL_UNLOCK(fl);
				fl_hw_cidx = fl->hw_cidx;
			}
		}

process_iql:
		if (STAILQ_EMPTY(&iql))
			break;

		/*
		 * Process the head only, and send it to the back of the list if
		 * it's still not done.
		 */
		q = STAILQ_FIRST(&iql);
		STAILQ_REMOVE_HEAD(&iql, link);
		if (service_iq(q, q->qsize / 8) == 0)
			atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
		else
			STAILQ_INSERT_TAIL(&iql, q, link);
	}

#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
			MPASS(sort_before_lro(lro));
			/* hold back one credit and don't flush LRO state */
			iq->flags |= IQ_ADJ_CREDIT;
			ndescs--;
		} else {
			tcp_lro_flush_all(lro);
		}
	}
#endif

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	if (iq->flags & IQ_HAS_FL) {
		int starved;

		FL_LOCK(fl);
		starved = refill_fl(sc, fl, 64);
		FL_UNLOCK(fl);
		if (__predict_false(starved != 0))
			add_fl_to_sfl(sc, fl);
	}

	return (0);
}
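
/*
 * A cluster carries a struct cluster_metadata if it belongs to a packing
 * freelist or if its layout reserves room for inline mbufs (region1 > 0).
 * The metadata sits at the very end of the cluster, in region3.
 */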
static inline int
cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll)
{
	int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0;

	if (rc)
		MPASS(cll->region3 >= CL_METADATA_SIZE);

	return (rc);
}

static inline struct cluster_metadata *
cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll,
    caddr_t cl)
{

	if (cl_has_metadata(fl, cll)) {
		struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];

		return ((struct cluster_metadata *)(cl + swz->size) - 1);
	}
	return (NULL);
}
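
/*
 * External-buffer free routine for rx clusters handed up via m_extaddref;
 * returns the cluster to the UMA zone it was allocated from.
 */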
static void
rxb_free(struct mbuf *m)
{
	uma_zone_t zone = m->m_ext.ext_arg1;
	void *cl = m->m_ext.ext_arg2;

	uma_zfree(zone, cl);
	counter_u64_add(extfree_rels, 1);
}

/*
 * The mbuf returned by this function could be allocated from zone_mbuf or
 * constructed in spare room in the cluster.
 *
 * The mbuf carries the payload in one of these ways
 * a) frame inside the mbuf (mbuf from zone_mbuf)
 * b) m_cljset (for clusters without metadata) zone_mbuf
 * c) m_extaddref (cluster with metadata) inline mbuf
 * d) m_extaddref (cluster with metadata) zone_mbuf
 */
static struct mbuf *
get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct mbuf *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct cluster_layout *cll = &sd->cll;
	struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];
	struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx];
	struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl);
	int len, blen;
	caddr_t payload;

	blen = hwb->size - fl->rx_offset;	/* max possible in this buf */
	len = min(remaining, blen);
	payload = sd->cl + cll->region1 + fl->rx_offset;
	if (fl->flags & FL_BUF_PACKING) {
		const u_int l = fr_offset + len;
		const u_int pad = roundup2(l, fl->buf_boundary) - l;

		if (fl->rx_offset + len + pad < hwb->size)
			blen = len + pad;
		MPASS(fl->rx_offset + blen <= hwb->size);
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
	}

	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {

		/*
		 * Copy payload into a freshly allocated mbuf.
		 */

		m = fr_offset == 0 ?
		    m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA);
		if (m == NULL)
			return (NULL);
		fl->mbuf_allocated++;
#ifdef T4_PKT_TIMESTAMP
		/* Leave room for a timestamp */
		m->m_data += 8;
#endif
		/* copy data to mbuf */
		bcopy(payload, mtod(m, caddr_t), len);

	} else if (sd->nmbuf * MSIZE < cll->region1) {

		/*
		 * There's spare room in the cluster for an mbuf.  Create one
		 * and associate it with the payload that's in the cluster.
		 */

		MPASS(clm != NULL);
		m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE);
		/* No bzero required */
		if (m_init(m, M_NOWAIT, MT_DATA,
		    fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE))
			return (NULL);
		fl->mbuf_inlined++;
		m_extaddref(m, payload, blen, &clm->refcount, rxb_free,
		    swz->zone, sd->cl);
		if (sd->nmbuf++ == 0)
			counter_u64_add(extfree_refs, 1);

	} else {

		/*
		 * Grab an mbuf from zone_mbuf and associate it with the
		 * payload in the cluster.
		 */

		m = fr_offset == 0 ?
		    m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA);
		if (m == NULL)
			return (NULL);
		fl->mbuf_allocated++;
		if (clm != NULL) {
			m_extaddref(m, payload, blen, &clm->refcount,
			    rxb_free, swz->zone, sd->cl);
			if (sd->nmbuf++ == 0)
				counter_u64_add(extfree_refs, 1);
		} else {
			m_cljset(m, sd->cl, swz->type);
			sd->cl = NULL;	/* consumed, not a recycle candidate */
		}
	}
	if (fr_offset == 0)
		m->m_pkthdr.len = remaining;
	m->m_len = len;

	if (fl->flags & FL_BUF_PACKING) {
		fl->rx_offset += blen;
		MPASS(fl->rx_offset <= hwb->size);
		if (fl->rx_offset < hwb->size)
			return (m);	/* without advancing the cidx */
	}

	if (__predict_false(++fl->cidx % 8 == 0)) {
		uint16_t cidx = fl->cidx / 8;

		if (__predict_false(cidx == fl->sidx))
			fl->cidx = cidx = 0;
		fl->hw_cidx = cidx;
	}
	fl->rx_offset = 0;

	return (m);
}
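
/*
 * Assemble the payload described by len_newbuf into an mbuf chain, resuming
 * a partially assembled frame if an earlier attempt ran out of mbufs
 * (FL_BUF_RESUME).  Returns NULL if the chain could not be completed; the
 * partial state is stashed in the freelist so the caller can retry later.
 */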
static struct mbuf *
get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf)
{
	struct mbuf *m0, *m, **pnext;
	u_int remaining;
	const u_int total = G_RSPD_LEN(len_newbuf);

	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
		M_ASSERTPKTHDR(fl->m0);
		MPASS(fl->m0->m_pkthdr.len == total);
		MPASS(fl->remaining < total);

		m0 = fl->m0;
		pnext = fl->pnext;
		remaining = fl->remaining;
		fl->flags &= ~FL_BUF_RESUME;
		goto get_segment;
	}

	if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
		fl->rx_offset = 0;
		if (__predict_false(++fl->cidx % 8 == 0)) {
			uint16_t cidx = fl->cidx / 8;

			if (__predict_false(cidx == fl->sidx))
				fl->cidx = cidx = 0;
			fl->hw_cidx = cidx;
		}
	}

	/*
	 * Payload starts at rx_offset in the current hw buffer.  Its length is
	 * 'len' and it may span multiple hw buffers.
	 */

	m0 = get_scatter_segment(sc, fl, 0, total);
	if (m0 == NULL)
		return (NULL);
	remaining = total - m0->m_len;
	pnext = &m0->m_next;
	while (remaining > 0) {
get_segment:
		MPASS(fl->rx_offset == 0);
		m = get_scatter_segment(sc, fl, total - remaining, remaining);
		if (__predict_false(m == NULL)) {
			fl->m0 = m0;
			fl->pnext = pnext;
			fl->remaining = remaining;
			fl->flags |= FL_BUF_RESUME;
			return (NULL);
		}
		*pnext = m;
		pnext = &m->m_next;
		remaining -= m->m_len;
	}
	*pnext = NULL;

	M_ASSERTPKTHDR(m0);
	return (m0);
}
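
/*
 * CPL_RX_PKT handler: fix up the mbuf (pktshift, checksum flags, VLAN tag,
 * RSS hash) and hand it to LRO or directly to the ifnet's input routine.
 */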
static int
t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
{
	struct sge_rxq *rxq = iq_to_rxq(iq);
	struct ifnet *ifp = rxq->ifp;
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxq->lro;
#endif
	static const int sw_hashtype[4][2] = {
		{M_HASHTYPE_NONE, M_HASHTYPE_NONE},
		{M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
		{M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
		{M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
	};

	KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
	m0->m_len -= sc->params.sge.fl_pktshift;
	m0->m_data += sc->params.sge.fl_pktshift;

	m0->m_pkthdr.rcvif = ifp;
	M_HASHTYPE_SET(m0, sw_hashtype[rss->hash_type][rss->ipv6]);
	m0->m_pkthdr.flowid = be32toh(rss->hash_val);

	if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) {
		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    cpl->l2info & htobe32(F_RXF_IP)) {
			m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
		    cpl->l2info & htobe32(F_RXF_IP6)) {
			m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
			    CSUM_PSEUDO_HDR);
			rxq->rxcsum++;
		}

		if (__predict_false(cpl->ip_frag))
			m0->m_pkthdr.csum_data = be16toh(cpl->csum);
		else
			m0->m_pkthdr.csum_data = 0xffff;
	}

	if (cpl->vlan_ex) {
		m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
		m0->m_flags |= M_VLANTAG;
		rxq->vlan_extraction++;
	}

#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (sort_before_lro(lro)) {
			tcp_lro_queue_mbuf(lro, m0);
			return (0); /* queued for sort, then LRO */
		}
		if (tcp_lro_rx(lro, m0, 0) == 0)
			return (0); /* queued for LRO */
	}
#endif
	ifp->if_input(ifp, m0);

	return (0);
}

/*
 * Must drain the wrq or make sure that someone else will.
 */
static void
wrq_tx_drain(void *arg, int n)
{
	struct sge_wrq *wrq = arg;
	struct sge_eq *eq = &wrq->eq;

	EQ_LOCK(eq);
	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
		drain_wrq_wr_list(wrq->adapter, wrq);
	EQ_UNLOCK(eq);
}
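
/*
 * Copy as many pending work requests as will fit from the wrq's software
 * wr_list into the hardware descriptor ring and ring the doorbell.  Runs
 * with the EQ lock held and stops early if the ring fills up.
 */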
static void
drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq)
{
	struct sge_eq *eq = &wrq->eq;
	u_int available, dbdiff;	/* # of hardware descriptors */
	u_int n;
	struct wrqe *wr;
	struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */

	EQ_LOCK_ASSERT_OWNED(eq);
	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
	wr = STAILQ_FIRST(&wrq->wr_list);
	MPASS(wr != NULL);	/* Must be called with something useful to do */
	MPASS(eq->pidx == eq->dbidx);
	dbdiff = 0;

	do {
		eq->cidx = read_hw_cidx(eq);
		if (eq->pidx == eq->cidx)
			available = eq->sidx - 1;
		else
			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;

		MPASS(wr->wrq == wrq);
		n = howmany(wr->wr_len, EQ_ESIZE);
		if (available < n)
			break;

		dst = (void *)&eq->desc[eq->pidx];
		if (__predict_true(eq->sidx - eq->pidx > n)) {
			/* Won't wrap, won't end exactly at the status page. */
			bcopy(&wr->wr[0], dst, wr->wr_len);
			eq->pidx += n;
		} else {
			int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE;

			bcopy(&wr->wr[0], dst, first_portion);
			if (wr->wr_len > first_portion) {
				bcopy(&wr->wr[first_portion], &eq->desc[0],
				    wr->wr_len - first_portion);
			}
			eq->pidx = n - (eq->sidx - eq->pidx);
		}
		wrq->tx_wrs_copied++;

		if (available < eq->sidx / 4 &&
		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
			    F_FW_WR_EQUEQ);
			eq->equeqidx = eq->pidx;
		} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			eq->equeqidx = eq->pidx;
		}

		dbdiff += n;
		if (dbdiff >= 16) {
			ring_eq_db(sc, eq, dbdiff);
			dbdiff = 0;
		}

		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
		free_wrqe(wr);
		MPASS(wrq->nwr_pending > 0);
		wrq->nwr_pending--;
		MPASS(wrq->ndesc_needed >= n);
		wrq->ndesc_needed -= n;
	} while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL);

	if (dbdiff)
		ring_eq_db(sc, eq, dbdiff);
}

/*
 * Doesn't fail.  Holds on to work requests it can't send right away.
 */
void
t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
{
#ifdef INVARIANTS
	struct sge_eq *eq = &wrq->eq;
#endif

	EQ_LOCK_ASSERT_OWNED(eq);
	MPASS(wr != NULL);
	MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN);
	MPASS((wr->wr_len & 0x7) == 0);

	STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
	wrq->nwr_pending++;
	wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE);

	if (!TAILQ_EMPTY(&wrq->incomplete_wrs))
		return;	/* commit_wrq_wr will drain wr_list as well. */

	drain_wrq_wr_list(sc, wrq);

	/* Doorbell must have caught up to the pidx. */
	MPASS(eq->pidx == eq->dbidx);
}
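
/*
 * Called when the interface MTU changes: recompute the maximum rx payload
 * and pick the best refill source for every freelist on the interface.
 */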
void
t4_update_fl_bufsize(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	struct sge_fl *fl;
	int i, maxp, mtu = ifp->if_mtu;

	maxp = mtu_to_max_payload(sc, mtu, 0);
	for_each_rxq(vi, i, rxq) {
		fl = &rxq->fl;

		FL_LOCK(fl);
		find_best_refill_source(sc, fl, maxp);
		FL_UNLOCK(fl);
	}
#ifdef TCP_OFFLOAD
	maxp = mtu_to_max_payload(sc, mtu, 1);
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		fl = &ofld_rxq->fl;

		FL_LOCK(fl);
		find_best_refill_source(sc, fl, maxp);
		FL_UNLOCK(fl);
	}
#endif
}
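
/*
 * The # of SGL segments and the WR length (in 16-byte units) for an outbound
 * mbuf are computed up front (see parse_pkt) and stashed in otherwise unused
 * pkthdr fields (l5hlen and PH_loc.eight[0]) so they don't have to be
 * recalculated on the tx path.  The accessors below hide that trick.
 */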
static inline int
mbuf_nsegs(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	KASSERT(m->m_pkthdr.l5hlen > 0,
	    ("%s: mbuf %p missing information on # of segments.", __func__, m));

	return (m->m_pkthdr.l5hlen);
}

static inline void
set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.l5hlen = nsegs;
}

static inline int
mbuf_len16(struct mbuf *m)
{
	int n;

	M_ASSERTPKTHDR(m);
	n = m->m_pkthdr.PH_loc.eight[0];
	MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);

	return (n);
}

static inline void
set_mbuf_len16(struct mbuf *m, uint8_t len16)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_loc.eight[0] = len16;
}
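
/*
 * Predicates that look at the csum_flags and M_VLANTAG set by the stack and
 * decide which offloads (TSO, L3/L4 checksum, VLAN insertion) the work
 * request must ask the hardware to perform.
 */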
static inline int
needs_tso(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		KASSERT(m->m_pkthdr.tso_segsz > 0,
		    ("%s: TSO requested in mbuf %p but MSS not provided",
		    __func__, m));
		return (1);
	}

	return (0);
}

static inline int
needs_l3_csum(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
		return (1);
	return (0);
}

static inline int
needs_l4_csum(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
	    CSUM_TCP_IPV6 | CSUM_TSO))
		return (1);
	return (0);
}

static inline int
needs_vlan_insertion(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_VLANTAG) {
		KASSERT(m->m_pkthdr.ether_vtag != 0,
		    ("%s: HWVLAN requested in mbuf %p but tag not provided",
		    __func__, m));
		return (1);
	}
	return (0);
}
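
/*
 * Walk 'len' bytes forward from (*pm, *poffset) in an mbuf chain and return
 * a pointer to the byte at the new position, updating *pm and *poffset to
 * refer to it.  Used while parsing packet headers that may straddle mbufs.
 */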
static void *
m_advance(struct mbuf **pm, int *poffset, int len)
{
	struct mbuf *m = *pm;
	int offset = *poffset;
	uintptr_t p = 0;

	MPASS(len > 0);

	for (;;) {
		if (offset + len < m->m_len) {
			offset += len;
			p = mtod(m, uintptr_t) + offset;
			break;
		}
		len -= m->m_len - offset;
		m = m->m_next;
		offset = 0;
		MPASS(m != NULL);
	}
	*poffset = offset;
	*pm = m;
	return ((void *)p);
}

/*
 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain
 * must have at least one mbuf that's not empty.
 */
static inline int
count_mbuf_nsegs(struct mbuf *m)
{
	vm_paddr_t lastb, next;
	vm_offset_t va;
	int len, nsegs;

	MPASS(m != NULL);

	nsegs = 0;
	lastb = 0;
	for (; m; m = m->m_next) {

		len = m->m_len;
		if (__predict_false(len == 0))
			continue;
		va = mtod(m, vm_offset_t);
		next = pmap_kextract(va);
		nsegs += sglist_count(m->m_data, len);
		if (lastb + 1 == next)
			nsegs--;
		lastb = pmap_kextract(va + len - 1);
	}

	MPASS(nsegs > 0);
	return (nsegs);
}
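
/*
 * Worked example (illustrative, not from the original source):
 * sglist_count() reports the physical segments of each mbuf's data on its
 * own, so two mbufs whose buffers happen to be physically adjacent would be
 * counted twice.  The lastb/next comparison above merges that boundary:
 *
 *	mbuf A ends at physical address   lastb = 0x12345fff
 *	mbuf B starts at physical address next  = 0x12346000 == lastb + 1
 *
 * so nsegs is decremented and A and B contribute a single DMA segment
 * between them.
 */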

/*
 * Analyze the mbuf to determine its tx needs.  The mbuf passed in may change:
 * a) caller can assume it's been freed if this function returns with an error.
 * b) it may get defragmented (m_defrag or m_pullup) if the gather list is too
 *    long for the hardware.
 */
int
parse_pkt(struct adapter *sc, struct mbuf **mp)
{
	struct mbuf *m0 = *mp, *m;
	int rc, nsegs, defragged = 0, offset;
	struct ether_header *eh;
	void *l3hdr;
#if defined(INET) || defined(INET6)
	struct tcphdr *tcp;
#endif
	uint16_t eh_type;

	M_ASSERTPKTHDR(m0);
	if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) {
		rc = EINVAL;
fail:
		m_freem(m0);
		*mp = NULL;
		return (rc);
	}
restart:
	/*
	 * First count the number of gather list segments in the payload.
	 * Defrag the mbuf if nsegs exceeds the hardware limit.
	 */
	M_ASSERTPKTHDR(m0);
	MPASS(m0->m_pkthdr.len > 0);
	nsegs = count_mbuf_nsegs(m0);
	if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) {
		if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) {
			rc = EFBIG;
			goto fail;
		}
		*mp = m0 = m;	/* update caller's copy after defrag */
		goto restart;
	}

	if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) {
		m0 = m_pullup(m0, m0->m_pkthdr.len);
		if (m0 == NULL) {
			/* Should have left well enough alone. */
			rc = EFBIG;
			goto fail;
		}
		*mp = m0;	/* update caller's copy after pullup */
		goto restart;
	}
	set_mbuf_nsegs(m0, nsegs);
	if (sc->flags & IS_VF)
		set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0)));
	else
		set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0)));

	if (!needs_tso(m0) &&
	    !(sc->flags & IS_VF && (needs_l3_csum(m0) || needs_l4_csum(m0))))
		return (0);

	m = m0;
	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	if (eh_type == ETHERTYPE_VLAN) {
		struct ether_vlan_header *evh = (void *)eh;

		eh_type = ntohs(evh->evl_proto);
		m0->m_pkthdr.l2hlen = sizeof(*evh);
	} else
		m0->m_pkthdr.l2hlen = sizeof(*eh);

	offset = 0;
	l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen);

	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6 = l3hdr;

		MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP);

		m0->m_pkthdr.l3hlen = sizeof(*ip6);
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip = l3hdr;

		m0->m_pkthdr.l3hlen = ip->ip_hl * 4;
		break;
	}
#endif
	default:
		panic("%s: ethertype 0x%04x unknown.  if_cxgbe must be compiled"
		    " with the same INET/INET6 options as the kernel.",
		    __func__, eh_type);
	}

#if defined(INET) || defined(INET6)
	if (needs_tso(m0)) {
		tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen);
		m0->m_pkthdr.l4hlen = tcp->th_off * 4;
	}
#endif
	MPASS(m0 == *mp);
	return (0);
}
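
/*
 * Caller sketch (illustrative only; in this driver the if_transmit handler
 * is assumed to do the equivalent): on error the mbuf has already been freed
 * and *mp is NULL, and on success *mp may point at a different mbuf than the
 * one passed in.
 *
 *	rc = parse_pkt(sc, &m);
 *	if (__predict_false(rc != 0))
 *		return (rc);	// m is gone, do not touch it
 *	// m may have been defragged/pulled up; use *only* the updated m.
 */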

void *
start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
{
	struct sge_eq *eq = &wrq->eq;
	struct adapter *sc = wrq->adapter;
	int ndesc, available;
	struct wrqe *wr;
	void *w;

	MPASS(len16 > 0);
	ndesc = howmany(len16, EQ_ESIZE / 16);
	MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);

	EQ_LOCK(eq);

	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
		drain_wrq_wr_list(sc, wrq);

	if (!STAILQ_EMPTY(&wrq->wr_list)) {
slowpath:
		EQ_UNLOCK(eq);
		wr = alloc_wrqe(len16 * 16, wrq);
		if (__predict_false(wr == NULL))
			return (NULL);
		cookie->pidx = -1;
		cookie->ndesc = ndesc;
		return (&wr->wr);
	}

	eq->cidx = read_hw_cidx(eq);
	if (eq->pidx == eq->cidx)
		available = eq->sidx - 1;
	else
		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
	if (available < ndesc)
		goto slowpath;

	cookie->pidx = eq->pidx;
	cookie->ndesc = ndesc;
	TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link);

	w = &eq->desc[eq->pidx];
	IDXINCR(eq->pidx, ndesc, eq->sidx);
	if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
		w = &wrq->ss[0];
		wrq->ss_pidx = cookie->pidx;
		wrq->ss_len = len16 * 16;
	}

	EQ_UNLOCK(eq);

	return (w);
}
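
/*
 * Note on the free-space arithmetic above (added commentary, not original):
 * one descriptor is always left unused so that pidx == cidx unambiguously
 * means "empty" rather than "full".  IDXDIFF(cidx, pidx, sidx) is the
 * wrap-aware distance from pidx forward to cidx, e.g. with sidx = 512,
 * pidx = 500 and cidx = 10:
 *
 *	IDXDIFF(10, 500, 512) = 512 - 500 + 10 = 22, so available = 21.
 */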

void
commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
{
	struct sge_eq *eq = &wrq->eq;
	struct adapter *sc = wrq->adapter;
	int ndesc, pidx;
	struct wrq_cookie *prev, *next;

	if (cookie->pidx == -1) {
		struct wrqe *wr = __containerof(w, struct wrqe, wr);

		t4_wrq_tx(sc, wr);
		return;
	}

	if (__predict_false(w == &wrq->ss[0])) {
		int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE;

		MPASS(wrq->ss_len > n);	/* WR had better wrap around. */
		bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n);
		bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n);
		wrq->tx_wrs_ss++;
	} else
		wrq->tx_wrs_direct++;

	EQ_LOCK(eq);
	ndesc = cookie->ndesc;	/* Can be more than SGE_MAX_WR_NDESC here. */
	pidx = cookie->pidx;
	MPASS(pidx >= 0 && pidx < eq->sidx);
	prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link);
	next = TAILQ_NEXT(cookie, link);
	if (prev == NULL) {
		MPASS(pidx == eq->dbidx);
		if (next == NULL || ndesc >= 16)
			ring_eq_db(wrq->adapter, eq, ndesc);
		else {
			MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
			next->pidx = pidx;
			next->ndesc += ndesc;
		}
	} else {
		MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc);
		prev->ndesc += ndesc;
	}
	TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link);

	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
		drain_wrq_wr_list(sc, wrq);

#ifdef INVARIANTS
	if (TAILQ_EMPTY(&wrq->incomplete_wrs)) {
		/* Doorbell must have caught up to the pidx. */
		MPASS(wrq->eq.pidx == wrq->eq.dbidx);
	}
#endif
	EQ_UNLOCK(eq);
}
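
/*
 * Usage sketch (illustrative only): start_wrq_wr() and commit_wrq_wr()
 * always come in pairs, with the cookie carrying the reservation between
 * them:
 *
 *	struct wrq_cookie cookie;
 *	void *w;
 *
 *	w = start_wrq_wr(wrq, howmany(wr_len, 16), &cookie);
 *	if (w == NULL)
 *		return (ENOMEM);
 *	... build the work request at w (may be the spill-over area) ...
 *	commit_wrq_wr(wrq, w, &cookie);
 *
 * Out-of-order commits are handled above by merging a cookie's descriptors
 * into a neighbor's so the doorbell only ever advances contiguously.
 */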

static u_int
can_resume_eth_tx(struct mp_ring *r)
{
	struct sge_eq *eq = r->cookie;

	return (total_available_tx_desc(eq) > eq->sidx / 8);
}

static inline int
cannot_use_txpkts(struct mbuf *m)
{
	/* maybe put a GL limit too, to avoid silliness? */

	return (needs_tso(m));
}

static inline int
discard_tx(struct sge_eq *eq)
{

	return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED);
}

/*
 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
 * be consumed.  Return the actual number consumed.  0 indicates a stall.
 */
static u_int
eth_tx(struct mp_ring *r, u_int cidx, u_int pidx)
{
	struct sge_txq *txq = r->cookie;
	struct sge_eq *eq = &txq->eq;
	struct ifnet *ifp = txq->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	u_int total, remaining;		/* # of packets */
	u_int available, dbdiff;	/* # of hardware descriptors */
	u_int n, next_cidx;
	struct mbuf *m0, *tail;
	struct txpkts txp;
	struct fw_eth_tx_pkts_wr *wr;	/* any fw WR struct will do */

	remaining = IDXDIFF(pidx, cidx, r->size);
	MPASS(remaining > 0);	/* Must not be called without work to do. */
	total = 0;

	TXQ_LOCK(txq);
	if (__predict_false(discard_tx(eq))) {
		while (cidx != pidx) {
			m0 = r->items[cidx];
			m_freem(m0);
			if (++cidx == r->size)
				cidx = 0;
		}
		reclaim_tx_descs(txq, 2048);
		total = remaining;
		goto done;
	}

	/* How many hardware descriptors do we have readily available. */
	if (eq->pidx == eq->cidx)
		available = eq->sidx - 1;
	else
		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
	dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx);

	while (remaining > 0) {

		m0 = r->items[cidx];
		M_ASSERTPKTHDR(m0);
		MPASS(m0->m_nextpkt == NULL);

		if (available < SGE_MAX_WR_NDESC) {
			available += reclaim_tx_descs(txq, 64);
			if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16))
				break;	/* out of descriptors */
		}

		next_cidx = cidx + 1;
		if (__predict_false(next_cidx == r->size))
			next_cidx = 0;

		wr = (void *)&eq->desc[eq->pidx];
		if (sc->flags & IS_VF) {
			total++;
			remaining--;
			ETHER_BPF_MTAP(ifp, m0);
			n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0,
			    available);
		} else if (remaining > 1 &&
		    try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) {

			/* pkts at cidx, next_cidx should both be in txp. */
			MPASS(txp.npkt == 2);
			tail = r->items[next_cidx];
			MPASS(tail->m_nextpkt == NULL);
			ETHER_BPF_MTAP(ifp, m0);
			ETHER_BPF_MTAP(ifp, tail);
			m0->m_nextpkt = tail;

			if (__predict_false(++next_cidx == r->size))
				next_cidx = 0;

			while (next_cidx != pidx) {
				if (add_to_txpkts(r->items[next_cidx], &txp,
				    available) != 0)
					break;
				tail->m_nextpkt = r->items[next_cidx];
				tail = tail->m_nextpkt;
				ETHER_BPF_MTAP(ifp, tail);
				if (__predict_false(++next_cidx == r->size))
					next_cidx = 0;
			}

			n = write_txpkts_wr(txq, wr, m0, &txp, available);
			total += txp.npkt;
			remaining -= txp.npkt;
		} else {
			total++;
			remaining--;
			ETHER_BPF_MTAP(ifp, m0);
			n = write_txpkt_wr(txq, (void *)wr, m0, available);
		}
		MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC);

		available -= n;
		dbdiff += n;
		IDXINCR(eq->pidx, n, eq->sidx);

		if (total_available_tx_desc(eq) < eq->sidx / 4 &&
		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
			    F_FW_WR_EQUEQ);
			eq->equeqidx = eq->pidx;
		} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			eq->equeqidx = eq->pidx;
		}

		if (dbdiff >= 16 && remaining >= 4) {
			ring_eq_db(sc, eq, dbdiff);
			available += reclaim_tx_descs(txq, 4 * dbdiff);
			dbdiff = 0;
		}

		cidx = next_cidx;
	}
	if (dbdiff != 0) {
		ring_eq_db(sc, eq, dbdiff);
		reclaim_tx_descs(txq, 32);
	}
done:
	TXQ_UNLOCK(txq);

	return (total);
}
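
/*
 * Added commentary (not in the original): eth_tx() batches doorbell writes.
 * dbdiff counts descriptors written since the last doorbell; the bell is
 * rung mid-loop only once 16+ descriptors are pending and there is still a
 * backlog (remaining >= 4), otherwise a single doorbell after the loop
 * covers everything.  E.g. 5 single-descriptor packets never hit the
 * mid-loop threshold and end with one ring_eq_db(sc, eq, 5).
 */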

static inline void
init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
    int qsize)
{

	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
	    ("%s: bad pktc_idx %d", __func__, pktc_idx));

	iq->flags = 0;
	iq->adapter = sc;
	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
	if (pktc_idx >= 0) {
		iq->intr_params |= F_QINTR_CNT_EN;
		iq->intr_pktc_idx = pktc_idx;
	}
	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
	iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
}
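
/*
 * Example (illustrative): the SGE appends a status page to every queue, so
 * the usable ring is qsize minus the entries taken by the status page.
 * Assuming IQ_ESIZE = 64 and a 64-byte status page (spg_len == 64), a qsize
 * of 1024 entries gives sidx = 1024 - 64 / 64 = 1023.
 */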

static inline void
init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
{

	fl->qsize = qsize;
	fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
	strlcpy(fl->lockname, name, sizeof(fl->lockname));
	if (sc->flags & BUF_PACKING_OK &&
	    ((!is_t4(sc) && buffer_packing) ||	/* T5+: enabled unless 0 */
	    (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */
		fl->flags |= FL_BUF_PACKING;
	find_best_refill_source(sc, fl, maxp);
	find_safe_refill_source(sc, fl);
}

static inline void
init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
    uint8_t tx_chan, uint16_t iqid, char *name)
{
	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));

	eq->flags = eqtype & EQ_TYPEMASK;
	eq->tx_chan = tx_chan;
	eq->iqid = iqid;
	eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
	strlcpy(eq->lockname, name, sizeof(eq->lockname));
}

static int
alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_addr_t *pa, void **va)
{
	int rc;

	rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
		goto done;
	}

	rc = bus_dmamem_alloc(*tag, va,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (rc != 0) {
		device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
		goto done;
	}

	rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
	if (rc != 0) {
		device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
		goto done;
	}
done:
	if (rc)
		free_ring(sc, *tag, *map, *pa, *va);

	return (rc);
}

static int
free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
    bus_addr_t pa, void *va)
{
	if (pa)
		bus_dmamap_unload(tag, map);
	if (va)
		bus_dmamem_free(tag, va, map);
	if (tag)
		bus_dma_tag_destroy(tag);

	return (0);
}
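
/*
 * Usage note (added): alloc_ring() and free_ring() pair safely even on
 * partial failure.  alloc_ring() calls free_ring() itself on its error path,
 * and free_ring() checks each resource before releasing it, so a caller
 * sketch like this needs no extra unwind logic:
 *
 *	rc = alloc_ring(sc, len, &tag, &map, &ba, (void **)&desc);
 *	if (rc != 0)
 *		return (rc);	// nothing left to clean up
 *	...
 *	free_ring(sc, tag, map, ba, desc);
 */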

/*
 * Allocates the ring for an ingress queue and an optional freelist.  If the
 * freelist is specified it will be allocated and then associated with the
 * ingress queue.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 *
 * If the ingress queue will take interrupts directly then the intr_idx
 * specifies the vector, starting from 0.  -1 means the interrupts for this
 * queue should be forwarded to the fwq.
 */
static int
alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
    int intr_idx, int cong)
{
	int rc, i, cntxt_id;
	size_t len;
	struct fw_iq_cmd c;
	struct port_info *pi = vi->pi;
	struct adapter *sc = iq->adapter;
	struct sge_params *sp = &sc->params.sge;
	__be32 v = 0;

	len = iq->qsize * IQ_ESIZE;
	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
	    (void **)&iq->desc);
	if (rc != 0)
		return (rc);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));

	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));

	/* Special handling for firmware event queue */
	if (iq == &sc->sge.fwq)
		v |= F_FW_IQ_CMD_IQASYNCH;

	if (intr_idx < 0) {
		/* Forwarded interrupts, all headed to fwq */
		v |= F_FW_IQ_CMD_IQANDST;
		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id);
	} else {
		KASSERT(intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
		v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
	}

	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(iq->qsize);
	c.iqaddr = htobe64(iq->ba);
	if (cong >= 0)
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);

	if (fl) {
		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);

		len = fl->qsize * EQ_ESIZE;
		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
		    &fl->ba, (void **)&fl->desc);
		if (rc)
			return (rc);

		/* Allocate space for one software descriptor per buffer. */
		rc = alloc_fl_sdesc(fl);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to setup fl software descriptors: %d\n",
			    rc);
			return (rc);
		}

		if (fl->flags & FL_BUF_PACKING) {
			fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
			fl->buf_boundary = sp->pack_boundary;
		} else {
			fl->lowat = roundup2(sp->fl_starve_threshold, 8);
			fl->buf_boundary = 16;
		}
		if (fl_pad && fl->buf_boundary < sp->pad_boundary)
			fl->buf_boundary = sp->pad_boundary;

		c.iqns_to_fl0congen |=
		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
			(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
			(fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
			0));
		if (cong >= 0) {
			c.iqns_to_fl0congen |=
				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
				    F_FW_IQ_CMD_FL0CONGCIF |
				    F_FW_IQ_CMD_FL0CONGEN);
		}
		c.fl0dcaen_to_fl0cidxfthresh =
		    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
			X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
			V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
			X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
		c.fl0size = htobe16(fl->qsize);
		c.fl0addr = htobe64(fl->ba);
	}

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create ingress queue: %d\n", rc);
		return (rc);
	}

	iq->cidx = 0;
	iq->gen = F_RSPD_GEN;
	iq->intr_next = iq->intr_params;
	iq->cntxt_id = be16toh(c.iqid);
	iq->abs_id = be16toh(c.physiqid);
	iq->flags |= IQ_ALLOCATED;

	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = iq;

	if (fl) {
		u_int qid;

		iq->flags |= IQ_HAS_FL;
		fl->cntxt_id = be16toh(c.fl0id);
		fl->pidx = fl->cidx = 0;

		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
		if (cntxt_id >= sc->sge.neq) {
			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
			    __func__, cntxt_id, sc->sge.neq - 1);
		}
		sc->sge.eqmap[cntxt_id] = (void *)fl;

		qid = fl->cntxt_id;
		if (isset(&sc->doorbells, DOORBELL_UDB)) {
			uint32_t s_qpp = sc->params.sge.eq_s_qpp;
			uint32_t mask = (1 << s_qpp) - 1;
			volatile uint8_t *udb;

			udb = sc->udbs_base + UDBS_DB_OFFSET;
			udb += (qid >> s_qpp) << PAGE_SHIFT;
			qid &= mask;
			if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
				udb += qid << UDBS_SEG_SHIFT;
				qid = 0;
			}
			fl->udb = (volatile void *)udb;
		}
		fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;

		FL_LOCK(fl);
		/* Enough to make sure the SGE doesn't think it's starved */
		refill_fl(sc, fl, fl->lowat);
		FL_UNLOCK(fl);
	}

	if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
		}
	}

	/* Enable IQ interrupts */
	atomic_store_rel_int(&iq->state, IQS_IDLE);
	t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
	    V_INGRESSQID(iq->cntxt_id));

	return (0);
}
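
/*
 * Worked example for the congestion manager context computation above
 * (illustrative): cong is a bitmap of channels.  cong == 0 selects one mode
 * (val = 1 << 19); any other bitmap selects val = 2 << 19 plus one bit per
 * flagged channel, one nibble apart.  E.g. cong = 0x5 (channels 0 and 2):
 *
 *	val = (2 << 19) | (1 << 0) | (1 << 8);
 */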

static int
free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
{
	int rc;
	struct adapter *sc = iq->adapter;
	device_t dev;

	if (sc == NULL)
		return (0);	/* nothing to do */

	dev = vi ? vi->dev : sc->dev;

	if (iq->flags & IQ_ALLOCATED) {
		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
		    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
		    fl ? fl->cntxt_id : 0xffff, 0xffff);
		if (rc != 0) {
			device_printf(dev,
			    "failed to free queue %p: %d\n", iq, rc);
			return (rc);
		}
		iq->flags &= ~IQ_ALLOCATED;
	}

	free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);

	bzero(iq, sizeof(*iq));

	if (fl) {
		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
		    fl->desc);

		if (fl->sdesc)
			free_fl_sdesc(sc, fl);

		if (mtx_initialized(&fl->fl_lock))
			mtx_destroy(&fl->fl_lock);

		bzero(fl, sizeof(*fl));
	}

	return (0);
}

static void
add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
    struct sge_iq *iq)
{
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba,
	    "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    iq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &iq->abs_id, 0, sysctl_uint16, "I",
	    "absolute id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &iq->cntxt_id, 0, sysctl_uint16, "I",
	    "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &iq->cidx, 0, sysctl_uint16, "I",
	    "consumer index");
}

static void
add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *oid, struct sge_fl *fl)
{
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL,
	    "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &fl->ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
	    "desc ring size in bytes");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I",
	    "SGE context id of the freelist");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
	    fl_pad ? 1 : 0, "padding enabled");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
	    fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
	    0, "consumer index");
	if (fl->flags & FL_BUF_PACKING) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
		    CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
	}
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
	    0, "producer index");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated",
	    CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined",
	    CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
	    CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
	    CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
	    CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
}

static int
alloc_fwq(struct adapter *sc)
{
	int rc, intr_idx;
	struct sge_iq *fwq = &sc->sge.fwq;
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
	if (sc->flags & IS_VF)
		intr_idx = 0;
	else
		intr_idx = sc->intr_count > 1 ? 1 : 0;
	rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create firmware event queue: %d\n", rc);
		return (rc);
	}

	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD,
	    NULL, "firmware event queue");
	add_iq_sysctls(&sc->ctx, oid, fwq);

	return (0);
}

static int
free_fwq(struct adapter *sc)
{
	return free_iq_fl(NULL, &sc->sge.fwq, NULL);
}

static int
alloc_mgmtq(struct adapter *sc)
{
	int rc;
	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
	char name[16];
	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
	    NULL, "management queue");

	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
	init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
	    sc->sge.fwq.cntxt_id, name);
	rc = alloc_wrq(sc, NULL, mgmtq, oid);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create management queue: %d\n", rc);
		return (rc);
	}

	return (0);
}

static int
free_mgmtq(struct adapter *sc)
{

	return free_wrq(sc, &sc->sge.mgmtq);
}

int
tnl_cong(struct port_info *pi, int drop)
{

	if (drop == -1)
		return (-1);
	else if (drop == 1)
		return (0);
	else
		return (pi->rx_e_chan_map);
}
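
/*
 * Added summary (illustrative): tnl_cong() translates the cong_drop knob
 * into the 'cong' argument of alloc_iq_fl():
 *
 *	drop == -1 -> -1 (congestion feedback disabled for the queue)
 *	drop ==  1 ->  0 (an empty channel bitmap)
 *	otherwise  -> pi->rx_e_chan_map (the port's rx channel bitmap)
 */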
|
|
|
|
|
2011-02-18 08:00:26 +00:00
|
|
|
static int
|
2015-12-03 00:02:01 +00:00
|
|
|
alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
|
2011-12-16 02:09:51 +00:00
|
|
|
struct sysctl_oid *oid)
|
2011-02-18 08:00:26 +00:00
|
|
|
{
|
|
|
|
int rc;
|
2016-08-09 17:49:42 +00:00
|
|
|
struct adapter *sc = vi->pi->adapter;
|
2011-02-18 08:00:26 +00:00
|
|
|
struct sysctl_oid_list *children;
|
|
|
|
char name[16];
|
|
|
|
|
2015-12-03 00:02:01 +00:00
|
|
|
rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx,
|
|
|
|
tnl_cong(vi->pi, cong_drop));
|
2011-02-18 08:00:26 +00:00
|
|
|
if (rc != 0)
|
|
|
|
return (rc);
|
|
|
|
|
2016-08-09 17:49:42 +00:00
|
|
|
if (idx == 0)
|
|
|
|
sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
|
|
|
|
else
|
|
|
|
KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
|
|
|
|
("iq_base mismatch"));
|
|
|
|
KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
|
|
|
|
("PF with non-zero iq_base"));
|
|
|
|
|
2014-08-02 06:55:36 +00:00
|
|
|
/*
|
|
|
|
* The freelist is just barely above the starvation threshold right now,
|
|
|
|
* fill it up a bit more.
|
|
|
|
*/
|
2011-06-04 23:31:33 +00:00
|
|
|
FL_LOCK(&rxq->fl);
|
2016-08-09 17:49:42 +00:00
|
|
|
refill_fl(sc, &rxq->fl, 128);
|
2011-06-04 23:31:33 +00:00
|
|
|
FL_UNLOCK(&rxq->fl);
|
|
|
|
|
2012-06-29 19:51:06 +00:00
|
|
|
#if defined(INET) || defined(INET6)
|
2017-04-17 09:00:20 +00:00
|
|
|
rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
|
2011-02-18 08:00:26 +00:00
|
|
|
if (rc != 0)
|
|
|
|
return (rc);
|
2017-04-17 09:00:20 +00:00
|
|
|
MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */
|
2011-02-18 08:00:26 +00:00
|
|
|
|
2015-12-03 00:02:01 +00:00
|
|
|
if (vi->ifp->if_capenable & IFCAP_LRO)
|
2011-12-16 02:09:51 +00:00
|
|
|
rxq->iq.flags |= IQ_LRO_ENABLED;
|
2011-02-18 08:00:26 +00:00
|
|
|
#endif
|
2015-12-03 00:02:01 +00:00
|
|
|
rxq->ifp = vi->ifp;
|
2011-02-18 08:00:26 +00:00
|
|
|
|
2011-12-16 02:09:51 +00:00
|
|
|
children = SYSCTL_CHILDREN(oid);
|
2011-02-18 08:00:26 +00:00
|
|
|
|
|
|
|
snprintf(name, sizeof(name), "%d", idx);
|
2015-12-03 00:02:01 +00:00
|
|
|
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
|
2011-02-18 08:00:26 +00:00
|
|
|
NULL, "rx queue");
|
|
|
|
children = SYSCTL_CHILDREN(oid);
|
|
|
|
|
2017-12-29 02:30:21 +00:00
|
|
|
add_iq_sysctls(&vi->ctx, oid, &rxq->iq);
|
2012-06-29 19:51:06 +00:00
|
|
|
#if defined(INET) || defined(INET6)
|
2016-01-19 15:33:28 +00:00
|
|
|
SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
|
2011-02-18 08:00:26 +00:00
|
|
|
&rxq->lro.lro_queued, 0, NULL);
|
2016-01-19 15:33:28 +00:00
|
|
|
SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
|
2011-02-18 08:00:26 +00:00
|
|
|
&rxq->lro.lro_flushed, 0, NULL);
|
2011-03-05 03:42:03 +00:00
|
|
|
#endif
|
2015-12-03 00:02:01 +00:00
|
|
|
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
|
2011-02-18 08:00:26 +00:00
|
|
|
&rxq->rxcsum, "# of times hardware assisted with checksum");
|
2015-12-03 00:02:01 +00:00
|
|
|
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction",
|
2011-02-18 08:00:26 +00:00
|
|
|
CTLFLAG_RD, &rxq->vlan_extraction,
|
|
|
|
"# of times hardware extracted 802.1Q tag");
|
|
|
|
|
2016-09-23 20:03:28 +00:00
|
|
|
add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl);
|
2011-06-11 04:50:54 +00:00
|
|
|
|
2011-02-18 08:00:26 +00:00
|
|
|
return (rc);
|
|
|
|
}

static int
free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
{
	int rc;

#if defined(INET) || defined(INET6)
	if (rxq->lro.ifp) {
		tcp_lro_free(&rxq->lro);
		rxq->lro.ifp = NULL;
	}
#endif

	rc = free_iq_fl(vi, &rxq->iq, &rxq->fl);
	if (rc == 0)
		bzero(rxq, sizeof(*rxq));

	return (rc);
}

#ifdef TCP_OFFLOAD
static int
alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq,
    int intr_idx, int idx, struct sysctl_oid *oid)
{
	struct port_info *pi = vi->pi;
	int rc;
	struct sysctl_oid_list *children;
	char name[16];

	rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0);
	if (rc != 0)
		return (rc);

	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "rx queue");
	add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq);
	add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl);

	return (rc);
}

static int
free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
{
	int rc;

	rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl);
	if (rc == 0)
		bzero(ofld_rxq, sizeof(*ofld_rxq));

	return (rc);
}
#endif

#ifdef DEV_NETMAP
static int
alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
    int idx, struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_oid_list *children;
	struct sysctl_ctx_list *ctx;
	char name[16];
	size_t len;
	struct adapter *sc = vi->pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);

	MPASS(na != NULL);

	len = vi->qsize_rxq * IQ_ESIZE;
	rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
	    &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
	if (rc != 0)
		return (rc);

	len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
	    &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
	if (rc != 0)
		return (rc);

	nm_rxq->vi = vi;
	nm_rxq->nid = idx;
	nm_rxq->iq_cidx = 0;
	nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	nm_rxq->fl_sidx = na->num_rx_desc;
	nm_rxq->intr_idx = intr_idx;
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;

	ctx = &vi->ctx;
	children = SYSCTL_CHILDREN(oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL,
	    "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16,
	    "I", "absolute id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16,
	    "I", "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I",
	    "consumer index");

	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL,
	    "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16,
	    "I", "SGE context id of the freelist");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &nm_rxq->fl_cidx, 0, "consumer index");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
	    &nm_rxq->fl_pidx, 0, "producer index");

	return (rc);
}
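
/*
 * Sizing note for the netmap rx queue above: iq_sidx is the usable
 * number of IQ_ESIZE entries, i.e. the configured qsize minus the
 * entries consumed by the status page at the end of the ring.  For
 * example (illustrative values only), with qsize_rxq = 1024 and a
 * status page exactly one IQ_ESIZE entry long, iq_sidx ends up as
 * 1023.
 */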

static int
free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->pi->adapter;

	if (vi->flags & VI_INIT_DONE)
		MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID);
	else
		MPASS(nm_rxq->iq_cntxt_id == 0);

	free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
	    nm_rxq->iq_desc);
	free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
	    nm_rxq->fl_desc);

	return (0);
}

static int
alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	size_t len;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	char name[16];
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
	    &nm_txq->ba, (void **)&nm_txq->desc);
	if (rc)
		return (rc);

	nm_txq->pidx = nm_txq->cidx = 0;
	nm_txq->sidx = na->num_tx_desc;
	nm_txq->nid = idx;
	nm_txq->iqidx = iqidx;
	nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
	    V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
	    V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "netmap tx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &nm_txq->cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I",
	    "consumer index");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I",
	    "producer index");

	return (rc);
}
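
/*
 * The cpl_ctrl0 word initialized above is the invariant part of the
 * CPL_TX_PKT header (opcode, interface, and the PF/VF identity
 * decoded from the viid); precomputing it once per queue means the
 * hot tx path only has to fill in the per-packet fields.
 */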

static int
free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->pi->adapter;

	if (vi->flags & VI_INIT_DONE)
		MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID);
	else
		MPASS(nm_txq->cntxt_id == 0);

	free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
	    nm_txq->desc);

	return (0);
}
#endif

static int
ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_ctrl_cmd c;
	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
	    V_FW_EQ_CTRL_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
	c.physeqid_pkd = htobe32(0);
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
	    V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
	    F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
	    V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create control queue %d: %d\n", eq->tx_chan, rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}
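
/*
 * eqmap bookkeeping, as in the tail of ctrl_eq_alloc() above: the
 * firmware hands back a global egress queue id, and subtracting
 * sc->sge.eq_start turns it into a dense index into sc->sge.eqmap so
 * egress updates can be mapped back to their sge_eq.  E.g. (made-up
 * numbers) eq_start = 64 and cntxt_id = 67 selects eqmap[3].
 */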

static int
eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_eth_cmd c;
	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create Ethernet egress queue: %d\n", rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}

#ifdef TCP_OFFLOAD
static int
ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
	int rc, cntxt_id;
	struct fw_eq_ofld_cmd c;
	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;

	bzero(&c, sizeof(c));

	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
	    V_FW_EQ_OFLD_CMD_VFN(0));
	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
	c.fetchszm_to_iqid =
	    htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
	    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_OFLD_CMD_EQSIZE(qsize));
	c.eqaddr = htobe64(eq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create egress queue for TCP offload: %d\n", rc);
		return (rc);
	}
	eq->flags |= EQ_ALLOCATED;

	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = eq;

	return (rc);
}
#endif

static int
alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
	int rc, qsize;
	size_t len;

	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);

	qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
	len = qsize * EQ_ESIZE;
	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
	    &eq->ba, (void **)&eq->desc);
	if (rc)
		return (rc);

	eq->pidx = eq->cidx = 0;
	eq->equeqidx = eq->dbidx = 0;
	eq->doorbells = sc->doorbells;

	switch (eq->flags & EQ_TYPEMASK) {
	case EQ_CTRL:
		rc = ctrl_eq_alloc(sc, eq);
		break;

	case EQ_ETH:
		rc = eth_eq_alloc(sc, vi, eq);
		break;

#ifdef TCP_OFFLOAD
	case EQ_OFLD:
		rc = ofld_eq_alloc(sc, vi, eq);
		break;
#endif

	default:
		panic("%s: invalid eq type %d.", __func__,
		    eq->flags & EQ_TYPEMASK);
	}
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to allocate egress queue(%d): %d\n",
		    eq->flags & EQ_TYPEMASK, rc);
	}

	if (isset(&eq->doorbells, DOORBELL_UDB) ||
	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
	    isset(&eq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
		if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&eq->doorbells, DOORBELL_WCWR);
		else {
			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
			eq->udb_qid = 0;
		}
		eq->udb = (volatile void *)udb;
	}

	return (rc);
}
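
/*
 * Worked example of the user doorbell math above (values assumed for
 * illustration): with eq_s_qpp = 3 there are 8 egress queues per
 * doorbell page, so cntxt_id 21 lands in page 21 >> 3 = 2 with
 * udb_qid 21 & 7 = 5.  If that qid fits within the page's doorbell
 * segments, the pointer is advanced to the queue's own segment and
 * udb_qid is cleared; otherwise write-combined doorbells are disabled
 * for this queue.
 */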

static int
free_eq(struct adapter *sc, struct sge_eq *eq)
{
	int rc;

	if (eq->flags & EQ_ALLOCATED) {
		switch (eq->flags & EQ_TYPEMASK) {
		case EQ_CTRL:
			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;

		case EQ_ETH:
			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;

#ifdef TCP_OFFLOAD
		case EQ_OFLD:
			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
			    eq->cntxt_id);
			break;
#endif

		default:
			panic("%s: invalid eq type %d.", __func__,
			    eq->flags & EQ_TYPEMASK);
		}
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to free egress queue (%d): %d\n",
			    eq->flags & EQ_TYPEMASK, rc);
			return (rc);
		}
		eq->flags &= ~EQ_ALLOCATED;
	}

	free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);

	if (mtx_initialized(&eq->eq_lock))
		mtx_destroy(&eq->eq_lock);

	bzero(eq, sizeof(*eq));
	return (0);
}

static int
alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
    struct sysctl_oid *oid)
{
	int rc;
	struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx;
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	rc = alloc_eq(sc, vi, &wrq->eq);
	if (rc)
		return (rc);

	wrq->adapter = sc;
	TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq);
	TAILQ_INIT(&wrq->incomplete_wrs);
	STAILQ_INIT(&wrq->wr_list);
	wrq->nwr_pending = 0;
	wrq->ndesc_needed = 0;

	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &wrq->eq.ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len,
	    "desc ring size in bytes");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I",
	    "consumer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I",
	    "producer index");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
	    wrq->eq.sidx, "status page index");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD,
	    &wrq->tx_wrs_direct, "# of work requests (direct)");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD,
	    &wrq->tx_wrs_copied, "# of work requests (copied)");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD,
	    &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)");

	return (rc);
}

static int
free_wrq(struct adapter *sc, struct sge_wrq *wrq)
{
	int rc;

	rc = free_eq(sc, &wrq->eq);
	if (rc)
		return (rc);

	bzero(wrq, sizeof(*wrq));
	return (0);
}

static int
alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
    struct sysctl_oid *oid)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_eq *eq = &txq->eq;
	char name[16];
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);

	rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx,
	    M_CXGBE, M_WAITOK);
	if (rc != 0) {
		device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc);
		return (rc);
	}

	rc = alloc_eq(sc, vi, eq);
	if (rc != 0) {
		mp_ring_free(txq->r);
		txq->r = NULL;
		return (rc);
	}

	/* Can't fail after this point. */

	if (idx == 0)
		sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
	else
		KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
		    ("eq_base mismatch"));
	KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
	    ("PF with non-zero eq_base"));

	TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
	txq->ifp = vi->ifp;
	txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
	if (sc->flags & IS_VF)
		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
		    V_TXPKT_INTF(pi->tx_chan));
	else
		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
		    V_TXPKT_INTF(pi->tx_chan) |
		    V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
		    V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
		    V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
	txq->tc_idx = -1;
	txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
	    M_ZERO | M_WAITOK);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
	    NULL, "tx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
	    &eq->ba, "bus address of descriptor ring");
	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
	    eq->sidx * EQ_ESIZE + sc->params.sge.spg_len,
	    "desc ring size in bytes");
	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
	    &eq->abs_id, 0, "absolute id of the queue");
	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &eq->cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
	    CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
	    "consumer index");
	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
	    CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
	    "producer index");
	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
	    eq->sidx, "status page index");

	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc",
	    CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I",
	    "traffic class (-1 means none)");

	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
	    &txq->txcsum, "# of times hardware assisted with checksum");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion",
	    CTLFLAG_RD, &txq->vlan_insertion,
	    "# of times hardware inserted 802.1Q tag");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
	    &txq->tso_wrs, "# of TSO work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
	    &txq->imm_wrs, "# of work requests with immediate data");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
	    &txq->sgl_wrs, "# of work requests with direct SGL");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs",
	    CTLFLAG_RD, &txq->txpkts0_wrs,
	    "# of txpkts (type 0) work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs",
	    CTLFLAG_RD, &txq->txpkts1_wrs,
	    "# of txpkts (type 1) work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts",
	    CTLFLAG_RD, &txq->txpkts0_pkts,
	    "# of frames tx'd using type0 txpkts work requests");
	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts",
	    CTLFLAG_RD, &txq->txpkts1_pkts,
	    "# of frames tx'd using type1 txpkts work requests");

	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues",
	    CTLFLAG_RD, &txq->r->enqueues,
	    "# of enqueues to the mp_ring for this queue");
	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops",
	    CTLFLAG_RD, &txq->r->drops,
	    "# of drops in the mp_ring for this queue");
	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts",
	    CTLFLAG_RD, &txq->r->starts,
	    "# of normal consumer starts in the mp_ring for this queue");
	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls",
	    CTLFLAG_RD, &txq->r->stalls,
	    "# of consumer stalls in the mp_ring for this queue");
	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts",
	    CTLFLAG_RD, &txq->r->restarts,
	    "# of consumer restarts in the mp_ring for this queue");
	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications",
	    CTLFLAG_RD, &txq->r->abdications,
	    "# of consumer abdications in the mp_ring for this queue");

	return (0);
}
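
/*
 * Note on the IS_VF branch above: a VF's tx path builds
 * FW_ETH_TX_PKT_VM_WR work requests carrying CPL_TX_PKT_XT (see
 * write_txpkt_vm_wr() below), and its precomputed header carries only
 * the opcode and tx channel, while the PF keeps the full PF/VF
 * identity fields decoded from the viid.
 */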

static int
free_txq(struct vi_info *vi, struct sge_txq *txq)
{
	int rc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_eq *eq = &txq->eq;

	rc = free_eq(sc, eq);
	if (rc)
		return (rc);

	sglist_free(txq->gl);
	free(txq->sdesc, M_CXGBE);
	mp_ring_free(txq->r);

	bzero(txq, sizeof(*txq));
	return (0);
}

static void
oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *ba = arg;

	KASSERT(nseg == 1,
	    ("%s meant for single segment mappings only.", __func__));

	*ba = error ? 0 : segs->ds_addr;
}

static inline void
ring_fl_db(struct adapter *sc, struct sge_fl *fl)
{
	uint32_t n, v;

	n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx);
	MPASS(n > 0);

	wmb();
	v = fl->dbval | V_PIDX(n);
	if (fl->udb)
		*fl->udb = htole32(v);
	else
		t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
	IDXINCR(fl->dbidx, n, fl->sidx);
}
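
/*
 * Doorbell arithmetic above, by example (illustrative values): the
 * freelist pidx counts buffers and there are 8 buffers per hardware
 * descriptor, so pidx / 8 is the hardware producer index.  With
 * sidx = 128, dbidx = 126 and pidx / 8 == 2, IDXDIFF() wraps and
 * yields n = 4 new descriptors to advertise, after which IDXINCR()
 * advances dbidx to 2 modulo sidx.
 */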

/*
 * Fills up the freelist by allocating up to 'n' buffers.  Buffers
 * that are recycled do not count towards this allocation budget.
 *
 * Returns non-zero to indicate that this freelist should be added to
 * the list of starving freelists.
 */
static int
refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
{
	__be64 *d;
	struct fl_sdesc *sd;
	uintptr_t pa;
	caddr_t cl;
	struct cluster_layout *cll;
	struct sw_zone_info *swz;
	struct cluster_metadata *clm;
	uint16_t max_pidx;
	uint16_t hw_cidx = fl->hw_cidx;		/* stable snapshot */

	FL_LOCK_ASSERT_OWNED(fl);

	/*
	 * We always stop at the beginning of the hardware descriptor
	 * that's just before the one with the hw cidx.  This is to
	 * avoid hw pidx = hw cidx, which would mean an empty freelist
	 * to the chip.
	 */
	max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
	if (fl->pidx == max_pidx * 8)
		return (0);

	d = &fl->desc[fl->pidx];
	sd = &fl->sdesc[fl->pidx];
	cll = &fl->cll_def;	/* default layout */
	swz = &sc->sge.sw_zone_info[cll->zidx];

	while (n > 0) {
		if (sd->cl != NULL) {
			if (sd->nmbuf == 0) {
				/*
				 * Fast recycle without involving any
				 * atomics on the cluster's metadata (if
				 * the cluster has metadata).  This
				 * happens when all frames received in
				 * the cluster were small enough to fit
				 * within a single mbuf each.
				 */
				fl->cl_fast_recycled++;
#ifdef INVARIANTS
				clm = cl_metadata(sc, fl, &sd->cll, sd->cl);
				if (clm != NULL)
					MPASS(clm->refcount == 1);
#endif
				goto recycled_fast;
			}

			/*
			 * Cluster is guaranteed to have metadata.
			 * Clusters without metadata always take the
			 * fast recycle path when they're recycled.
			 */
			clm = cl_metadata(sc, fl, &sd->cll, sd->cl);
			MPASS(clm != NULL);

			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
				fl->cl_recycled++;
				counter_u64_add(extfree_rels, 1);
				goto recycled;
			}
			sd->cl = NULL;	/* gave up my reference */
		}
		MPASS(sd->cl == NULL);
alloc:
		cl = uma_zalloc(swz->zone, M_NOWAIT);
		if (__predict_false(cl == NULL)) {
			if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 ||
			    fl->cll_def.zidx == fl->cll_alt.zidx)
				break;

			/* fall back to the safe zone */
			cll = &fl->cll_alt;
			swz = &sc->sge.sw_zone_info[cll->zidx];
			goto alloc;
		}
		fl->cl_allocated++;
		n--;

		pa = pmap_kextract((vm_offset_t)cl);
		pa += cll->region1;
		sd->cl = cl;
		sd->cll = *cll;
		*d = htobe64(pa | cll->hwidx);
		clm = cl_metadata(sc, fl, cll, cl);
		if (clm != NULL) {
recycled:
#ifdef INVARIANTS
			clm->sd = sd;
#endif
			clm->refcount = 1;
		}
		sd->nmbuf = 0;
recycled_fast:
		d++;
		sd++;
		if (__predict_false(++fl->pidx % 8 == 0)) {
			uint16_t pidx = fl->pidx / 8;

			if (__predict_false(pidx == fl->sidx)) {
				fl->pidx = 0;
				pidx = 0;
				sd = fl->sdesc;
				d = fl->desc;
			}
			if (pidx == max_pidx)
				break;

			if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4)
				ring_fl_db(sc, fl);
		}
	}

	if (fl->pidx / 8 != fl->dbidx)
		ring_fl_db(sc, fl);

	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
}
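
/*
 * Layout reminder for refill_fl() above: fl->pidx and fl->sdesc are
 * in buffer units while fl->dbidx, fl->sidx and the chip's view of
 * the ring are in hardware descriptors of 8 buffers each, which is
 * why the loop checks ++fl->pidx % 8 and compares fl->pidx / 8
 * against max_pidx.  The doorbell is rung every 4 full descriptors
 * rather than per buffer, batching the MMIO writes.
 */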

/*
 * Attempt to refill all starving freelists.
 */
static void
refill_sfl(void *arg)
{
	struct adapter *sc = arg;
	struct sge_fl *fl, *fl_temp;

	mtx_assert(&sc->sfl_lock, MA_OWNED);
	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
		FL_LOCK(fl);
		refill_fl(sc, fl, 64);
		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
			TAILQ_REMOVE(&sc->sfl, fl, link);
			fl->flags &= ~FL_STARVING;
		}
		FL_UNLOCK(fl);
	}

	if (!TAILQ_EMPTY(&sc->sfl))
		callout_schedule(&sc->sfl_callout, hz / 5);
}

static int
alloc_fl_sdesc(struct sge_fl *fl)
{

	fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE,
	    M_ZERO | M_WAITOK);

	return (0);
}

static void
free_fl_sdesc(struct adapter *sc, struct sge_fl *fl)
{
	struct fl_sdesc *sd;
	struct cluster_metadata *clm;
	struct cluster_layout *cll;
	int i;

	sd = fl->sdesc;
	for (i = 0; i < fl->sidx * 8; i++, sd++) {
		if (sd->cl == NULL)
			continue;

		cll = &sd->cll;
		clm = cl_metadata(sc, fl, cll, sd->cl);
		if (sd->nmbuf == 0)
			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
		else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) {
			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
			counter_u64_add(extfree_rels, 1);
		}
		sd->cl = NULL;
	}

	free(fl->sdesc, M_CXGBE);
	fl->sdesc = NULL;
}

static inline void
get_pkt_gl(struct mbuf *m, struct sglist *gl)
{
	int rc;

	M_ASSERTPKTHDR(m);

	sglist_reset(gl);
	rc = sglist_append_mbuf(gl, m);
	if (__predict_false(rc != 0)) {
		panic("%s: mbuf %p (%d segs) was vetted earlier but now fails "
		    "with %d.", __func__, m, mbuf_nsegs(m), rc);
	}

	KASSERT(gl->sg_nseg == mbuf_nsegs(m),
	    ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
	    mbuf_nsegs(m), gl->sg_nseg));
	KASSERT(gl->sg_nseg > 0 &&
	    gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS),
	    ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__,
	    gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS));
}

/*
 * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
 */
static inline u_int
txpkt_len16(u_int nsegs, u_int tso)
{
	u_int n;

	MPASS(nsegs > 0);

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) +
	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	if (tso)
		n += sizeof(struct cpl_tx_pkt_lso_core);

	return (howmany(n, 16));
}
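
/*
 * SGL sizing in txpkt_len16() above, by example: the first segment
 * lives in the ulptx_sgl itself, each later pair of segments takes 3
 * flits (24 bytes), and a lone leftover segment takes 2 flits.  So
 * for nsegs = 4 (three segments beyond the first), the variable part
 * is 8 * ((3 * 3) / 2 + 1) = 40 bytes on top of the fixed WR, CPL and
 * ulptx_sgl headers, and howmany() rounds the total up to 16-byte
 * units.
 */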

/*
 * len16 for a txpkt_vm WR with a GL.  Includes the firmware work
 * request header.
 */
static inline u_int
txpkt_vm_len16(u_int nsegs, u_int tso)
{
	u_int n;

	MPASS(nsegs > 0);

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct fw_eth_tx_pkt_vm_wr) +
	    sizeof(struct cpl_tx_pkt_core) +
	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
	if (tso)
		n += sizeof(struct cpl_tx_pkt_lso_core);

	return (howmany(n, 16));
}

/*
 * len16 for a txpkts type 0 WR with a GL.  Does not include the firmware work
 * request header.
 */
static inline u_int
txpkts0_len16(u_int nsegs)
{
	u_int n;

	MPASS(nsegs > 0);

	nsegs--; /* first segment is part of ulptx_sgl */
	n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) +
	    sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) +
	    8 * ((3 * nsegs) / 2 + (nsegs & 1));

	return (howmany(n, 16));
}

/*
 * len16 for a txpkts type 1 WR with a GL.  Does not include the firmware work
 * request header.
 */
static inline u_int
txpkts1_len16(void)
{
	u_int n;

	n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl);

	return (howmany(n, 16));
}

static inline u_int
imm_payload(u_int ndesc)
{
	u_int n;

	n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) -
	    sizeof(struct cpl_tx_pkt_core);

	return (n);
}
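
/*
 * Example for imm_payload() above (sizes assumed for illustration:
 * 64-byte egress descriptors and 16-byte WR and CPL headers): a work
 * request spanning ndesc descriptors can carry ndesc * 64 bytes minus
 * the headers as immediate packet data, so a one-descriptor WR would
 * leave 32 bytes and a two-descriptor WR 96 bytes.
 */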

/*
 * Write a VM txpkt WR for this packet to the hardware descriptors, update the
 * software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq,
    struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;	/* used in many unrelated places */
	uint64_t ctrl1;
	int csum_type, len16, ndesc, pktlen, nsegs;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m0);
	MPASS(available > 0 && available < eq->sidx);

	len16 = mbuf_len16(m0);
	nsegs = mbuf_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	ndesc = howmany(len16, EQ_ESIZE / 16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	MPASS(wr == (void *)&eq->desc[eq->pidx]);
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3[0] = 0;
	wr->r3[1] = 0;

	/*
	 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci.
	 * vlantci is ignored unless the ethtype is 0x8100, so it's
	 * simpler to always copy it rather than making it
	 * conditional.  Also, it seems that we do not have to set
	 * vlantci or fake the ethtype when doing VLAN tag insertion.
	 */
	m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst);

	csum_type = -1;
	if (needs_tso(m0)) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
		    m0->m_pkthdr.l4hlen > 0,
		    ("%s: mbuf %p needs TSO but missing header lengths",
		    __func__, m0));

		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
		    F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2)
		    | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
		if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header))
			ctrl |= V_LSO_ETHHDR_LEN(1);
		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
			ctrl |= F_LSO_IPV6;

		lso->lso_ctrl = htobe32(ctrl);
		lso->ipid_ofst = htobe16(0);
		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
		lso->seqno_offset = htobe32(0);
		lso->len = htobe32(pktlen);

		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
			csum_type = TX_CSUM_TCPIP6;
		else
			csum_type = TX_CSUM_TCPIP;

		cpl = (void *)(lso + 1);

		txq->tso_wrs++;
	} else {
		if (m0->m_pkthdr.csum_flags & CSUM_IP_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (m0->m_pkthdr.csum_flags & CSUM_IP_UDP)
			csum_type = TX_CSUM_UDPIP;
		else if (m0->m_pkthdr.csum_flags & CSUM_IP6_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (m0->m_pkthdr.csum_flags & CSUM_IP6_UDP)
			csum_type = TX_CSUM_UDPIP6;
#if defined(INET)
		else if (m0->m_pkthdr.csum_flags & CSUM_IP) {
			/*
			 * XXX: The firmware appears to stomp on the
			 * fragment/flags field of the IP header when
			 * using TX_CSUM_IP.  Fall back to doing
			 * software checksums.
			 */
			u_short *sump;
			struct mbuf *m;
			int offset;

			m = m0;
			offset = 0;
			sump = m_advance(&m, &offset, m0->m_pkthdr.l2hlen +
			    offsetof(struct ip, ip_sum));
			*sump = in_cksum_skip(m0, m0->m_pkthdr.l2hlen +
			    m0->m_pkthdr.l3hlen, m0->m_pkthdr.l2hlen);
			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
		}
#endif

		cpl = (void *)(wr + 1);
	}

	/* Checksum offload */
	ctrl1 = 0;
	if (needs_l3_csum(m0) == 0)
		ctrl1 |= F_TXPKT_IPCSUM_DIS;
	if (csum_type >= 0) {
		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0,
		    ("%s: mbuf %p needs checksum offload but missing header lengths",
		    __func__, m0));

		if (chip_id(sc) <= CHELSIO_T5) {
			ctrl1 |= V_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN);
		} else {
			ctrl1 |= V_T6_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN);
		}
|
|
|
		ctrl1 |= V_TXPKT_IPHDR_LEN(m0->m_pkthdr.l3hlen);
		ctrl1 |= V_TXPKT_CSUM_TYPE(csum_type);
	} else
		ctrl1 |= F_TXPKT_L4CSUM_DIS;
	if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* SGL */
	dst = (void *)(cpl + 1);

	/*
	 * A packet using TSO will use up an entire descriptor for the
	 * firmware work request header, LSO CPL, and TX_PKT_XT CPL.
	 * If this descriptor is the last descriptor in the ring, wrap
	 * around to the front of the ring explicitly for the start of
	 * the sgl.
	 */
	if (dst == (void *)&eq->desc[eq->sidx]) {
		dst = (void *)&eq->desc[0];
		write_gl_to_txd(txq, m0, &dst, 0);
	} else
		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
	txq->sgl_wrs++;

	txq->txpkt_wrs++;

	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * Write a txpkt WR for this packet to the hardware descriptors, update the
 * software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr,
    struct mbuf *m0, u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;	/* used in many unrelated places */
	uint64_t ctrl1;
	int len16, ndesc, pktlen, nsegs;
	caddr_t dst;

	TXQ_LOCK_ASSERT_OWNED(txq);
	M_ASSERTPKTHDR(m0);
	MPASS(available > 0 && available < eq->sidx);

	len16 = mbuf_len16(m0);
	nsegs = mbuf_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	else if (pktlen <= imm_payload(2) && available >= 2) {
		/* Immediate data.  Recalculate len16 and set nsegs to 0. */
		ctrl += pktlen;
		len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) +
		    sizeof(struct cpl_tx_pkt_core) + pktlen, 16);
		nsegs = 0;
	}
	ndesc = howmany(len16, EQ_ESIZE / 16);
	MPASS(ndesc <= available);

	/* Firmware work request header */
	MPASS(wr == (void *)&eq->desc[eq->pidx]);
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));

	ctrl = V_FW_WR_LEN16(len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->r3 = 0;

	if (needs_tso(m0)) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
		    m0->m_pkthdr.l4hlen > 0,
		    ("%s: mbuf %p needs TSO but missing header lengths",
		    __func__, m0));

		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
		    F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2)
		    | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
		if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header))
			ctrl |= V_LSO_ETHHDR_LEN(1);
		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
			ctrl |= F_LSO_IPV6;

		lso->lso_ctrl = htobe32(ctrl);
		lso->ipid_ofst = htobe16(0);
		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
		lso->seqno_offset = htobe32(0);
		lso->len = htobe32(pktlen);

		cpl = (void *)(lso + 1);

		txq->tso_wrs++;
	} else
		cpl = (void *)(wr + 1);

	/* Checksum offload */
	ctrl1 = 0;
	if (needs_l3_csum(m0) == 0)
		ctrl1 |= F_TXPKT_IPCSUM_DIS;
	if (needs_l4_csum(m0) == 0)
		ctrl1 |= F_TXPKT_L4CSUM_DIS;
	if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/* CPL header */
	cpl->ctrl0 = txq->cpl_ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* SGL */
	dst = (void *)(cpl + 1);
	if (nsegs > 0) {

		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
		txq->sgl_wrs++;
	} else {
		struct mbuf *m;

		for (m = m0; m != NULL; m = m->m_next) {
			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
#ifdef INVARIANTS
			pktlen -= m->m_len;
#endif
		}
#ifdef INVARIANTS
		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
#endif
		txq->imm_wrs++;
	}

	txq->txpkt_wrs++;

	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}
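
/*
 * Decide whether two packets can be coalesced into a single txpkts work
 * request.  On success the txp state is initialized for the pair and 0 is
 * returned; a non-zero return means the packets must be sent separately.
 */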
static int
try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available)
{
	u_int needed, nsegs1, nsegs2, l1, l2;

	if (cannot_use_txpkts(m) || cannot_use_txpkts(n))
		return (1);

	nsegs1 = mbuf_nsegs(m);
	nsegs2 = mbuf_nsegs(n);
	if (nsegs1 + nsegs2 == 2) {
		txp->wr_type = 1;
		l1 = l2 = txpkts1_len16();
	} else {
		txp->wr_type = 0;
		l1 = txpkts0_len16(nsegs1);
		l2 = txpkts0_len16(nsegs2);
	}
	txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2;
	needed = howmany(txp->len16, EQ_ESIZE / 16);
	if (needed > SGE_MAX_WR_NDESC || needed > available)
		return (1);

	txp->plen = m->m_pkthdr.len + n->m_pkthdr.len;
	if (txp->plen > 65535)
		return (1);

	txp->npkt = 2;
	set_mbuf_len16(m, l1);
	set_mbuf_len16(n, l2);

	return (0);
}
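
/*
 * Try to add one more packet to the txpkts work request being built up in
 * txp.  Returns 0 if the packet was added and non-zero if it cannot be
 * accommodated in this work request.
 */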
static int
add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available)
{
	u_int plen, len16, needed, nsegs;

	MPASS(txp->wr_type == 0 || txp->wr_type == 1);

	nsegs = mbuf_nsegs(m);
	if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1))
		return (1);

	plen = txp->plen + m->m_pkthdr.len;
	if (plen > 65535)
		return (1);

	if (txp->wr_type == 0)
		len16 = txpkts0_len16(nsegs);
	else
		len16 = txpkts1_len16();
	needed = howmany(txp->len16 + len16, EQ_ESIZE / 16);
	if (needed > SGE_MAX_WR_NDESC || needed > available)
		return (1);

	txp->npkt++;
	txp->plen = plen;
	txp->len16 += len16;
	set_mbuf_len16(m, len16);

	return (0);
}

/*
 * Write a txpkts WR for the packets in txp to the hardware descriptors, update
 * the software descriptor, and advance the pidx.  It is guaranteed that enough
 * descriptors are available.
 *
 * The return value is the # of hardware descriptors used.
 */
static u_int
write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr,
    struct mbuf *m0, const struct txpkts *txp, u_int available)
{
	struct sge_eq *eq = &txq->eq;
	struct tx_sdesc *txsd;
	struct cpl_tx_pkt_core *cpl;
	uint32_t ctrl;
	uint64_t ctrl1;
	int ndesc, checkwrap;
	struct mbuf *m;
	void *flitp;

	TXQ_LOCK_ASSERT_OWNED(txq);
	MPASS(txp->npkt > 0);
	MPASS(txp->plen < 65536);
	MPASS(m0 != NULL);
	MPASS(m0->m_nextpkt != NULL);
	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
	MPASS(available > 0 && available < eq->sidx);

	ndesc = howmany(txp->len16, EQ_ESIZE / 16);
	MPASS(ndesc <= available);

	MPASS(wr == (void *)&eq->desc[eq->pidx]);
	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
	ctrl = V_FW_WR_LEN16(txp->len16);
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->plen = htobe16(txp->plen);
	wr->npkt = txp->npkt;
	wr->r3 = 0;
	wr->type = txp->wr_type;
	flitp = wr + 1;

	/*
	 * At this point we are 16B into a hardware descriptor.  If checkwrap is
	 * set then we know the WR is going to wrap around somewhere.  We'll
	 * check for that at appropriate points.
	 */
	checkwrap = eq->sidx - ndesc < eq->pidx;
	for (m = m0; m != NULL; m = m->m_nextpkt) {
		if (txp->wr_type == 0) {
			struct ulp_txpkt *ulpmc;
			struct ulptx_idata *ulpsc;

			/* ULP master command */
			ulpmc = flitp;
			ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
			    V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
			ulpmc->len = htobe32(mbuf_len16(m));

			/* ULP subcommand */
			ulpsc = (void *)(ulpmc + 1);
			ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
			    F_ULP_TX_SC_MORE);
			ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

			cpl = (void *)(ulpsc + 1);
			if (checkwrap &&
			    (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
				cpl = (void *)&eq->desc[0];
		} else {
			cpl = flitp;
		}

		/* Checksum offload */
		ctrl1 = 0;
		if (needs_l3_csum(m) == 0)
			ctrl1 |= F_TXPKT_IPCSUM_DIS;
		if (needs_l4_csum(m) == 0)
			ctrl1 |= F_TXPKT_L4CSUM_DIS;
		if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
		    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
			txq->txcsum++;	/* some hardware assistance provided */

		/* VLAN tag insertion */
		if (needs_vlan_insertion(m)) {
			ctrl1 |= F_TXPKT_VLAN_VLD |
			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
			txq->vlan_insertion++;
		}

		/* CPL header */
		cpl->ctrl0 = txq->cpl_ctrl0;
		cpl->pack = 0;
		cpl->len = htobe16(m->m_pkthdr.len);
		cpl->ctrl1 = htobe64(ctrl1);

		flitp = cpl + 1;
		if (checkwrap &&
		    (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
			flitp = (void *)&eq->desc[0];

		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);
	}

	if (txp->wr_type == 0) {
		txq->txpkts0_pkts += txp->npkt;
		txq->txpkts0_wrs++;
	} else {
		txq->txpkts1_pkts += txp->npkt;
		txq->txpkts1_wrs++;
	}

	txsd = &txq->sdesc[eq->pidx];
	txsd->m = m0;
	txsd->desc_used = ndesc;

	return (ndesc);
}

/*
 * If the SGL ends on an address that is not 16 byte aligned, this function will
 * add a 0 filled flit at the end.
 */
static void
write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
{
	struct sge_eq *eq = &txq->eq;
	struct sglist *gl = txq->gl;
	struct sglist_seg *seg;
	__be64 *flitp, *wrap;
	struct ulptx_sgl *usgl;
	int i, nflits, nsegs;

	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	get_pkt_gl(m, gl);
	nsegs = gl->sg_nseg;
	MPASS(nsegs > 0);

	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
	flitp = (__be64 *)(*to);
	wrap = (__be64 *)(&eq->desc[eq->sidx]);
	seg = &gl->sg_segs[0];
	usgl = (void *)flitp;

	/*
	 * We start at a 16 byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
	 */
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));
	usgl->len0 = htobe32(seg->ss_len);
	usgl->addr0 = htobe64(seg->ss_paddr);
	seg++;

	if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) {

		/* Won't wrap around at all */

		for (i = 0; i < nsegs - 1; i++, seg++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
		flitp += nflits;
	} else {

		/* Will wrap somewhere in the rest of the SGL */

		/* 2 flits already written, write the rest flit by flit */
		flitp = (void *)(usgl + 1);
		for (i = 0; i < nflits - 2; i++) {
			if (flitp == wrap)
				flitp = (void *)eq->desc;
			*flitp++ = get_flit(seg, nsegs - 1, i);
		}
	}

	if (nflits & 1) {
		MPASS(((uintptr_t)flitp) & 0xf);
		*flitp++ = 0;
	}

	MPASS((((uintptr_t)flitp) & 0xf) == 0);
	if (__predict_false(flitp == wrap))
		*to = (void *)eq->desc;
	else
		*to = (void *)flitp;
}
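
/*
 * Copy 'len' bytes of immediate data into the descriptor ring at '*to',
 * wrapping around to the start of the ring if necessary, and advance the
 * destination pointer.
 */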
static inline void
copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
{

	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);

	if (__predict_true((uintptr_t)(*to) + len <=
	    (uintptr_t)&eq->desc[eq->sidx])) {
		bcopy(from, *to, len);
		(*to) += len;
	} else {
		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);

		bcopy(from, *to, portion);
		from += portion;
		portion = len - portion;	/* remaining */
		bcopy(from, (void *)eq->desc, portion);
		(*to) = (caddr_t)eq->desc + portion;
	}
}
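
/*
 * Notify the hardware that 'n' new descriptors are available, using one of
 * the doorbell mechanisms available to this eq.  Write-combined doorbells
 * (which carry the descriptor itself) are used only for single-descriptor
 * updates.
 */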
static inline void
ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
{
	u_int db;

	MPASS(n > 0);

	db = eq->doorbells;
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;
		int i;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(eq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
		    __func__, eq->doorbells, n, eq->dbidx, eq));

		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
		    UDBS_DB_OFFSET);
		i = eq->dbidx;
		src = (void *)&eq->desc[i];
		while (src != (void *)&eq->desc[i + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(eq->cntxt_id) | V_PIDX(n));
		break;
	}

	IDXINCR(eq->dbidx, n, eq->sidx);
}
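
/*
 * Descriptor ring accounting helpers: how many descriptors the hardware is
 * done with (and can be reclaimed), and how many are free for new work
 * requests.  Both are derived from the cidx that the hardware posts in the
 * status page at the end of the ring.
 */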
static inline u_int
reclaimable_tx_desc(struct sge_eq *eq)
{
	uint16_t hw_cidx;

	hw_cidx = read_hw_cidx(eq);
	return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx));
}

static inline u_int
total_available_tx_desc(struct sge_eq *eq)
{
	uint16_t hw_cidx, pidx;

	hw_cidx = read_hw_cidx(eq);
	pidx = eq->pidx;

	if (pidx == hw_cidx)
		return (eq->sidx - 1);
	else
		return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1);
}

static inline uint16_t
read_hw_cidx(struct sge_eq *eq)
{
	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
	uint16_t cidx = spg->cidx;	/* stable snapshot */

	return (be16toh(cidx));
}

/*
 * Reclaim 'n' descriptors approximately.
 */
static u_int
reclaim_tx_descs(struct sge_txq *txq, u_int n)
{
	struct tx_sdesc *txsd;
	struct sge_eq *eq = &txq->eq;
	u_int can_reclaim, reclaimed;

	TXQ_LOCK_ASSERT_OWNED(txq);
	MPASS(n > 0);

	reclaimed = 0;
	can_reclaim = reclaimable_tx_desc(eq);
	while (can_reclaim && reclaimed < n) {
		int ndesc;
		struct mbuf *m, *nextpkt;

		txsd = &txq->sdesc[eq->cidx];
		ndesc = txsd->desc_used;

		/* Firmware doesn't return "partial" credits. */
		KASSERT(can_reclaim >= ndesc,
		    ("%s: unexpected number of credits: %d, %d",
		    __func__, can_reclaim, ndesc));

		for (m = txsd->m; m != NULL; m = nextpkt) {
			nextpkt = m->m_nextpkt;
			m->m_nextpkt = NULL;
			m_freem(m);
		}
		reclaimed += ndesc;
		can_reclaim -= ndesc;
		IDXINCR(eq->cidx, ndesc, eq->sidx);
	}

	return (reclaimed);
}
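
/*
 * Taskqueue handler that reclaims completed tx descriptors in small batches
 * for as long as the queue lock can be acquired and progress is being made.
 */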
static void
tx_reclaim(void *arg, int n)
{
	struct sge_txq *txq = arg;
	struct sge_eq *eq = &txq->eq;

	do {
		if (TXQ_TRYLOCK(txq) == 0)
			break;
		n = reclaim_tx_descs(txq, 32);
		if (eq->cidx == eq->pidx)
			eq->equeqidx = eq->pidx;
		TXQ_UNLOCK(txq);
	} while (n > 0);
}
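
/*
 * Return the idx'th flit of the SGL tail that describes the given segments.
 * Each group of three flits packs two segment lengths followed by the two
 * addresses.  Used by write_gl_to_txd() when the SGL wraps and has to be
 * written out flit by flit.
 */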
static __be64
get_flit(struct sglist_seg *segs, int nsegs, int idx)
{
	int i = (idx / 3) * 2;

	switch (idx % 3) {
	case 0: {
		uint64_t rc;

		rc = (uint64_t)segs[i].ss_len << 32;
		if (i + 1 < nsegs)
			rc |= (uint64_t)(segs[i + 1].ss_len);

		return (htobe64(rc));
	}
	case 1:
		return (htobe64(segs[i].ss_paddr));
	case 2:
		return (htobe64(segs[i + 1].ss_paddr));
	}

	return (0);
}
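
/*
 * Pick the default cluster zone and hardware buffer size (fl->cll_def) for
 * the freelist, based on the maximum payload size (maxp) and whether buffer
 * packing is enabled.
 */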
static void
find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp)
{
	int8_t zidx, hwidx, idx;
	uint16_t region1, region3;
	int spare, spare_needed, n;
	struct sw_zone_info *swz;
	struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0];

	/*
	 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize
	 * large enough for the max payload and cluster metadata.  Otherwise
	 * settle for the largest bufsize that leaves enough room in the cluster
	 * for metadata.
	 *
	 * Without buffer packing: Look for the smallest zone which has a
	 * bufsize large enough for the max payload.  Settle for the largest
	 * bufsize available if there's nothing big enough for max payload.
	 */
	spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0;
	swz = &sc->sge.sw_zone_info[0];
	hwidx = -1;
	for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) {
		if (swz->size > largest_rx_cluster) {
			if (__predict_true(hwidx != -1))
				break;

			/*
			 * This is a misconfiguration.  largest_rx_cluster is
			 * preventing us from finding a refill source.  See
			 * dev.t5nex.<n>.buffer_sizes to figure out why.
			 */
			device_printf(sc->dev, "largest_rx_cluster=%u leaves no"
			    " refill source for fl %p (dma %u).  Ignored.\n",
			    largest_rx_cluster, fl, maxp);
		}
		for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) {
			hwb = &hwb_list[idx];
			spare = swz->size - hwb->size;
			if (spare < spare_needed)
				continue;

			hwidx = idx;		/* best option so far */
			if (hwb->size >= maxp) {

				if ((fl->flags & FL_BUF_PACKING) == 0)
					goto done; /* stop looking (not packing) */

				if (swz->size >= safest_rx_cluster)
					goto done; /* stop looking (packing) */
			}
			break;	/* keep looking, next zone */
		}
	}
done:
	/* A usable hwidx has been located. */
	MPASS(hwidx != -1);
	hwb = &hwb_list[hwidx];
	zidx = hwb->zidx;
	swz = &sc->sge.sw_zone_info[zidx];
	region1 = 0;
	region3 = swz->size - hwb->size;

	/*
	 * Stay within this zone and see if there is a better match when mbuf
	 * inlining is allowed.  Remember that the hwidx's are sorted in
	 * decreasing order of size (so in increasing order of spare area).
	 */
	for (idx = hwidx; idx != -1; idx = hwb->next) {
		hwb = &hwb_list[idx];
		spare = swz->size - hwb->size;

		if (allow_mbufs_in_cluster == 0 || hwb->size < maxp)
			break;

		/*
		 * Do not inline mbufs if doing so would violate the pad/pack
		 * boundary alignment requirement.
		 */
		if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0)
			continue;
		if (fl->flags & FL_BUF_PACKING &&
		    (MSIZE % sc->params.sge.pack_boundary) != 0)
			continue;

		if (spare < CL_METADATA_SIZE + MSIZE)
			continue;
		n = (spare - CL_METADATA_SIZE) / MSIZE;
		if (n > howmany(hwb->size, maxp))
			break;

		hwidx = idx;
		if (fl->flags & FL_BUF_PACKING) {
			region1 = n * MSIZE;
			region3 = spare - region1;
		} else {
			region1 = MSIZE;
			region3 = spare - region1;
			break;
		}
	}

	KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES,
	    ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp));
	KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES,
	    ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp));
	KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 ==
	    sc->sge.sw_zone_info[zidx].size,
	    ("%s: bad buffer layout for fl %p, maxp %d. "
	    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
	    sc->sge.sw_zone_info[zidx].size, region1,
	    sc->sge.hw_buf_info[hwidx].size, region3));
	if (fl->flags & FL_BUF_PACKING || region1 > 0) {
		KASSERT(region3 >= CL_METADATA_SIZE,
		    ("%s: no room for metadata.  fl %p, maxp %d; "
		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
		    sc->sge.sw_zone_info[zidx].size, region1,
		    sc->sge.hw_buf_info[hwidx].size, region3));
		KASSERT(region1 % MSIZE == 0,
		    ("%s: bad mbuf region for fl %p, maxp %d. "
		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
		    sc->sge.sw_zone_info[zidx].size, region1,
		    sc->sge.hw_buf_info[hwidx].size, region3));
	}

	fl->cll_def.zidx = zidx;
	fl->cll_def.hwidx = hwidx;
	fl->cll_def.region1 = region1;
	fl->cll_def.region3 = region3;
}
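
/*
 * Pick a conservative fallback cluster layout (fl->cll_alt) for the freelist,
 * for use when the default refill source is unavailable.
 */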
static void
find_safe_refill_source(struct adapter *sc, struct sge_fl *fl)
{
	struct sge *s = &sc->sge;
	struct hw_buf_info *hwb;
	struct sw_zone_info *swz;
	int spare;
	int8_t hwidx;

	if (fl->flags & FL_BUF_PACKING)
		hwidx = s->safe_hwidx2;	/* with room for metadata */
	else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) {
		hwidx = s->safe_hwidx2;
		hwb = &s->hw_buf_info[hwidx];
		swz = &s->sw_zone_info[hwb->zidx];
		spare = swz->size - hwb->size;

		/* no good if there isn't room for an mbuf as well */
		if (spare < CL_METADATA_SIZE + MSIZE)
			hwidx = s->safe_hwidx1;
	} else
		hwidx = s->safe_hwidx1;

	if (hwidx == -1) {
		/* No fallback source */
		fl->cll_alt.hwidx = -1;
		fl->cll_alt.zidx = -1;

		return;
	}

	hwb = &s->hw_buf_info[hwidx];
	swz = &s->sw_zone_info[hwb->zidx];
	spare = swz->size - hwb->size;
	fl->cll_alt.hwidx = hwidx;
	fl->cll_alt.zidx = hwb->zidx;
	if (allow_mbufs_in_cluster &&
	    (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0))
		fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE;
	else
		fl->cll_alt.region1 = 0;
	fl->cll_alt.region3 = spare - fl->cll_alt.region1;
}
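
/*
 * Add the freelist to the adapter's list of starving freelists and arm the
 * callout that will attempt to refill them.
 */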
static void
add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	if ((fl->flags & FL_DOOMED) == 0) {
		fl->flags |= FL_STARVING;
		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
	}
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);
}
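
/*
 * Per-type handlers for the egress update CPL: one for control/offload work
 * request queues and one for ethernet tx queues.  Both defer the actual work
 * to a taskqueue.
 */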
static void
handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
{
	struct sge_wrq *wrq = (void *)eq;

	atomic_readandclear_int(&eq->equiq);
	taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
}

static void
handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
{
	struct sge_txq *txq = (void *)eq;

	MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH);

	atomic_readandclear_int(&eq->equiq);
	mp_ring_check_drainage(txq->r, 0);
	taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
}

static int
handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
	struct adapter *sc = iq->adapter;
	struct sge *s = &sc->sge;
	struct sge_eq *eq;
	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
	    &handle_wrq_egr_update, &handle_eth_egr_update,
	    &handle_wrq_egr_update};

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	eq = s->eqmap[qid - s->eq_start - s->eq_base];
	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);

	return (0);
}

/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
    offsetof(struct cpl_fw6_msg, data));

static int
handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
		const struct rss_header *rss2;

		rss2 = (const struct rss_header *)&cpl->data[0];
		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
	}

	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
}

/**
 *	t4_handle_wrerr_rpl - process a FW work request error message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 */
static int
t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_error_cmd *e = (const void *)rpl;
	unsigned int i;

	if (opcode != FW_ERROR_CMD) {
		log(LOG_ERR,
		    "%s: Received WRERR_RPL message with opcode %#x\n",
		    device_get_nameunit(adap->dev), opcode);
		return (EINVAL);
	}
	log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
	    G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
	    "non-fatal");
	switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) {
	case FW_ERROR_TYPE_EXCEPTION:
		log(LOG_ERR, "exception info:\n");
		for (i = 0; i < nitems(e->u.exception.info); i++)
			log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ",
			    be32toh(e->u.exception.info[i]));
		log(LOG_ERR, "\n");
		break;
	case FW_ERROR_TYPE_HWMODULE:
		log(LOG_ERR, "HW module regaddr %08x regval %08x\n",
		    be32toh(e->u.hwmodule.regaddr),
		    be32toh(e->u.hwmodule.regval));
		break;
	case FW_ERROR_TYPE_WR:
		log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n",
		    be16toh(e->u.wr.cidx),
		    G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)),
		    G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)),
		    be32toh(e->u.wr.eqid));
		for (i = 0; i < nitems(e->u.wr.wrhdr); i++)
			log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ",
			    e->u.wr.wrhdr[i]);
		log(LOG_ERR, "\n");
		break;
	case FW_ERROR_TYPE_ACL:
		log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s",
		    be16toh(e->u.acl.cidx),
		    G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)),
		    G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)),
		    be32toh(e->u.acl.eqid),
		    G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" :
		    "MAC");
		for (i = 0; i < nitems(e->u.acl.val); i++)
			log(LOG_ERR, " %02x", e->u.acl.val[i]);
		log(LOG_ERR, "\n");
		break;
	default:
		log(LOG_ERR, "type %#x\n",
		    G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type)));
		return (EINVAL);
	}
	return (0);
}
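
/*
 * Sysctl handler that exports a uint16_t as a (read-only) int.
 */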
static int
sysctl_uint16(SYSCTL_HANDLER_ARGS)
{
	uint16_t *id = arg1;
	int i = *id;

	return sysctl_handle_int(oidp, &i, 0, req);
}
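
/*
 * Sysctl handler that reports the hardware buffer sizes.  Sizes usable with
 * the current largest_rx_cluster setting are marked with a '*'.
 */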
static int
sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
{
	struct sge *s = arg1;
	struct hw_buf_info *hwb = &s->hw_buf_info[0];
	struct sw_zone_info *swz = &s->sw_zone_info[0];
	int i, rc;
	struct sbuf sb;
	char c;

	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster)
			c = '*';
		else
			c = '\0';

		sbuf_printf(&sb, "%u%c ", hwb->size, c);
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (rc);
}
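
/*
 * Sysctl handler to get or set the traffic (scheduling) class that a tx
 * queue is bound to.  -1 means the queue is not bound to any class.
 */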
static int
sysctl_tc(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct port_info *pi;
	struct adapter *sc;
	struct sge_txq *txq;
	struct tx_cl_rl_params *tc;
	int qidx = arg2, rc, tc_idx;
	uint32_t fw_queue, fw_class;

	MPASS(qidx >= 0 && qidx < vi->ntxq);
	pi = vi->pi;
	sc = pi->adapter;
	txq = &sc->sge.txq[vi->first_txq + qidx];

	tc_idx = txq->tc_idx;
	rc = sysctl_handle_int(oidp, &tc_idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (sc->flags & IS_VF)
		return (EPERM);

	/* Note that -1 is legitimate input (it means unbind). */
	if (tc_idx < -1 || tc_idx >= sc->chip_params->nsched_cls)
		return (EINVAL);

	mtx_lock(&sc->tc_lock);
	if (tc_idx == txq->tc_idx) {
		rc = 0;		/* No change, nothing to do. */
		goto done;
	}

	fw_queue = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
	    V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id);

	if (tc_idx == -1)
		fw_class = 0xffffffff;	/* Unbind. */
	else {
		/*
		 * Bind to a different class.
		 */
		tc = &pi->sched_params->cl_rl[tc_idx];
		if (tc->flags & TX_CLRL_ERROR) {
			/* Previous attempt to set the cl-rl params failed. */
			rc = EIO;
			goto done;
		} else {
			/*
			 * Ok to proceed.  Place a reference on the new class
			 * while still holding on to the reference on the
			 * previous class, if any.
			 */
			fw_class = tc_idx;
			tc->refcount++;
		}
	}
	mtx_unlock(&sc->tc_lock);

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4stc");
	if (rc)
		return (rc);
	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, &fw_class);
	end_synchronized_op(sc, 0);

	mtx_lock(&sc->tc_lock);
	if (rc == 0) {
		if (txq->tc_idx != -1) {
			tc = &pi->sched_params->cl_rl[txq->tc_idx];
			MPASS(tc->refcount > 0);
			tc->refcount--;
		}
		txq->tc_idx = tc_idx;
	} else if (tc_idx != -1) {
		tc = &pi->sched_params->cl_rl[tc_idx];
		MPASS(tc->refcount > 0);
		tc->refcount--;
	}
done:
	mtx_unlock(&sc->tc_lock);
	return (rc);
}