/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixgbe.h"

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx);
static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx,
				   qidx_t budget);
static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype);
static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t);

extern void ixgbe_if_enable_intr(if_ctx_t ctx);
static int ixgbe_determine_rsstype(u16 pkt_info);

struct if_txrx ixgbe_txrx = {
	.ift_txd_encap = ixgbe_isc_txd_encap,
	.ift_txd_flush = ixgbe_isc_txd_flush,
	.ift_txd_credits_update = ixgbe_isc_txd_credits_update,
	.ift_rxd_available = ixgbe_isc_rxd_available,
	.ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get,
	.ift_rxd_refill = ixgbe_isc_rxd_refill,
	.ift_rxd_flush = ixgbe_isc_rxd_flush,
	.ift_legacy_intr = NULL
};

extern if_shared_ctx_t ixgbe_sctx;
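
/*
 * iflib drives the entire TX/RX fast path through the method table
 * above; the driver only touches its descriptor rings from inside
 * these callbacks. As a sketch (assuming the usual iflib attach flow
 * for this driver), the table is registered during pre-attach with
 * something like:
 *
 *	scctx->isc_txrx = &ixgbe_txrx;
 */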

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
static int
ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
	u32 vlan_macip_lens, type_tucmd_mlhl;
	u32 olinfo_status, mss_l4len_idx, pktlen, offload;
	u8  ehdrlen;

	offload = TRUE;
	olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
	/* VLAN MACLEN IPLEN */
	vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT);

	/*
	 * Some of our VF devices need a context descriptor for every
	 * packet, so ehdrlen must be non-zero or the host driver will
	 * flag a malicious event. For every other reason this function
	 * is called, the stack will already have populated ehdrlen.
	 */
	if (pi->ipi_ehdrlen == 0) {
		ehdrlen = ETHER_HDR_LEN;
		ehdrlen += (pi->ipi_vtag != 0) ? ETHER_VLAN_ENCAP_LEN : 0;
	} else
		ehdrlen = pi->ipi_ehdrlen;
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	pktlen = pi->ipi_len;
	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO) {
		/* This is used in the transmit desc in encap */
		pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
		mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	}

	olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	if (pi->ipi_flags & IPI_TX_IPV4) {
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO))
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
	} else if (pi->ipi_flags & IPI_TX_IPV6)
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
	else
		offload = FALSE;

	vlan_macip_lens |= pi->ipi_ip_hlen;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = FALSE;
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = FALSE;
		break;
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		else
			offload = FALSE;
		break;
	default:
		offload = FALSE;
		break;
	}

	/* Insert L4 checksum into data descriptors */
	if (offload)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (olinfo_status);
} /* ixgbe_tx_ctx_setup */
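
/*
 * Worked example (a sketch; the shift values are assumed from the
 * usual 82599/X540 advanced-descriptor layout and should be checked
 * against ixgbe_type.h): for a TSO packet with tso_segsz = 1448 and
 * tcp_hlen = 20, with IXGBE_ADVTXD_MSS_SHIFT == 16 and
 * IXGBE_ADVTXD_L4LEN_SHIFT == 8,
 *
 *	mss_l4len_idx = (1448 << 16) | (20 << 8) = 0x05a81400
 *
 * and for a plain 14-byte Ethernet header over a 20-byte IPv4 header,
 * with IXGBE_ADVTXD_MACLEN_SHIFT == 9,
 *
 *	vlan_macip_lens = (14 << 9) | 20 = 0x1c14
 */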

/************************************************************************
 * ixgbe_isc_txd_encap
 ************************************************************************/
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	union ixgbe_adv_tx_desc *txd = NULL;
	struct ixgbe_adv_tx_context_desc *TXD;
	int i, j, first, pidx_last;
	u32 olinfo_status, cmd, flags;
	qidx_t ntxd;

	cmd = (IXGBE_ADVTXD_DTYP_DATA |
	       IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= IXGBE_ADVTXD_DCMD_VLE;

	i = first = pi->ipi_pidx;
	flags = (pi->ipi_flags & IPI_TX_INTR) ? IXGBE_TXD_CMD_RS : 0;
	ntxd = scctx->isc_ntxd[0];

	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first];
	if ((pi->ipi_csum_flags & CSUM_OFFLOAD) ||
	    (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) ||
	    pi->ipi_vtag) {
		/*********************************************
		 * Set up the appropriate offload context;
		 * this will consume the first descriptor.
		 *********************************************/
		olinfo_status = ixgbe_tx_ctx_setup(TXD, pi);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			cmd |= IXGBE_ADVTXD_DCMD_TSE;
			++txr->tso_tx;
		}

		if (++i == scctx->isc_ntxd[0])
			i = 0;
	} else {
		/* Indicate the whole packet as payload when not doing TSO */
		olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
	}

	olinfo_status |= IXGBE_ADVTXD_CC;
	pidx_last = 0;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;

		txd->read.buffer_addr = htole64(segs[j].ds_addr);
		txd->read.cmd_type_len = htole32(cmd | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);

		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}

	if (flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
	}
	txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags);

	txr->bytes += pi->ipi_len;
	pi->ipi_new_pidx = i;

	++txr->total_packets;

	return (0);
} /* ixgbe_isc_txd_encap */
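
/*
 * Ring-walk sketch (illustrative only): a 3-segment packet that needs
 * a checksum context, starting at ipi_pidx = 10, consumes descriptors
 * 10 (context), 11, 12 and 13 (data); only descriptor 13 gets
 * IXGBE_TXD_CMD_EOP, and 13 is what is recorded in tx_rsq[] when the
 * packet requested a TX completion interrupt (IPI_TX_INTR).
 */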

/************************************************************************
 * ixgbe_isc_txd_flush
 ************************************************************************/
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct adapter *sc = arg;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */
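
/*
 * Writing the ring's tail register is what actually publishes new
 * descriptors to the hardware: the NIC fetches everything between its
 * internal head pointer and the tail value written here.
 */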

/************************************************************************
 * ixgbe_isc_txd_credits_update
 ************************************************************************/
static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct adapter *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;
	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);

	cur = txr->tx_rsq[rs_cidx];
	status = txr->tx_base[cur].wb.status;
	updated = !!(status & IXGBE_TXD_STAT_DD);

	if (!updated)
		return (0);

	/*
	 * If clear is false, just let the caller know that there
	 * are descriptors to reclaim.
	 */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;

		cur = txr->tx_rsq[rs_cidx];
		status = txr->tx_base[cur].wb.status;
	} while ((status & IXGBE_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;

	return (processed);
} /* ixgbe_isc_txd_credits_update */
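
/*
 * Wrap-around sketch: with ntxd = 1024, tx_cidx_processed = 1020 and a
 * completed RS descriptor at index 4, delta = 4 - 1020 = -1016, and
 * adding ntxd yields the 8 descriptors actually consumed. The
 * (ntxd - 1) masking above assumes the ring size is a power of two.
 */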

/************************************************************************
 * ixgbe_isc_rxd_refill
 ************************************************************************/
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct adapter *sc = arg;
	struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
	struct rx_ring *rxr = &que->rxr;
	uint64_t *paddrs;
	int i;
	uint32_t next_pidx, pidx;
	uint16_t count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == sc->shared->isc_nrxd[0])
			next_pidx = 0;
	}
} /* ixgbe_isc_rxd_refill */
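
/*
 * Refill only stores the new buffer physical addresses in the ring;
 * the hardware is not told about them until iflib follows up with
 * ixgbe_isc_rxd_flush() to advance the RX tail register.
 */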

/************************************************************************
 * ixgbe_isc_rxd_flush
 ************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
{
	struct adapter *sc = arg;
	struct ix_rx_queue *que = &sc->rx_queues[qsidx];
	struct rx_ring *rxr = &que->rxr;

	IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */

/************************************************************************
 * ixgbe_isc_rxd_available
 ************************************************************************/
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
	struct adapter *sc = arg;
	struct ix_rx_queue *que = &sc->rx_queues[qsidx];
	struct rx_ring *rxr = &que->rxr;
	union ixgbe_adv_rx_desc *rxd;
	u32 staterr;
	int cnt, i, nrxd;

	nrxd = sc->shared->isc_nrxd[0];
	for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
		rxd = &rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (staterr & IXGBE_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
} /* ixgbe_isc_rxd_available */
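
/*
 * Counting note: cnt is incremented only on EOP descriptors, so the
 * value returned to iflib is complete packets, not raw descriptors; a
 * packet spread over several buffers still counts once against budget.
 */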

/************************************************************************
 * ixgbe_isc_rxd_pkt_get
 *
 *   Passes data which has been DMA'd into host memory up to the
 *   upper layer, initializing the ri structure along the way.
 *
 *   Returns 0 upon success, errno on failure
 ************************************************************************/
static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct adapter *adapter = arg;
	struct ix_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
	union ixgbe_adv_rx_desc *rxd;

	u16 pkt_info, len, cidx, i;
	u16 vtag = 0;
	u32 ptype;
	u32 staterr = 0;
	bool eop;

	i = 0;
	cidx = ri->iri_cidx;
	do {
		rxd = &rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		/* Error Checking then decrement count */
		MPASS((staterr & IXGBE_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->bytes += len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		if (rxr->vtag_strip && (staterr & IXGBE_RXD_STAT_VP)) {
			vtag = le16toh(rxd->wb.upper.vlan);
		} else {
			vtag = 0;
		}

		/* Make sure bad packets are discarded */
		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
#if __FreeBSD_version >= 1100036
			if (adapter->feat_en & IXGBE_FEATURE_VF)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#endif
			rxr->rx_discarded++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;
		if (++cidx == adapter->shared->isc_nrxd[0])
			cidx = 0;
		i++;
		/* even a 16K packet shouldn't consume more than 8 clusters */
		MPASS(i < 9);
	} while (!eop);

	rxr->rx_packets++;
	rxr->packets++;
	rxr->rx_bytes += ri->iri_len;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		ixgbe_rx_checksum(staterr, ri, ptype);

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info);
	if ((adapter->feat_en & IXGBE_FEATURE_RSS) == 0) {
		if (ri->iri_rsstype == M_HASHTYPE_OPAQUE)
			ri->iri_rsstype = M_HASHTYPE_NONE;
		else
			ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
	}
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
} /* ixgbe_isc_rxd_pkt_get */
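
/*
 * Fragment-chain sketch: if the ring is backed by 4096-byte clusters,
 * a 9000-byte jumbo frame is written back as three descriptors; only
 * the last carries IXGBE_RXD_STAT_EOP, so the loop above records three
 * iri_frags entries (4096 + 4096 + 808 bytes) and iflib stitches them
 * into a single mbuf chain.
 */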

/************************************************************************
 * ixgbe_rx_checksum
 *
 *   Verify that the hardware indicated that the checksum is valid.
 *   Inform the stack about the status of the checksum so that it
 *   doesn't spend time verifying it.
 ************************************************************************/
static void
ixgbe_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
	u16 status = (u16)staterr;
	u8 errors = (u8)(staterr >> 24);
	bool sctp = false;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = true;

	/* IPv4 checksum */
	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			ri->iri_csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
		} else
			ri->iri_csum_flags = 0;
	}
	/* TCP/UDP/SCTP checksum */
	if (status & IXGBE_RXD_STAT_L4CS) {
		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			ri->iri_csum_flags |= type;
			if (!sctp)
				ri->iri_csum_data = htons(0xffff);
		}
	}
} /* ixgbe_rx_checksum */
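
/*
 * Setting iri_csum_data to 0xffff is the stack's convention for "the
 * hardware verified the full checksum": combined with CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR it tells the TCP/UDP input path to skip software
 * checksumming entirely.
 */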

/************************************************************************
 * ixgbe_determine_rsstype
 *
 *   Parse the packet type to determine the appropriate hash
 ************************************************************************/
static int
ixgbe_determine_rsstype(u16 pkt_info)
{
	switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
	case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
		return M_HASHTYPE_RSS_UDP_IPV4;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
		return M_HASHTYPE_RSS_UDP_IPV6;
	case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
		return M_HASHTYPE_RSS_UDP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
} /* ixgbe_determine_rsstype */