IFLIB updates

- unconditionally enable BUS_DMA on non-x86 architectures
- speed up rxd zeroing via customized function
- support out-of-order updates to rxd's
- add prefetching to hardware descriptor rings
- only prefetch on 10G or faster hardware
- add separate tx queue intr function
- preliminary rework of NETMAP interfaces, WIP

Submitted by:	Matt Macy <mmacy@nextbsd.org>
Sponsored by:	Limelight Networks

commit 95246abb21, parent 85f104954a
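The largest mechanical change in the hunks below is that the rxd refill callbacks now take a single if_rxd_update_t batch instead of a long argument list. A minimal sketch of how a driver consumes one, using only the iru_* fields that appear in the drivers in this commit (the structure definition itself is not part of this diff, so treat the exact field set, the driver name, and the ring-size macro as assumptions):

static void
mydrv_isc_rxd_refill(void *arg, if_rxd_update_t iru)	/* hypothetical driver */
{
	uint64_t *paddrs = iru->iru_paddrs;	/* bus addresses to post */
	uint32_t pidx = iru->iru_pidx;		/* first descriptor slot to fill */
	uint16_t count = iru->iru_count;	/* buffers in this batch */
	uint16_t len = iru->iru_buf_size;	/* buffer size for this free list */
	uint16_t rxqid = iru->iru_qsidx;	/* rx queue set index */
	uint8_t flid = iru->iru_flidx;		/* free list within that queue */
	uint32_t i, next;

	for (i = 0, next = pidx; i < count; i++) {
		/* write paddrs[i] into hardware descriptor 'next' of queue
		 * rxqid / free list flid, clearing any DD bit, exactly as the
		 * em/igb/bnxt implementations below do, then wrap the index */
		if (++next == MYDRV_NRXD)	/* placeholder ring size */
			next = 0;
	}
}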
@@ -48,17 +48,19 @@ __FBSDID("$FreeBSD$");
*/

static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx);
static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, uint32_t cidx,
bool clear);
static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);

static void bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,
static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);

/* uint16_t rxqid, uint8_t flid,
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
uint16_t buf_size);
*/
static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
uint32_t pidx);
static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint32_t idx,
int budget);
qidx_t pidx);
static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
qidx_t budget);
static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);

static int bnxt_intr(void *sc);

@@ -172,7 +174,7 @@ bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
}

static void
bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx)
bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];

@@ -185,7 +187,7 @@ bnxt_isc_txd_flush(void *sc, uint16_t txqid, uint32_t pidx)
}

static int
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, uint32_t idx, bool clear)
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];

@@ -249,15 +251,27 @@ done:
}

static void
bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,
uint32_t pidx, uint64_t *paddrs,
caddr_t *vaddrs, uint16_t count, uint16_t len)
bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_ring *rx_ring;
struct rx_prod_pkt_bd *rxbd;
uint16_t type;
uint16_t i;
uint16_t rxqid;
uint16_t count, len;
uint32_t pidx;
uint8_t flid;
uint64_t *paddrs;
caddr_t *vaddrs;

rxqid = iru->iru_qsidx;
count = iru->iru_count;
len = iru->iru_buf_size;
pidx = iru->iru_pidx;
flid = iru->iru_flidx;
vaddrs = iru->iru_vaddrs;
paddrs = iru->iru_paddrs;

if (flid == 0) {
rx_ring = &softc->rx_rings[rxqid];

@@ -284,7 +298,7 @@ bnxt_isc_rxd_refill(void *sc, uint16_t rxqid, uint8_t flid,

static void
bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
uint32_t pidx)
qidx_t pidx)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_ring *rx_ring;

@@ -310,7 +324,7 @@ bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
}

static int
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, uint32_t idx, int budget)
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];

@@ -412,37 +426,6 @@ cmpl_invalid:
return avail;
}

static void
bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
{
uint8_t rss_profile_id;

rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
switch (rss_profile_id) {
case BNXT_RSS_HASH_TYPE_TCPV4:
ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
break;
case BNXT_RSS_HASH_TYPE_UDPV4:
ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
break;
case BNXT_RSS_HASH_TYPE_IPV4:
ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
break;
case BNXT_RSS_HASH_TYPE_TCPV6:
ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
break;
case BNXT_RSS_HASH_TYPE_UDPV6:
ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
break;
case BNXT_RSS_HASH_TYPE_IPV6:
ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
break;
default:
ri->iri_rsstype = M_HASHTYPE_OPAQUE;
break;
}
}

static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
struct bnxt_cp_ring *cpr, uint16_t flags_type)

@@ -460,7 +443,13 @@ bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
/* Extract from the first 16-byte BD */
if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
ri->iri_flowid = le32toh(rcp->rss_hash);
bnxt_set_rsstype(ri, rcp->rss_hash_type);
/*
* TODO: Extract something useful from rcp->rss_hash_type
* (undocumented)
* May be documented in the "LSI ES"
* also check the firmware code.
*/
ri->iri_rsstype = M_HASHTYPE_OPAQUE;
}
else {
ri->iri_rsstype = M_HASHTYPE_NONE;

@@ -540,7 +529,13 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
/* Extract from the first 16-byte BD */
if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
ri->iri_flowid = le32toh(tpas->low.rss_hash);
bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
/*
* TODO: Extract something useful from tpas->low.rss_hash_type
* (undocumented)
* May be documented in the "LSI ES"
* also check the firmware code.
*/
ri->iri_rsstype = M_HASHTYPE_OPAQUE;
}
else {
ri->iri_rsstype = M_HASHTYPE_NONE;
@@ -253,7 +253,8 @@ static device_method_t bnxt_iflib_methods[] = {
DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),

DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
DEVMETHOD(ifdi_queue_intr_enable, bnxt_queue_intr_enable),
DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_queue_intr_enable),
DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_queue_intr_enable),
DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

@@ -711,6 +712,8 @@ bnxt_attach_pre(if_ctx_t ctx)
scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
scctx->isc_vectors = softc->func.max_cp_rings;
scctx->isc_txrx = &bnxt_txrx;

if (scctx->isc_nrxd[0] <
((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
device_printf(softc->dev,

@@ -1479,7 +1482,7 @@ bnxt_msix_intr_assign(if_ctx_t ctx, int msix)

for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
rc = iflib_irq_alloc_generic(ctx, &softc->rx_cp_rings[i].irq,
softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RX,
softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RXTX,
bnxt_handle_rx_cp, &softc->rx_cp_rings[i], i, "rx_cp");
if (rc) {
device_printf(iflib_get_dev(ctx),
@@ -1,5 +1,5 @@
/*-
* Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
* Copyright (c) 2016-2017 Matt Macy <mmacy@nextbsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@@ -27,7 +27,7 @@
/* $FreeBSD$ */
#include "if_em.h"

#ifdef RSS
#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

@@ -41,23 +41,24 @@
/*********************************************************************
* Local Function prototypes
*********************************************************************/
static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower);
static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower);
static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper,
u32 *txd_lower);
static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi,
u32 *txd_upper, u32 *txd_lower);
static int em_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
static int em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear);
static void em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused);
static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx);
static int em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
int budget);
static void em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static void em_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
qidx_t pidx);
static int em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
qidx_t budget);
static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused);
static void lem_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static int lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
int budget);
static int lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
qidx_t budget);
static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri);

@@ -65,7 +66,7 @@ static void em_receive_checksum(uint32_t status, if_rxd_info_t ri);
static int em_determine_rsstype(u32 pkt_info);
extern int em_intr(void *arg);

struct if_txrx em_txrx = {
struct if_txrx em_txrx = {
em_isc_txd_encap,
em_isc_txd_flush,
em_isc_txd_credits_update,

@@ -76,7 +77,7 @@ struct if_txrx em_txrx = {
em_intr
};

struct if_txrx lem_txrx = {
struct if_txrx lem_txrx = {
em_isc_txd_encap,
em_isc_txd_flush,
em_isc_txd_credits_update,

@@ -87,7 +88,42 @@ struct if_txrx lem_txrx = {
em_intr
};

extern if_shared_ctx_t em_sctx;
extern if_shared_ctx_t em_sctx;

void
em_dump_rs(struct adapter *adapter)
{
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que;
struct tx_ring *txr;
qidx_t i, ntxd, qid, cur;
int16_t rs_cidx;
uint8_t status;

printf("\n");
ntxd = scctx->isc_ntxd[0];
for (qid = 0; qid < adapter->tx_num_queues; qid++) {
que = &adapter->tx_queues[qid];
txr = &que->txr;
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx != txr->tx_rs_pidx) {
cur = txr->tx_rsq[rs_cidx];
status = txr->tx_base[cur].upper.fields.status;
if (!(status & E1000_TXD_STAT_DD))
printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur);
} else {
rs_cidx = (rs_cidx-1)&(ntxd-1);
cur = txr->tx_rsq[rs_cidx];
printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur);
}
printf("cidx_prev=%d rs_pidx=%d ",txr->tx_cidx_processed, txr->tx_rs_pidx);
for (i = 0; i < ntxd; i++) {
if (txr->tx_base[i].upper.fields.status & E1000_TXD_STAT_DD)
printf("%d set ", i);
}
printf("\n");
}
}

/**********************************************************************
*

@@ -99,14 +135,13 @@ static int
em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
struct e1000_context_desc *TXD;
struct em_txbuffer *tx_buffer;
int cur, hdr_len;
int cur, hdr_len;

hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
*txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
*txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
E1000_TXD_DTYP_D | /* Data descr type */
E1000_TXD_CMD_TSE); /* Do TSE on this packet */

@@ -114,10 +149,9 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

cur = pi->ipi_pidx;
TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
tx_buffer = &txr->tx_buffers[cur];

/*
TXD = (struct e1000_context_desc *)&txr->tx_base[cur];

/*
* Start offset for header checksum calculation.
* End offset for header checksum calculation.
* Offset of place put the checksum.

@@ -127,7 +161,7 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1);
TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);

/*
/*
* Start offset for payload checksum calculation.
* End offset for payload checksum calculation.
* Offset of place to put the checksum.

@@ -136,8 +170,8 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
TXD->upper_setup.tcp_fields.tucse = 0;
TXD->upper_setup.tcp_fields.tucso =
pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum);

/*

/*
* Payload size per packet w/o any headers.
* Length of all headers up to payload.
*/

@@ -150,7 +184,6 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
E1000_TXD_CMD_IP | /* Do IP csum */
E1000_TXD_CMD_TCP | /* Do TCP checksum */
(pi->ipi_len - hdr_len)); /* Total len */
tx_buffer->eop = -1;
txr->tx_tso = TRUE;

if (++cur == scctx->isc_ntxd[0]) {

@@ -178,21 +211,20 @@ em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd
* until the previous request completes. This means setting up
* a new context effectively disables pipelined Tx data DMA which
* in turn greatly slow down performance to send small sized
* frames.
* frames.
**********************************************************************/

static int
em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
struct e1000_context_desc *TXD = NULL;
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
struct em_txbuffer *tx_buffer;
int csum_flags = pi->ipi_csum_flags;
int cur, hdr_len;
u32 cmd;

struct e1000_context_desc *TXD = NULL;
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
int csum_flags = pi->ipi_csum_flags;
int cur, hdr_len;
u32 cmd;

cur = pi->ipi_pidx;
hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
cmd = adapter->txd_cmd;

@@ -220,7 +252,7 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u

TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
if (csum_flags & CSUM_IP) {
*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/*
* Start offset for header checksum calculation.
* End offset for header checksum calculation.

@@ -235,7 +267,7 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
if (csum_flags & (CSUM_TCP|CSUM_UDP)) {
uint8_t tucso;

*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;

if (csum_flags & CSUM_TCP) {

@@ -243,9 +275,9 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
cmd |= E1000_TXD_CMD_TCP;
} else
tucso = hdr_len + offsetof(struct udphdr, uh_sum);
TXD->upper_setup.tcp_fields.tucss = hdr_len;
TXD->upper_setup.tcp_fields.tucse = htole16(0);
TXD->upper_setup.tcp_fields.tucso = tucso;
TXD->upper_setup.tcp_fields.tucss = hdr_len;
TXD->upper_setup.tcp_fields.tucse = htole16(0);
TXD->upper_setup.tcp_fields.tucso = tucso;
}

txr->csum_lhlen = pi->ipi_ehdrlen;

@@ -258,9 +290,6 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
TXD->cmd_and_length =
htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);

tx_buffer = &txr->tx_buffers[cur];
tx_buffer->eop = -1;

if (++cur == scctx->isc_ntxd[0]) {
cur = 0;
}

@@ -272,24 +301,26 @@ em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_u
static int
em_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
bus_dma_segment_t *segs = pi->ipi_segs;
int nsegs = pi->ipi_nsegs;
int csum_flags = pi->ipi_csum_flags;
int i, j, first, pidx_last;
u32 txd_upper = 0, txd_lower = 0;

struct em_txbuffer *tx_buffer;
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
bus_dma_segment_t *segs = pi->ipi_segs;
int nsegs = pi->ipi_nsegs;
int csum_flags = pi->ipi_csum_flags;
int i, j, first, pidx_last;
u32 txd_flags, txd_upper = 0, txd_lower = 0;

struct e1000_tx_desc *ctxd = NULL;
bool do_tso, tso_desc;

i = first = pi->ipi_pidx;
bool do_tso, tso_desc;
qidx_t ntxd;

txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_TXD_CMD_RS : 0;
i = first = pi->ipi_pidx;
do_tso = (csum_flags & CSUM_TSO);
tso_desc = FALSE;
/*
ntxd = scctx->isc_ntxd[0];
/*
* TSO Hardware workaround, if this packet is not
* TSO, and is only a single descriptor long, and
* it follows a TSO burst, then we need to add a

@@ -310,10 +341,10 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
}

if (pi->ipi_mflags & M_VLANTAG) {
/* Set the vlan id. */
/* Set the vlan id. */
txd_upper |= htole16(pi->ipi_vtag) << 16;
/* Tell hardware to add tag */
txd_lower |= htole32(E1000_TXD_CMD_VLE);
/* Tell hardware to add tag */
txd_lower |= htole32(E1000_TXD_CMD_VLE);
}

DPRINTF(iflib_get_dev(sc->ctx), "encap: set up tx: nsegs=%d first=%d i=%d\n", nsegs, first, i);

@@ -326,28 +357,26 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
uint32_t cmd;

ctxd = &txr->tx_base[i];
tx_buffer = &txr->tx_buffers[i];
seg_addr = segs[j].ds_addr;
seg_len = segs[j].ds_len;
cmd = E1000_TXD_CMD_IFCS | sc->txd_cmd;

/*
** TSO Workaround:
** If this is the last descriptor, we want to
** split it so we have a small final sentinel
*/
* TSO Workaround:
* If this is the last descriptor, we want to
* split it so we have a small final sentinel
*/
if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) {
seg_len -= TSO_WORKAROUND;
ctxd->buffer_addr = htole64(seg_addr);
ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
ctxd->upper.data = htole32(txd_upper);

if (++i == scctx->isc_ntxd[0])
if (++i == scctx->isc_ntxd[0])
i = 0;

/* Now make the sentinel */
ctxd = &txr->tx_base[i];
tx_buffer = &txr->tx_buffers[i];
ctxd->buffer_addr = htole64(seg_addr + seg_len);
ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND);
ctxd->upper.data = htole32(txd_upper);

@@ -364,27 +393,28 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
i = 0;
DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
}
tx_buffer->eop = -1;
}

/*
* Last Descriptor of Packet
* Last Descriptor of Packet
* needs End Of Packet (EOP)
* and Report Status (RS)
*/
ctxd->lower.data |=
htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

tx_buffer = &txr->tx_buffers[first];
tx_buffer->eop = pidx_last;
*/
if (txd_flags) {
txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
DPRINTF(iflib_get_dev(sc->ctx), "setting to RS on %d rs_pidx %d first: %d\n", pidx_last, txr->tx_rs_pidx, first);
txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
}
ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | txd_flags);
DPRINTF(iflib_get_dev(sc->ctx), "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i);
pi->ipi_new_pidx = i;

return (0);
return (0);
}

static void
em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
struct adapter *adapter = arg;
struct em_tx_queue *que = &adapter->tx_queues[txqid];

@@ -394,86 +424,72 @@ em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
}

static int
em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear)
em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que = &adapter->tx_queues[txqid];
struct tx_ring *txr = &que->txr;

u32 cidx, processed = 0;
int last, done;
struct em_txbuffer *buf;
struct e1000_tx_desc *tx_desc, *eop_desc;
qidx_t processed = 0;
int updated;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
uint8_t status;

cidx = cidx_init;
buf = &txr->tx_buffers[cidx];
tx_desc = &txr->tx_base[cidx];
last = buf->eop;
if (last == -1)
return (processed);
eop_desc = &txr->tx_base[last];
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx == txr->tx_rs_pidx)
return (0);
cur = txr->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
status = txr->tx_base[cur].upper.fields.status;
updated = !!(status & E1000_TXD_STAT_DD);

DPRINTF(iflib_get_dev(adapter->ctx),
"credits_update: cidx_init=%d clear=%d last=%d\n",
cidx_init, clear, last);
/*
* What this does is get the index of the
* first descriptor AFTER the EOP of the
* first packet, that way we can do the
* simple comparison on the inner while loop.
*/
if (++last == scctx->isc_ntxd[0])
last = 0;
done = last;
if (clear == false || updated == 0)
return (updated);

prev = txr->tx_cidx_processed;
ntxd = scctx->isc_ntxd[0];
do {
delta = (int32_t)cur - (int32_t)prev;
MPASS(prev == 0 || delta != 0);
if (delta < 0)
delta += ntxd;
DPRINTF(iflib_get_dev(adapter->ctx),
"%s: cidx_processed=%u cur=%u clear=%d delta=%d\n",
__FUNCTION__, prev, cur, clear, delta);

while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
/* We clean the range of the packet */
while (cidx != done) {
if (clear) {
tx_desc->upper.data = 0;
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
buf->eop = -1;
}
tx_desc++;
buf++;
processed++;

/* wrap the ring ? */
if (++cidx == scctx->isc_ntxd[0]) {
cidx = 0;
}
buf = &txr->tx_buffers[cidx];
tx_desc = &txr->tx_base[cidx];
}
/* See if we can continue to the next packet */
last = buf->eop;
if (last == -1)
processed += delta;
prev = cur;
rs_cidx = (rs_cidx + 1) & (ntxd-1);
if (rs_cidx == txr->tx_rs_pidx)
break;
eop_desc = &txr->tx_base[last];
/* Get new done point */
if (++last == scctx->isc_ntxd[0])
last = 0;
done = last;
}
cur = txr->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
status = txr->tx_base[cur].upper.fields.status;
} while ((status & E1000_TXD_STAT_DD));

DPRINTF(iflib_get_dev(adapter->ctx), "Processed %d credits update\n", processed);
txr->tx_rs_cidx = rs_cidx;
txr->tx_cidx_processed = prev;
return(processed);
}
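The em_isc_txd_credits_update() rework above replaces the per-buffer eop walk with a small report-status queue: the encap path records the last descriptor index of every packet that asked for a completion (IPI_TX_INTR, which turns into E1000_TXD_CMD_RS), and the credits path only has to test those descriptors for DD. A condensed sketch of the two halves, using the names from the diff and omitting the DPRINTFs, assertions, and early returns; it is a simplification of the functions in this commit, not a drop-in replacement:

/* encap: remember where each RS-tagged packet ends */
if (pi->ipi_flags & IPI_TX_INTR) {
	txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
	txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
}

/* credits_update: retire everything up to each completed RS descriptor */
while (txr->tx_rs_cidx != txr->tx_rs_pidx) {
	cur = txr->tx_rsq[txr->tx_rs_cidx];
	if ((txr->tx_base[cur].upper.fields.status & E1000_TXD_STAT_DD) == 0)
		break;
	delta = (int32_t)cur - (int32_t)txr->tx_cidx_processed;
	if (delta < 0)
		delta += ntxd;			/* the ring wrapped */
	processed += delta;
	txr->tx_cidx_processed = cur;
	txr->tx_rs_cidx = (txr->tx_rs_cidx + 1) & (ntxd - 1);
}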
static void
lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused)
lem_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct em_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
struct rx_ring *rxr = &que->rxr;
struct e1000_rx_desc *rxd;
uint64_t *paddrs;
uint32_t next_pidx, pidx;
uint16_t count;
int i;
uint32_t next_pidx;

paddrs = iru->iru_paddrs;
pidx = iru->iru_pidx;
count = iru->iru_count;

for (i = 0, next_pidx = pidx; i < count; i++) {
rxd = (struct e1000_rx_desc *)&rxr->rx_base[next_pidx];

@@ -487,48 +503,60 @@ lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
}

static void
em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused)
em_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
uint16_t rxqid = iru->iru_qsidx;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
union e1000_rx_desc_extended *rxd;
uint64_t *paddrs;
uint32_t next_pidx, pidx;
uint16_t count;
int i;
uint32_t next_pidx;

paddrs = iru->iru_paddrs;
pidx = iru->iru_pidx;
count = iru->iru_count;

for (i = 0, next_pidx = pidx; i < count; i++) {
rxd = &rxr->rx_base[next_pidx];
rxd->read.buffer_addr = htole64(paddrs[i]);
/* DD bits must be cleared */
rxd->wb.upper.status_error = 0;

rxd->wb.upper.status_error = 0;

if (++next_pidx == scctx->isc_nrxd[0])
next_pidx = 0;
}
}

static void
em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
struct adapter *sc = arg;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
struct adapter *sc = arg;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;

E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

static int
lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
struct adapter *sc = arg;
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
struct e1000_rx_desc *rxd;
u32 staterr = 0;
int cnt, i;
u32 staterr = 0;
int cnt, i;

if (budget == 1) {
rxd = (struct e1000_rx_desc *)&rxr->rx_base[idx];
staterr = rxd->status;
return (staterr & E1000_RXD_STAT_DD);
}

for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
rxd = (struct e1000_rx_desc *)&rxr->rx_base[i];

@@ -547,15 +575,21 @@ lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
}

static int
em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
struct adapter *sc = arg;
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
union e1000_rx_desc_extended *rxd;
u32 staterr = 0;
int cnt, i;
u32 staterr = 0;
int cnt, i;

if (budget == 1) {
rxd = &rxr->rx_base[idx];
staterr = le32toh(rxd->wb.upper.status_error);
return (staterr & E1000_RXD_STAT_DD);
}

for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
rxd = &rxr->rx_base[i];

@@ -578,15 +612,15 @@ em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
static int
lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
struct e1000_rx_desc *rxd;
u16 len;
u32 status, errors;
bool eop;
int i, cidx;
u16 len;
u32 status, errors;
bool eop;
int i, cidx;

status = errors = i = 0;
cidx = ri->iri_cidx;

@@ -639,31 +673,31 @@ lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
static int
em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
union e1000_rx_desc_extended *rxd;

u16 len;
u32 pkt_info;
u32 staterr = 0;
bool eop;
int i, cidx, vtag;
u16 len;
u32 pkt_info;
u32 staterr = 0;
bool eop;
int i, cidx, vtag;

i = vtag = 0;
cidx = ri->iri_cidx;

do {
rxd = &rxr->rx_base[cidx];
staterr = le32toh(rxd->wb.upper.status_error);
staterr = le32toh(rxd->wb.upper.status_error);
pkt_info = le32toh(rxd->wb.lower.mrq);

/* Error Checking then decrement count */
MPASS ((staterr & E1000_RXD_STAT_DD) != 0);

len = le16toh(rxd->wb.upper.length);
ri->iri_len += len;
ri->iri_len += len;

eop = (staterr & E1000_RXD_STAT_EOP) != 0;

@@ -690,15 +724,14 @@ em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)

if (staterr & E1000_RXD_STAT_VP) {
vtag = le16toh(rxd->wb.upper.vlan);
}

}

ri->iri_vtag = vtag;
if (vtag)
ri->iri_flags |= M_VLANTAG;

ri->iri_flowid =
le32toh(rxd->wb.lower.hi_dword.rss);
ri->iri_rsstype = em_determine_rsstype(pkt_info);

ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
ri->iri_rsstype = em_determine_rsstype(pkt_info);

ri->iri_nfrags = i;
return (0);

@@ -736,23 +769,24 @@ lem_receive_checksum(int status, int errors, if_rxd_info_t ri)
static int
em_determine_rsstype(u32 pkt_info)
{
switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
case E1000_RXDADV_RSSTYPE_IPV4_TCP:
return M_HASHTYPE_RSS_TCP_IPV4;
case E1000_RXDADV_RSSTYPE_IPV4:
return M_HASHTYPE_RSS_IPV4;
case E1000_RXDADV_RSSTYPE_IPV6_TCP:
return M_HASHTYPE_RSS_TCP_IPV6;
case E1000_RXDADV_RSSTYPE_IPV6_EX:
return M_HASHTYPE_RSS_IPV6_EX;
case E1000_RXDADV_RSSTYPE_IPV6:
return M_HASHTYPE_RSS_IPV6;
case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
return M_HASHTYPE_RSS_TCP_IPV6_EX;
default:
return M_HASHTYPE_OPAQUE;
}
switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
case E1000_RXDADV_RSSTYPE_IPV4_TCP:
return M_HASHTYPE_RSS_TCP_IPV4;
case E1000_RXDADV_RSSTYPE_IPV4:
return M_HASHTYPE_RSS_IPV4;
case E1000_RXDADV_RSSTYPE_IPV6_TCP:
return M_HASHTYPE_RSS_TCP_IPV6;
case E1000_RXDADV_RSSTYPE_IPV6_EX:
return M_HASHTYPE_RSS_IPV6_EX;
case E1000_RXDADV_RSSTYPE_IPV6:
return M_HASHTYPE_RSS_IPV6;
case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
return M_HASHTYPE_RSS_TCP_IPV6_EX;
default:
return M_HASHTYPE_OPAQUE;
}
}

static void
em_receive_checksum(uint32_t status, if_rxd_info_t ri)
{

@@ -764,7 +798,7 @@ em_receive_checksum(uint32_t status, if_rxd_info_t ri)

/* If the IP checksum exists and there is no IP Checksum error */
if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
E1000_RXD_STAT_IPCS) {
E1000_RXD_STAT_IPCS) {
ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
}
File diff suppressed because it is too large.
@@ -356,14 +356,14 @@ struct em_int_delay_info {
*/
struct tx_ring {
struct adapter *adapter;
struct em_tx_queue *que;
u32 me;
int busy;
struct e1000_tx_desc *tx_base;
uint64_t tx_paddr;
struct em_txbuffer *tx_buffers;
u32 tx_tso; /* last tx was tso */

qidx_t *tx_rsq;
bool tx_tso; /* last tx was tso */
uint8_t me;
qidx_t tx_rs_cidx;
qidx_t tx_rs_pidx;
qidx_t tx_cidx_processed;
/* Interrupt resources */
void *tag;
struct resource *res;

@@ -532,10 +532,7 @@ typedef struct _em_vendor_info_t {
unsigned int index;
} em_vendor_info_t;

struct em_txbuffer {
int eop;
};

void em_dump_rs(struct adapter *);

#define EM_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF)
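The struct tx_ring change above is what backs that mechanism: tx_rsq plus the tx_rs_cidx/tx_rs_pidx/tx_cidx_processed indices take over from struct em_txbuffer and its eop field. Both drivers index tx_rsq with "& (ntxd - 1)", so the array holds one slot per tx descriptor and the code relies on isc_ntxd[0] being a power of two. The allocation site is not in the hunks shown here (it sits in the file whose diff was suppressed above); a plausible shape, stated purely as an assumption:

/* hypothetical allocation: one report-status slot per tx descriptor */
txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0],
    M_DEVBUF, M_WAITOK | M_ZERO);
txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;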
@@ -27,7 +27,7 @@
/* $FreeBSD$ */
#include "if_em.h"

#ifdef RSS
#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

@@ -42,14 +42,14 @@
* Local Function prototypes
*********************************************************************/
static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void igb_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx, bool clear);
static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx);
static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget);

static void igb_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buf_len __unused);
static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx);
static int igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
int budget);
static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status);

@@ -61,7 +61,7 @@ static int igb_determine_rsstype(u16 pkt_info);
extern void igb_if_enable_intr(if_ctx_t ctx);
extern int em_intr(void *arg);

struct if_txrx igb_txrx = {
struct if_txrx igb_txrx = {
igb_isc_txd_encap,
igb_isc_txd_flush,
igb_isc_txd_credits_update,

@@ -84,34 +84,34 @@ static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
{
struct e1000_adv_tx_context_desc *TXD;
struct adapter *adapter = txr->adapter;
u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 paylen;

switch(pi->ipi_etype) {
case ETHERTYPE_IPV6:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
break;
case ETHERTYPE_IP:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
/* Tell transmit desc to also do IPv4 checksum. */
*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
break;
default:
panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
__func__, ntohs(pi->ipi_etype));
break;
}
struct adapter *adapter = txr->adapter;
u32 type_tucmd_mlhl = 0, vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 paylen;

TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
switch(pi->ipi_etype) {
case ETHERTYPE_IPV6:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
break;
case ETHERTYPE_IP:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
/* Tell transmit desc to also do IPv4 checksum. */
*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
break;
default:
panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
__func__, ntohs(pi->ipi_etype));
break;
}

/* This is used in the transmit desc in encap */
paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];

/* VLAN MACLEN IPLEN */
/* This is used in the transmit desc in encap */
paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;

/* VLAN MACLEN IPLEN */
if (pi->ipi_mflags & M_VLANTAG) {
vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
}

vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

@@ -132,11 +132,11 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *oli
TXD->mss_l4len_idx = htole32(mss_l4len_idx);

TXD->seqnum_seed = htole32(0);
*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

return (1);

return (1);
}

/*********************************************************************

@@ -147,29 +147,29 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *oli
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *olinfo_status)
{
struct e1000_adv_tx_context_desc *TXD;
struct e1000_adv_tx_context_desc *TXD;
struct adapter *adapter = txr->adapter;
u32 vlan_macip_lens, type_tucmd_mlhl;
u32 vlan_macip_lens, type_tucmd_mlhl;
u32 mss_l4len_idx;
mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
int offload = TRUE;

/* First check if TSO is to be used */
/* First check if TSO is to be used */
if (pi->ipi_csum_flags & CSUM_TSO)
return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

/* Indicate the whole packet as payload when not doing TSO */
*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;
/* Indicate the whole packet as payload when not doing TSO */
*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

/* Now ready a context descriptor */
TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];

/*
/*
** In advanced descriptors the vlan tag must
** be placed into the context descriptor. Hence
** we need to make one even if not doing offloads.
*/
if (pi->ipi_mflags & M_VLANTAG) {
if (pi->ipi_mflags & M_VLANTAG) {
vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
} else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
return (0);

@@ -179,108 +179,92 @@ igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, u32 *cmd_type_len, u32 *
vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

switch(pi->ipi_etype) {
case ETHERTYPE_IP:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
break;
case ETHERTYPE_IPV6:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
break;
default:
offload = FALSE;
break;
case ETHERTYPE_IP:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
break;
case ETHERTYPE_IPV6:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
break;
default:
offload = FALSE;
break;
}

vlan_macip_lens |= pi->ipi_ip_hlen;

vlan_macip_lens |= pi->ipi_ip_hlen;
type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

switch (pi->ipi_ipproto) {
case IPPROTO_TCP:
#if __FreeBSD_version >= 1000000
if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
#else
if (pi->ipi_csum_flags & CSUM_TCP)
#endif
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
case IPPROTO_UDP:
#if __FreeBSD_version >= 1000000
if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
#else
if (pi->ipi_csum_flags & CSUM_UDP)
#endif
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
break;

#if __FreeBSD_version >= 800000
case IPPROTO_SCTP:
#if __FreeBSD_version >= 1000000
if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
#else
if (pi->ipi_csum_flags & CSUM_SCTP)
#endif
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
#endif
default:
offload = FALSE;
break;
case IPPROTO_TCP:
if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
case IPPROTO_UDP:
if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
break;
case IPPROTO_SCTP:
if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
default:
offload = FALSE;
break;
}

if (offload) /* For the TX descriptor setup */
*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

/* 82575 needs the queue index added */
if (adapter->hw.mac.type == e1000_82575)
mss_l4len_idx = txr->me << 4;

/* Now copy bits into descriptor */
TXD->vlan_macip_lens = htole32(vlan_macip_lens);
TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
TXD->seqnum_seed = htole32(0);
TXD->mss_l4len_idx = htole32(mss_l4len_idx);

return (1);
}

static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
struct em_txbuffer *txbuf;
union e1000_adv_tx_desc *txd = NULL;

int i, j, first, pidx_last;
u32 olinfo_status, cmd_type_len;
struct adapter *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
union e1000_adv_tx_desc *txd = NULL;
int i, j, first, pidx_last;
u32 olinfo_status, cmd_type_len, txd_flags;
qidx_t ntxd;

pidx_last = olinfo_status = 0;
/* Basic descriptor defines */
cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);

E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);

if (pi->ipi_mflags & M_VLANTAG)
cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

first = i = pi->ipi_pidx;

ntxd = scctx->isc_ntxd[0];
txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
/* Consume the first descriptor */
i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
if (i == scctx->isc_ntxd[0])
i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
if (i == scctx->isc_ntxd[0])
i = 0;

/* 82575 needs the queue index added */
if (sc->hw.mac.type == e1000_82575)
olinfo_status |= txr->me << 4;

for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
bus_addr_t segaddr;

txbuf = &txr->tx_buffers[i];
txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
seglen = segs[j].ds_len;
segaddr = htole64(segs[j].ds_addr);

@@ -294,108 +278,91 @@ igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
i = 0;
}
}

txd->read.cmd_type_len |=
htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

/* Set the EOP descriptor that will be marked done */
txbuf = &txr->tx_buffers[first];
txbuf->eop = pidx_last;
if (txd_flags) {
txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & (ntxd-1);
MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
}

txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
pi->ipi_new_pidx = i;

return (0);
}

static void
igb_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
struct adapter *adapter = arg;
struct em_tx_queue *que = &adapter->tx_queues[txqid];
struct tx_ring *txr = &que->txr;

E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
struct adapter *adapter = arg;
struct em_tx_queue *que = &adapter->tx_queues[txqid];
struct tx_ring *txr = &que->txr;

E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
}

static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear)
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_tx_queue *que = &adapter->tx_queues[txqid];
struct tx_ring *txr = &que->txr;
struct tx_ring *txr = &que->txr;

u32 cidx, ntxd, processed = 0;
qidx_t processed = 0;
int updated;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
uint8_t status;

struct em_txbuffer *buf;
union e1000_adv_tx_desc *txd, *eop;
int limit;

cidx = cidx_init;
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx == txr->tx_rs_pidx)
return (0);
cur = txr->tx_rsq[rs_cidx];
status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
updated = !!(status & E1000_TXD_STAT_DD);

buf = &txr->tx_buffers[cidx];
txd = (union e1000_adv_tx_desc *)&txr->tx_base[cidx];
if (!clear || !updated)
return (updated);

prev = txr->tx_cidx_processed;
ntxd = scctx->isc_ntxd[0];
limit = adapter->tx_process_limit;

do {
if (buf->eop == -1) /* No work */
delta = (int32_t)cur - (int32_t)prev;
MPASS(prev == 0 || delta != 0);
if (delta < 0)
delta += ntxd;

processed += delta;
prev = cur;
rs_cidx = (rs_cidx + 1) & (ntxd-1);
if (rs_cidx == txr->tx_rs_pidx)
break;
cur = txr->tx_rsq[rs_cidx];
status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
} while ((status & E1000_TXD_STAT_DD));

eop = (union e1000_adv_tx_desc *)&txr->tx_base[buf->eop];
if ((eop->wb.status & E1000_TXD_STAT_DD) == 0)
break; /* I/O not complete */

if (clear)
buf->eop = -1; /* clear indicate processed */

/* We clean the range if multi segment */
while (txd != eop) {
++txd;
++buf;
/* wrap the ring? */
if (++cidx == scctx->isc_ntxd[0]) {
cidx = 0;
buf = txr->tx_buffers;
txd = (union e1000_adv_tx_desc *)txr->tx_base;
}

buf = &txr->tx_buffers[cidx];
if (clear)
buf->eop = -1;
processed++;
}
processed++;

/* Try the next packet */
txd++;
buf++;

/* reset with a wrap */
if (++cidx == scctx->isc_ntxd[0]) {
cidx = 0;
buf = txr->tx_buffers;
txd = (union e1000_adv_tx_desc *)txr->tx_base;
}
prefetch(txd);
prefetch(txd+1);
} while (__predict_true(--limit) && cidx != cidx_init);

txr->tx_rs_cidx = rs_cidx;
txr->tx_cidx_processed = prev;
return (processed);
}
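Both new credits_update implementations treat the bool clear argument as a two-phase protocol: with clear == false they only report whether the head of the report-status queue has completed, and only with clear == true do they retire descriptors and advance tx_rs_cidx/tx_cidx_processed (see the early "if (!clear || !updated) return (updated)" above). iflib drives this through the if_txrx vector; a hypothetical illustration of the calling pattern, not iflib's actual code:

/* poll cheaply first, then reclaim once something has completed */
if (igb_isc_txd_credits_update(sc, txqid, false))
	reclaimed = igb_isc_txd_credits_update(sc, txqid, true);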
|
||||
|
||||
static void
|
||||
igb_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
|
||||
uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused,
|
||||
uint16_t count, uint16_t buf_len __unused)
|
||||
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
|
||||
{
|
||||
struct adapter *sc = arg;
|
||||
if_softc_ctx_t scctx = sc->shared;
|
||||
struct em_rx_queue *que = &sc->rx_queues[rxqid];
|
||||
struct adapter *sc = arg;
|
||||
if_softc_ctx_t scctx = sc->shared;
|
||||
uint16_t rxqid = iru->iru_qsidx;
|
||||
struct em_rx_queue *que = &sc->rx_queues[rxqid];
|
||||
union e1000_adv_rx_desc *rxd;
|
||||
struct rx_ring *rxr = &que->rxr;
|
||||
int i;
|
||||
uint32_t next_pidx;
|
||||
struct rx_ring *rxr = &que->rxr;
|
||||
uint64_t *paddrs;
|
||||
uint32_t next_pidx, pidx;
|
||||
uint16_t count;
|
||||
int i;
|
||||
|
||||
paddrs = iru->iru_paddrs;
|
||||
pidx = iru->iru_pidx;
|
||||
count = iru->iru_count;
|
||||
|
||||
for (i = 0, next_pidx = pidx; i < count; i++) {
|
||||
rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];
|
||||
@ -407,33 +374,39 @@ igb_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
|
||||
}
|
||||
|
||||
static void
|
||||
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
|
||||
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
|
||||
{
|
||||
struct adapter *sc = arg;
|
||||
struct em_rx_queue *que = &sc->rx_queues[rxqid];
|
||||
struct rx_ring *rxr = &que->rxr;
|
||||
struct adapter *sc = arg;
|
||||
struct em_rx_queue *que = &sc->rx_queues[rxqid];
|
||||
struct rx_ring *rxr = &que->rxr;
|
||||
|
||||
E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
|
||||
}
|
||||
|
||||
static int
|
||||
igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
|
||||
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
|
||||
{
|
||||
struct adapter *sc = arg;
|
||||
if_softc_ctx_t scctx = sc->shared;
|
||||
struct em_rx_queue *que = &sc->rx_queues[rxqid];
|
||||
struct rx_ring *rxr = &que->rxr;
|
||||
struct adapter *sc = arg;
|
||||
if_softc_ctx_t scctx = sc->shared;
|
||||
struct em_rx_queue *que = &sc->rx_queues[rxqid];
|
||||
struct rx_ring *rxr = &que->rxr;
|
||||
union e1000_adv_rx_desc *rxd;
|
||||
u32 staterr = 0;
|
||||
int cnt, i, iter;
|
||||
u32 staterr = 0;
|
||||
int cnt, i, iter;
|
||||
|
||||
if (budget == 1) {
|
||||
rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[idx];
|
||||
staterr = le32toh(rxd->wb.upper.status_error);
|
||||
return (staterr & E1000_RXD_STAT_DD);
|
||||
}
|
||||
|
||||
for (iter = cnt = 0, i = idx; iter < scctx->isc_nrxd[0] && iter <= budget;) {
|
||||
rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
|
||||
staterr = le32toh(rxd->wb.upper.status_error);
|
||||
|
||||
staterr = le32toh(rxd->wb.upper.status_error);
|
||||
|
||||
if ((staterr & E1000_RXD_STAT_DD) == 0)
|
||||
break;
|
||||
|
||||
|
||||
if (++i == scctx->isc_nrxd[0]) {
|
||||
i = 0;
|
||||
}
|
||||
@ -442,13 +415,6 @@ igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
|
||||
cnt++;
|
||||
iter++;
|
||||
}
|
||||
{
|
||||
struct e1000_hw *hw = &sc->hw;
|
||||
int rdt, rdh;
|
||||
rdt = E1000_READ_REG(hw, E1000_RDT(rxr->me));
|
||||
rdh = E1000_READ_REG(hw, E1000_RDH(rxr->me));
|
||||
DPRINTF(iflib_get_dev(sc->ctx), "sidx:%d eidx:%d iter=%d pktcnt=%d RDT=%d RDH=%d\n", idx, i, iter, cnt, rdt, rdh);
|
||||
}
|
||||
return (cnt);
|
||||
}
|
||||
|
||||
@ -462,39 +428,39 @@ igb_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
|
static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct adapter *adapter = arg;
if_softc_ctx_t scctx = adapter->shared;
struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
struct ifnet *ifp = iflib_get_ifp(adapter->ctx);
union e1000_adv_rx_desc *rxd;

u16 pkt_info, len;
u16 vtag = 0;
u32 ptype;
u32 staterr = 0;
bool eop;
int i = 0;
int cidx = ri->iri_cidx;

do {
rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
staterr = le32toh(rxd->wb.upper.status_error);
pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

MPASS ((staterr & E1000_RXD_STAT_DD) != 0);

len = le16toh(rxd->wb.upper.length);
ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

ri->iri_len += len;
rxr->rx_bytes += ri->iri_len;

rxd->wb.upper.status_error = 0;
eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

if (((adapter->hw.mac.type == e1000_i350) ||
(adapter->hw.mac.type == e1000_i354)) &&
(staterr & E1000_RXDEXT_STATERR_LB))
vtag = be16toh(rxd->wb.upper.vlan);
else
@ -509,25 +475,25 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
ri->iri_frags[i].irf_flid = 0;
ri->iri_frags[i].irf_idx = cidx;
ri->iri_frags[i].irf_len = len;

if (++cidx == scctx->isc_nrxd[0])
cidx = 0;
#ifdef notyet
if (rxr->hdr_split == TRUE) {
ri->iri_frags[i].irf_flid = 1;
ri->iri_frags[i].irf_idx = cidx;
if (++cidx == scctx->isc_nrxd[0])
cidx = 0;
}
#endif
i++;
} while (!eop);

rxr->rx_packets++;

if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
igb_rx_checksum(staterr, ri, ptype);

if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
(staterr & E1000_RXD_STAT_VP) != 0) {
ri->iri_vtag = vtag;
@ -538,7 +504,7 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
ri->iri_rsstype = igb_determine_rsstype(pkt_info);
ri->iri_nfrags = i;

return (0);
}

/*********************************************************************
@ -552,8 +518,8 @@ static void
igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
{
u16 status = (u16)staterr;
u8 errors = (u8) (staterr >> 24);
bool sctp = FALSE;

/* Ignore Checksum bit is set */
if (status & E1000_RXD_STAT_IXSM) {
@ -579,10 +545,8 @@ igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)

if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
if (sctp) /* reassign */
type = CSUM_SCTP_VALID;
#endif
/* Did it pass? */
if (!(errors & E1000_RXD_ERR_TCPE)) {
ri->iri_csum_flags |= type;
@ -598,10 +562,10 @@ igb_rx_checksum(u32 staterr, if_rxd_info_t ri, u32 ptype)
|
||||
* Parse the packet type to determine the appropriate hash
|
||||
*
|
||||
******************************************************************/
|
||||
static int
|
||||
igb_determine_rsstype(u16 pkt_info)
|
||||
static int
|
||||
igb_determine_rsstype(u16 pkt_info)
|
||||
{
|
||||
switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
|
||||
switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
|
||||
case E1000_RXDADV_RSSTYPE_IPV4_TCP:
|
||||
return M_HASHTYPE_RSS_TCP_IPV4;
|
||||
case E1000_RXDADV_RSSTYPE_IPV4:
|
||||
|
@ -195,7 +195,12 @@ METHOD void intr_disable {
if_ctx_t _ctx;
};

METHOD int queue_intr_enable {
METHOD int rx_queue_intr_enable {
if_ctx_t _ctx;
uint16_t _qid;
} DEFAULT null_queue_intr_enable;

METHOD int tx_queue_intr_enable {
if_ctx_t _ctx;
uint16_t _qid;
} DEFAULT null_queue_intr_enable;
@ -333,4 +338,6 @@ METHOD int sysctl_int_delay {
if_int_delay_info_t _iidi;
} DEFAULT null_sysctl_int_delay;

METHOD void debug {
if_ctx_t _ctx;
} DEFAULT null_void_op;

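The hunk above splits the single queue_intr_enable method into separate rx_queue_intr_enable and tx_queue_intr_enable kobj methods, matching the per-direction TX queue interrupt handling called out in the commit message; both still fall back to null_queue_intr_enable when a driver does not implement them. A rough sketch of what a driver-side pair could look like follows; the foo_* names, the msix_vector field, and the FOO_UNMASK_VECTOR macro are illustrative placeholders, not part of this commit, and only if_ctx_t, the uint16_t queue id, and iflib_get_softc() come from iflib itself.

static int
foo_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct foo_softc *sc = iflib_get_softc(ctx);

	/* Unmask only the MSI-X vector tied to this RX queue. */
	FOO_UNMASK_VECTOR(sc, sc->rx_queues[rxqid].msix_vector);
	return (0);
}

static int
foo_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct foo_softc *sc = iflib_get_softc(ctx);

	/* Same idea for the TX completion vector. */
	FOO_UNMASK_VECTOR(sc, sc->tx_queues[txqid].msix_vector);
	return (0);
}
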
sys/net/iflib.c (1127): file diff suppressed because it is too large
sys/net/iflib.h (114):
@ -37,7 +37,13 @@
#include <sys/nv.h>
#include <sys/gtaskqueue.h>

/*
* The value type for indexing, limits max descriptors
* to 65535 can be conditionally redefined to uint32_t
* in the future if the need arises.
*/
typedef uint16_t qidx_t;
#define QIDX_INVALID 0xFFFF
/*
* Most cards can handle much larger TSO requests
* but the FreeBSD TCP stack will break on larger
@ -63,7 +69,7 @@ typedef struct if_int_delay_info *if_int_delay_info_t;

typedef struct if_rxd_frag {
uint8_t irf_flid;
uint16_t irf_idx;
qidx_t irf_idx;
uint16_t irf_len;
} *if_rxd_frag_t;

@ -73,47 +79,61 @@ typedef struct if_rxd_info {
uint16_t iri_vtag; /* vlan tag - if flag set */
/* XXX redundant with the new irf_len field */
uint16_t iri_len; /* packet length */
uint16_t iri_cidx; /* consumer index of cq */
qidx_t iri_cidx; /* consumer index of cq */
struct ifnet *iri_ifp; /* some drivers >1 interface per softc */

/* updated by driver */
uint16_t iri_flags; /* mbuf flags for packet */
if_rxd_frag_t iri_frags;
uint32_t iri_flowid; /* RSS hash for packet */
uint32_t iri_csum_flags; /* m_pkthdr csum flags */

uint32_t iri_csum_data; /* m_pkthdr csum data */
uint8_t iri_flags; /* mbuf flags for packet */
uint8_t iri_nfrags; /* number of fragments in packet */
uint8_t iri_rsstype; /* RSS hash type */
uint8_t iri_pad; /* any padding in the received data */
if_rxd_frag_t iri_frags;
} *if_rxd_info_t;

typedef struct if_rxd_update {
uint64_t *iru_paddrs;
caddr_t *iru_vaddrs;
qidx_t *iru_idxs;
qidx_t iru_pidx;
uint16_t iru_qsidx;
uint16_t iru_count;
uint16_t iru_buf_size;
uint8_t iru_flidx;
} *if_rxd_update_t;

#define IPI_TX_INTR 0x1 /* send an interrupt when this packet is sent */
#define IPI_TX_IPV4 0x2 /* ethertype IPv4 */
#define IPI_TX_IPV6 0x4 /* ethertype IPv6 */

typedef struct if_pkt_info {
uint32_t ipi_len; /* packet length */
bus_dma_segment_t *ipi_segs; /* physical addresses */
uint16_t ipi_qsidx; /* queue set index */
uint16_t ipi_nsegs; /* number of segments */
uint16_t ipi_ndescs; /* number of descriptors used by encap */
uint16_t ipi_flags; /* iflib per-packet flags */
uint32_t ipi_pidx; /* start pidx for encap */
uint32_t ipi_new_pidx; /* next available pidx post-encap */
bus_dma_segment_t *ipi_segs; /* physical addresses */
uint32_t ipi_len; /* packet length */
uint16_t ipi_qsidx; /* queue set index */
qidx_t ipi_nsegs; /* number of segments */

qidx_t ipi_ndescs; /* number of descriptors used by encap */
uint16_t ipi_flags; /* iflib per-packet flags */
qidx_t ipi_pidx; /* start pidx for encap */
qidx_t ipi_new_pidx; /* next available pidx post-encap */
/* offload handling */
uint64_t ipi_csum_flags; /* packet checksum flags */
uint16_t ipi_tso_segsz; /* tso segment size */
uint16_t ipi_mflags; /* packet mbuf flags */
uint16_t ipi_vtag; /* VLAN tag */
uint16_t ipi_etype; /* ether header type */
uint8_t ipi_ehdrlen; /* ether header length */
uint8_t ipi_ip_hlen; /* ip header length */
uint8_t ipi_tcp_hlen; /* tcp header length */
uint8_t ipi_tcp_hflags; /* tcp header flags */
uint8_t ipi_ipproto; /* ip protocol */
/* implied padding */
uint32_t ipi_tcp_seq; /* tcp seqno */
uint32_t ipi_tcp_sum; /* tcp csum */
uint8_t ipi_ehdrlen; /* ether header length */
uint8_t ipi_ip_hlen; /* ip header length */
uint8_t ipi_tcp_hlen; /* tcp header length */
uint8_t ipi_ipproto; /* ip protocol */

uint32_t ipi_csum_flags; /* packet checksum flags */
uint16_t ipi_tso_segsz; /* tso segment size */
uint16_t ipi_vtag; /* VLAN tag */
uint16_t ipi_etype; /* ether header type */
uint8_t ipi_tcp_hflags; /* tcp header flags */
uint8_t ipi_mflags; /* packet mbuf flags */

uint32_t ipi_tcp_seq; /* tcp seqno */
uint32_t ipi_tcp_sum; /* tcp csum */
} *if_pkt_info_t;

typedef struct if_irq {
@ -156,15 +176,13 @@ typedef struct pci_vendor_info {

typedef struct if_txrx {
int (*ift_txd_encap) (void *, if_pkt_info_t);
void (*ift_txd_flush) (void *, uint16_t, uint32_t);
int (*ift_txd_credits_update) (void *, uint16_t, uint32_t, bool);
void (*ift_txd_flush) (void *, uint16_t, qidx_t pidx);
int (*ift_txd_credits_update) (void *, uint16_t qsidx, bool clear);

int (*ift_rxd_available) (void *, uint16_t qsidx, uint32_t pidx,
int budget);
int (*ift_rxd_available) (void *, uint16_t qsidx, qidx_t pidx, qidx_t budget);
int (*ift_rxd_pkt_get) (void *, if_rxd_info_t ri);
void (*ift_rxd_refill) (void * , uint16_t qsidx, uint8_t flidx, uint32_t pidx,
uint64_t *paddrs, caddr_t *vaddrs, uint16_t count, uint16_t buf_size);
void (*ift_rxd_flush) (void *, uint16_t qsidx, uint8_t flidx, uint32_t pidx);
void (*ift_rxd_refill) (void * , if_rxd_update_t iru);
void (*ift_rxd_flush) (void *, uint16_t qsidx, uint8_t flidx, qidx_t pidx);
int (*ift_legacy_intr) (void *);
} *if_txrx_t;

@ -179,6 +197,10 @@ typedef struct if_softc_ctx {

uint32_t isc_txqsizes[8];
uint32_t isc_rxqsizes[8];
/* is there such thing as a descriptor that is more than 248 bytes ? */
uint8_t isc_txd_size[8];
uint8_t isc_rxd_size[8];

int isc_max_txqsets;
int isc_max_rxqsets;
int isc_tx_tso_segments_max;
@ -203,22 +225,14 @@ typedef struct if_softc_ctx {
struct if_shared_ctx {
int isc_magic;
driver_t *isc_driver;
int isc_nfl;
int isc_flags;
bus_size_t isc_q_align;
bus_size_t isc_tx_maxsize;
bus_size_t isc_tx_maxsegsize;
bus_size_t isc_rx_maxsize;
bus_size_t isc_rx_maxsegsize;
int isc_rx_nsegments;
int isc_rx_process_limit;
int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */
int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
int isc_admin_intrcnt; /* # of admin/link interrupts */

int isc_tx_reclaim_thresh;

/* fields necessary for probe */
pci_vendor_info_t *isc_vendor_info;
char *isc_driver_version;
@ -231,6 +245,14 @@ struct if_shared_ctx {
int isc_ntxd_min[8];
int isc_ntxd_default[8];
int isc_ntxd_max[8];

/* actively used during operation */
int isc_nfl __aligned(CACHE_LINE_SIZE);
int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */
int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
int isc_rx_process_limit;
int isc_tx_reclaim_thresh;
int isc_flags;
};

typedef struct iflib_dma_info {
@ -244,8 +266,9 @@ typedef struct iflib_dma_info {
#define IFLIB_MAGIC 0xCAFEF00D

typedef enum {
IFLIB_INTR_TX,
IFLIB_INTR_RX,
IFLIB_INTR_TX,
IFLIB_INTR_RXTX,
IFLIB_INTR_ADMIN,
IFLIB_INTR_IOV,
} iflib_intr_type_t;
@ -279,6 +302,10 @@ typedef enum {
* Interface doesn't expect in_pseudo for th_sum
*/
#define IFLIB_TSO_INIT_IP 0x20
/*
* Interface doesn't align IP header
*/
#define IFLIB_DO_RX_FIXUP 0x40

@ -298,9 +325,6 @@ if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx);

void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]);

/*
* If the driver can plug cleanly in to newbus use these
*/

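Taken together, the iflib.h changes above narrow ring indices to qidx_t and collapse the long ift_rxd_refill argument list into a single if_rxd_update_t. A minimal sketch of a refill callback written against the new interface is below; everything prefixed foo_ (the softc, ring structure, descriptor-write helper, and ring_size field) is hypothetical, and only the iru_* fields come from the structure defined in this diff.

static void
foo_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct foo_softc *sc = arg;
	struct foo_rx_ring *rxr = &sc->rx_rings[iru->iru_qsidx];
	qidx_t pidx = iru->iru_pidx;
	uint16_t i;

	for (i = 0; i < iru->iru_count; i++) {
		/* Post one buffer physical address per free-list slot. */
		foo_write_rxd(rxr, iru->iru_flidx, pidx,
		    iru->iru_paddrs[i], iru->iru_buf_size);
		if (++pidx == rxr->ring_size)
			pidx = 0;
	}
}

The driver hunks in this commit follow the same pattern: unpack the iru_* fields once at the top of the callback, then walk the descriptor ring exactly as the old per-argument version did.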