2017 IFLIB updates in preparation for commits to e1000 and ixgbe.

- iflib - add checksum-in-place support (mmacy)
- iflib - initialize the IP header for TSO (will be needed for e1000) (mmacy)
- iflib - move isc_txrx from the shared context to the softc context (mmacy)
- iflib - normalize checks in TXQ drainage (shurd)
- iflib - fix queue capping checks (mmacy)
- iflib - fix invalid assert; em(4) can need two sentinels (mmacy)
- iflib - let the driver determine which capabilities are set and which
          TX csum flags are used (mmacy; sketched below)
- add INVARIANTS debugging hooks to gtaskqueue enqueue (mmacy)
- update bnxt(4) to support the changes to iflib (shurd)

Some other various and sundry updates.  Slightly more verbose changelog:

Submitted by:	mmacy@nextbsd.org
Reviewed by:	shurd
MFC after:
Sponsored by:	LimeLight Networks and Dell EMC Isilon
Sean Bruno 2017-01-02 00:56:33 +00:00
parent 1569205f0a
commit 1248952a50
5 changed files with 145 additions and 66 deletions
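
Before the per-file diffs, a condensed sketch (not part of the original commit) of the driver-side contract the iflib bullets above describe: after this change a driver publishes its txrx ops, TX checksum flags, and interface capabilities through the softc context in its attach_pre routine, and iflib applies them itself instead of applying a fixed DEFAULT_CAPS mask in iflib_register(). The "foo" names below are hypothetical placeholders; the scctx fields are the if_softc_ctx members added in this commit.

/*
 * Hypothetical example, not part of this commit: a driver attach_pre
 * using the new if_softc_ctx fields.  All "foo" names are placeholders.
 */
extern struct if_txrx foo_txrx;		/* the driver's txd/rxd ops */

static int
foo_attach_pre(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);

	/* txrx ops now live in the softc context, not if_shared_ctx */
	scctx->isc_txrx = &foo_txrx;

	/* CSUM_* bits iflib is allowed to load into if_hwassist */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO;

	/* capabilities iflib sets and enables once attach_pre returns */
	scctx->isc_capenable = IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
	    IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;

	/* ... remaining device-specific setup ... */
	return (0);
}

The bnxt_attach_pre() changes in the first file below are the in-tree instance of this pattern.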

sys/dev/bnxt/if_bnxt.c

@ -277,7 +277,6 @@ char bnxt_driver_version[] = "FreeBSD base";
extern struct if_txrx bnxt_txrx;
static struct if_shared_ctx bnxt_sctx_init = {
.isc_magic = IFLIB_MAGIC,
.isc_txrx = &bnxt_txrx,
.isc_driver = &bnxt_iflib_driver,
.isc_nfl = 2, // Number of Free Lists
.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ,
@ -679,6 +678,20 @@ bnxt_attach_pre(if_ctx_t ctx)
goto failed;
iflib_set_mac(ctx, softc->func.mac_addr);
scctx->isc_txrx = &bnxt_txrx;
scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
scctx->isc_capenable =
/* These are translated to hwassit bits */
IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
/* These are checked by iflib */
IFCAP_LRO | IFCAP_VLAN_HWFILTER |
/* These are part of the iflib mask */
IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
/* These likely get lost... */
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
/* Get the queue config */
rc = bnxt_hwrm_queue_qportcfg(softc);
if (rc) {
@ -793,7 +806,6 @@ bnxt_attach_post(if_ctx_t ctx)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
int capabilities, enabling;
int rc;
bnxt_create_config_sysctls_post(softc);
@ -808,26 +820,6 @@ bnxt_attach_post(if_ctx_t ctx)
bnxt_add_media_types(softc);
ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
if_sethwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 |
CSUM_UDP_IPV6 | CSUM_TSO));
capabilities =
/* These are translated to hwassit bits */
IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
/* These are checked by iflib */
IFCAP_LRO | IFCAP_VLAN_HWFILTER |
/* These are part of the iflib mask */
IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
/* These likely get lost... */
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
if_setcapabilities(ifp, capabilities);
enabling = capabilities;
if_setcapenable(ifp, enabling);
softc->scctx->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN;

sys/kern/subr_gtaskqueue.c

@ -99,6 +99,15 @@ struct gtaskqueue {
} while (0)
#define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
#ifdef INVARIANTS
static void
gtask_dump(struct gtask *gtask)
{
printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n",
gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func, gtask->ta_context);
}
#endif
static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
int t)
@ -172,6 +181,12 @@ gtaskqueue_free(struct gtaskqueue *queue)
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
#ifdef INVARIANTS
if (queue == NULL) {
gtask_dump(gtask);
panic("queue == NULL");
}
#endif
TQ_LOCK(queue);
if (gtask->ta_flags & TASK_ENQUEUED) {
TQ_UNLOCK(queue);

sys/net/ifdi_if.m

@ -229,6 +229,7 @@ METHOD int promisc_set {
METHOD void crcstrip_set {
if_ctx_t _ctx;
int _onoff;
int _strip;
};
#

sys/net/iflib.c

@ -355,6 +355,9 @@ struct iflib_txq {
char ift_mtx_name[MTX_NAME_LEN];
char ift_db_mtx_name[MTX_NAME_LEN];
bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);
struct iflib_fl {
@ -431,6 +434,9 @@ struct iflib_rxq {
iflib_dma_info_t ifr_ifdi;
/* dynamically allocate if any drivers need a value substantially larger than this */
struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);
/*
@ -632,7 +638,7 @@ static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
#ifdef DEV_NETMAP
#include <sys/selinfo.h>
@ -684,7 +690,7 @@ iflib_netmap_register(struct netmap_adapter *na, int onoff)
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
if (!CTX_IS_VF(ctx))
IFDI_CRCSTRIP_SET(ctx, onoff);
IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);
/* enable or disable flags and callbacks in na and ifp */
if (onoff) {
@ -693,7 +699,7 @@ iflib_netmap_register(struct netmap_adapter *na, int onoff)
nm_clear_native_flags(na);
}
IFDI_INIT(ctx);
IFDI_CRCSTRIP_SET(ctx, onoff); // XXX why twice ?
IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
CTX_UNLOCK(ctx);
return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}
@ -1192,6 +1198,9 @@ iflib_fast_intr(void *arg)
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
if (!smp_started)
return (FILTER_HANDLED);
DBG_COUNTER_INC(fast_intrs);
if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
return (FILTER_HANDLED);
@ -1410,7 +1419,7 @@ iflib_txq_setup(iflib_txq_t txq)
iflib_dma_info_t di;
int i;
/* Set number of descriptors available */
/* Set number of descriptors available */
txq->ift_qstatus = IFLIB_QUEUE_IDLE;
/* Reset indices */
@ -1833,22 +1842,25 @@ static void
iflib_init_locked(if_ctx_t ctx)
{
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
if_t ifp = ctx->ifc_ifp;
iflib_fl_t fl;
iflib_txq_t txq;
iflib_rxq_t rxq;
int i, j;
int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
IFDI_INTR_DISABLE(ctx);
tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
/* Set hardware offload abilities */
if_clearhwassist(ifp);
if (if_getcapenable(ifp) & IFCAP_TXCSUM)
if_sethwassistbits(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP, 0);
if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);
if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
if (if_getcapenable(ifp) & IFCAP_TSO4)
if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
if (if_getcapenable(ifp) & IFCAP_TSO6)
@ -2258,10 +2270,21 @@ print_pkt(if_pkt_info_t pi)
static int
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
{
if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
struct ether_vlan_header *eh;
struct mbuf *m, *n;
n = m = *mp;
if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) &&
M_WRITABLE(m) == 0) {
if ((m = m_dup(m, M_NOWAIT)) == NULL) {
return (ENOMEM);
} else {
m_freem(*mp);
n = *mp = m;
}
}
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present,
@ -2345,6 +2368,10 @@ iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
th->th_sum = in_pseudo(ip->ip_src.s_addr,
ip->ip_dst.s_addr, htons(IPPROTO_TCP));
pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
if (sctx->isc_flags & IFLIB_TSO_INIT_IP) {
ip->ip_sum = 0;
ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
}
}
break;
}
@ -2396,10 +2423,10 @@ iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
break;
}
*mp = m;
return (0);
}
static __noinline struct mbuf *
collapse_pkthdr(struct mbuf *m0)
{
@ -2688,6 +2715,11 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
ndesc += txq->ift_size;
txq->ift_gen = 1;
}
/*
* drivers can need as many as
* two sentinels
*/
MPASS(ndesc <= pi.ipi_nsegs + 2);
MPASS(pi.ipi_new_pidx != pidx);
MPASS(ndesc > 0);
txq->ift_in_use += ndesc;
@ -2855,7 +2887,7 @@ iflib_txq_can_drain(struct ifmp_ring *r)
iflib_txq_t txq = r->cookie;
if_ctx_t ctx = txq->ift_ctx;
return ((TXQ_AVAIL(txq) >= MAX_TX_DESC(ctx)) ||
return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, txq->ift_cidx_processed, false));
}
@ -2930,13 +2962,13 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
break;
if (desc_used > TXQ_MAX_DB_CONSUMED(txq->ift_size))
if (desc_used >= TXQ_MAX_DB_CONSUMED(txq->ift_size))
break;
}
if ((iflib_min_tx_latency || iflib_txq_min_occupancy(txq)) && txq->ift_db_pending)
iflib_txd_db_check(ctx, txq, TRUE);
else if ((txq->ift_db_pending || TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)) &&
else if ((txq->ift_db_pending || TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2) &&
(callout_pending(&txq->ift_db_check) == 0)) {
txq->ift_db_pending_queued = txq->ift_db_pending;
callout_reset_on(&txq->ift_db_check, 1, iflib_txd_deferred_db_check,
@ -3004,6 +3036,9 @@ _task_fn_tx(void *context)
iflib_txq_t txq = context;
if_ctx_t ctx = txq->ift_ctx;
#ifdef IFLIB_DIAGNOSTICS
txq->ift_cpu_exec_count[curcpu]++;
#endif
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
return;
ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE);
@ -3017,6 +3052,9 @@ _task_fn_rx(void *context)
bool more;
int rc;
#ifdef IFLIB_DIAGNOSTICS
rxq->ifr_cpu_exec_count[curcpu]++;
#endif
DBG_COUNTER_INC(task_fn_rxs);
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
return;
@ -3520,7 +3558,6 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
ctx->ifc_sctx = sctx;
ctx->ifc_dev = dev;
ctx->ifc_txrx = *sctx->isc_txrx;
ctx->ifc_softc = sc;
if ((err = iflib_register(ctx)) != 0) {
@ -3530,6 +3567,8 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
iflib_add_device_sysctl_pre(ctx);
scctx = &ctx->ifc_softc_ctx;
ifp = ctx->ifc_ifp;
/*
* XXX sanity check that ntxd & nrxd are a power of 2
*/
@ -3582,10 +3621,22 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
return (err);
}
if (scctx->isc_ntxqsets_max)
scctx->isc_ntxqsets = min(scctx->isc_ntxqsets, scctx->isc_ntxqsets_max);
if (scctx->isc_nrxqsets_max)
scctx->isc_nrxqsets = min(scctx->isc_nrxqsets, scctx->isc_nrxqsets_max);
_iflib_pre_assert(scctx);
ctx->ifc_txrx = *scctx->isc_txrx;
#ifdef INVARIANTS
MPASS(scctx->isc_capenable);
if (scctx->isc_capenable & IFCAP_TXCSUM)
MPASS(scctx->isc_tx_csum_flags);
#endif
if_setcapabilities(ifp, scctx->isc_capenable);
if_setcapenable(ifp, scctx->isc_capenable);
if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
#ifdef ACPI_DMAR
if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
@ -3594,8 +3645,6 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
msix_bar = scctx->isc_msix_bar;
ifp = ctx->ifc_ifp;
if(sctx->isc_flags & IFLIB_HAS_TXCQ)
main_txq = 1;
else
@ -3679,6 +3728,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
goto fail_queues;
}
IFDI_INTR_DISABLE(ctx);
if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
goto fail_intr_free;
@ -3948,15 +3998,6 @@ _iflib_assert(if_shared_ctx_t sctx)
MPASS(sctx->isc_rx_nsegments);
MPASS(sctx->isc_rx_maxsegsize);
MPASS(sctx->isc_txrx->ift_txd_encap);
MPASS(sctx->isc_txrx->ift_txd_flush);
MPASS(sctx->isc_txrx->ift_txd_credits_update);
MPASS(sctx->isc_txrx->ift_rxd_available);
MPASS(sctx->isc_txrx->ift_rxd_pkt_get);
MPASS(sctx->isc_txrx->ift_rxd_refill);
MPASS(sctx->isc_txrx->ift_rxd_flush);
MPASS(sctx->isc_nrxd_min[0]);
MPASS(sctx->isc_nrxd_max[0]);
MPASS(sctx->isc_nrxd_default[0]);
@ -3965,9 +4006,18 @@ _iflib_assert(if_shared_ctx_t sctx)
MPASS(sctx->isc_ntxd_default[0]);
}
#define DEFAULT_CAPS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | \
IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | IFCAP_HWSTATS)
static void
_iflib_pre_assert(if_softc_ctx_t scctx)
{
MPASS(scctx->isc_txrx->ift_txd_encap);
MPASS(scctx->isc_txrx->ift_txd_flush);
MPASS(scctx->isc_txrx->ift_txd_credits_update);
MPASS(scctx->isc_txrx->ift_rxd_available);
MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
MPASS(scctx->isc_txrx->ift_rxd_refill);
MPASS(scctx->isc_txrx->ift_rxd_flush);
}
static int
iflib_register(if_ctx_t ctx)
@ -4003,10 +4053,6 @@ iflib_register(if_ctx_t ctx)
if_setqflushfn(ifp, iflib_if_qflush);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
/* XXX - move this in to the driver for non-default settings */
if_setcapabilities(ifp, DEFAULT_CAPS);
if_setcapenable(ifp, DEFAULT_CAPS);
ctx->ifc_vlan_attach_event =
EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
EVENTHANDLER_PRI_FIRST);
@ -4507,6 +4553,13 @@ iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *
void *q;
int err;
/*
* group taskqueues aren't properly set up until SMP is started
* so we disable interrupts until we can handle them post
* SI_SUB_SMP
*/
IFDI_INTR_DISABLE(ctx);
q = &ctx->ifc_rxqs[0];
info = &rxq[0].ifr_filter_info;
gtask = &rxq[0].ifr_task;
@ -4527,9 +4580,6 @@ iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *
GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, tqrid, "tx");
GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin/link");
return (0);
}
@ -4558,6 +4608,12 @@ iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
void
iflib_admin_intr_deferred(if_ctx_t ctx)
{
#ifdef INVARIANTS
struct grouptask *gtask;
gtask = &ctx->ifc_admin_task;
MPASS(gtask->gt_taskqueue != NULL);
#endif
GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}
@ -4598,7 +4654,6 @@ iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
if_t ifp = ctx->ifc_ifp;
iflib_txq_t txq = ctx->ifc_txqs;
if_setbaudrate(ifp, baudrate);
/* If link down, disable watchdog */
@ -4614,6 +4669,9 @@ static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
int credits;
#ifdef INVARIANTS
int credits_pre = txq->ift_cidx_processed;
#endif
if (ctx->isc_txd_credits_update == NULL)
return (0);
@ -4624,6 +4682,7 @@ iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
txq->ift_processed += credits;
txq->ift_cidx_processed += credits;
MPASS(credits_pre + credits == txq->ift_cidx_processed);
if (txq->ift_cidx_processed >= txq->ift_size)
txq->ift_cidx_processed -= txq->ift_size;
return (credits);
@ -4671,6 +4730,8 @@ iflib_msix_init(if_ctx_t ctx)
iflib_num_tx_queues = scctx->isc_ntxqsets;
iflib_num_rx_queues = scctx->isc_nrxqsets;
device_printf(dev, "msix_init qsets capped at %d\n", iflib_num_tx_queues);
bar = ctx->ifc_softc_ctx.isc_msix_bar;
admincnt = sctx->isc_admin_intrcnt;
/* Override by tuneable */

sys/net/iflib.h

@ -184,6 +184,8 @@ typedef struct if_softc_ctx {
int isc_tx_tso_segments_max;
int isc_tx_tso_size_max;
int isc_tx_tso_segsize_max;
int isc_tx_csum_flags;
int isc_capenable;
int isc_rss_table_size;
int isc_rss_table_mask;
int isc_nrxqsets_max;
@ -192,6 +194,7 @@ typedef struct if_softc_ctx {
iflib_intr_mode_t isc_intr;
uint16_t isc_max_frame_size; /* set at init time by driver */
pci_vendor_info_t isc_vendor_info; /* set by iflib prior to attach_pre */
if_txrx_t isc_txrx;
} *if_softc_ctx_t;
/*
@ -199,7 +202,6 @@ typedef struct if_softc_ctx {
*/
struct if_shared_ctx {
int isc_magic;
if_txrx_t isc_txrx;
driver_t *isc_driver;
int isc_nfl;
int isc_flags;
@ -256,20 +258,28 @@ typedef enum {
/*
* Interface has a separate command queue for RX
*/
#define IFLIB_HAS_RXCQ 0x1
#define IFLIB_HAS_RXCQ 0x01
/*
* Driver has already allocated vectors
*/
#define IFLIB_SKIP_MSIX 0x2
#define IFLIB_SKIP_MSIX 0x02
/*
* Interface is a virtual function
*/
#define IFLIB_IS_VF 0x4
#define IFLIB_IS_VF 0x04
/*
* Interface has a separate command queue for TX
*/
#define IFLIB_HAS_TXCQ 0x8
#define IFLIB_HAS_TXCQ 0x08
/*
* Interface does checksum in place
*/
#define IFLIB_NEED_SCRATCH 0x10
/*
* Interface doesn't expect in_pseudo for th_sum
*/
#define IFLIB_TSO_INIT_IP 0x20
/*
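
For reference, a sketch (again not part of the commit) of how a driver would advertise the two new shared-context flags defined above: IFLIB_NEED_SCRATCH for hardware that checksums in place and therefore needs writable headers, and IFLIB_TSO_INIT_IP for hardware that wants the IP header pre-initialized for TSO, as handled in iflib_parse_header() in sys/net/iflib.c.

/* Hypothetical example; "foo" names are placeholders, not part of this commit. */
extern driver_t foo_iflib_driver;

static struct if_shared_ctx foo_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &foo_iflib_driver,
	.isc_nfl = 1,			/* number of free lists */
	/*
	 * NEED_SCRATCH: iflib m_dup()s any non-writable mbuf before
	 * parsing headers, since the hardware checksums in place.
	 * TSO_INIT_IP: iflib zeroes ip_sum and initializes ip_len when
	 * preparing a TSO packet.
	 */
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP,
};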