ixgbe(4): Convert driver to use iflib

Initial update to the ixgbe PF and VF drivers to support the iflib interface.

The PF driver version is bumped to 4.0.0, and the VF driver version is bumped to 2.0.0.

Special thanks to sbruno@ for his support in helping make this conversion happen.

Submitted by:	Jeb Cramer <cramerj@intel.com>, Krzysztof Galazka (Chris) <krzysztof.galazka@intel.com>, Piotr Pietruszewski <piotr.pietruszewski@intel.com>
Reviewed by:	sbruno@, shurd@, #IntelNetworking
Tested by:	Jeffrey Pieper <jeffrey.e.pieper@intel.com>, Sergey Kozlov <kozlov.sergey.404@gmail.com>
Sponsored by:	Limelight Networks, Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D11727
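
For readers unfamiliar with iflib, the sketch below outlines the registration pattern this conversion adopts: the legacy device probe/attach/detach entry points are delegated to iflib, and the driver instead implements ifdi_* methods from the generated ifdi_if.h. This is an illustrative outline under that assumption, not the committed if_ix.c (whose full diff is not shown in this view); apart from ixgbe_if_init, which appears in the diffs below, the ixgbe_if_* names here are placeholders mirroring the renames visible later in this page.

/*
 * Illustrative sketch only -- not the committed code.  An iflib driver
 * hands device_{probe,attach,detach} to iflib and supplies ifdi_* methods.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <net/iflib.h>
#include "ifdi_if.h"

static device_method_t ix_methods[] = {
	DEVMETHOD(device_register, ixgbe_register),   /* returns the driver's iflib shared context */
	DEVMETHOD(device_probe,    iflib_device_probe),
	DEVMETHOD(device_attach,   iflib_device_attach),
	DEVMETHOD(device_detach,   iflib_device_detach),
	DEVMETHOD_END
};

static device_method_t ixgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre,      ixgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post,     ixgbe_if_attach_post),
	DEVMETHOD(ifdi_detach,          ixgbe_if_detach),
	DEVMETHOD(ifdi_init,            ixgbe_if_init),
	DEVMETHOD(ifdi_stop,            ixgbe_if_stop),
	DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
	DEVMETHOD_END
};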
Committed by:	Eric Joyner
Date:	2017-12-20 18:15:06 +00:00
Parent:	23e1a2d7da
Commit:	c19c7afee3

20 changed files with 2541 additions and 6159 deletions

View File

@ -2223,11 +2223,9 @@ dev/ixgbe/if_ixv.c optional ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/if_bypass.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_netmap.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/if_fdir.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/if_sriov.c optional ix inet | ixv inet \
dev/ixgbe/if_sriov.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"

View File

@ -165,12 +165,12 @@ ixgbe_bp_set_state(SYSCTL_HANDLER_ARGS)
error = hw->mac.ops.bypass_rw(hw,
BYPASS_PAGE_CTL0, &state);
ixgbe_bypass_mutex_clear(adapter);
if (error)
if (error != 0)
return (error);
state = (state >> BYPASS_STATUS_OFF_SHIFT) & 0x3;
error = sysctl_handle_int(oidp, &state, 0, req);
if ((error) || (req->newptr == NULL))
if ((error != 0) || (req->newptr == NULL))
return (error);
/* Sanity check new state */
@ -437,7 +437,7 @@ ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
struct ixgbe_hw *hw = &adapter->hw;
int error, tmp;
static int timeout = 0;
u32 mask, arg = BYPASS_PAGE_CTL0;
u32 mask, arg;
/* Get the current hardware value */
ixgbe_bypass_mutex_enter(adapter);
@ -456,48 +456,38 @@ ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
if ((error) || (req->newptr == NULL))
return (error);
mask = BYPASS_WDT_ENABLE_M;
arg = 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask = BYPASS_WDT_ENABLE_M | BYPASS_WDT_VALUE_M;
switch (timeout) {
case 0: /* disables the timer */
break;
case 1:
arg = BYPASS_WDT_1_5 << BYPASS_WDT_TIME_SHIFT;
arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask |= BYPASS_WDT_VALUE_M;
break;
case 2:
arg = BYPASS_WDT_2 << BYPASS_WDT_TIME_SHIFT;
arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask |= BYPASS_WDT_VALUE_M;
break;
case 3:
arg = BYPASS_WDT_3 << BYPASS_WDT_TIME_SHIFT;
arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask |= BYPASS_WDT_VALUE_M;
break;
case 4:
arg = BYPASS_WDT_4 << BYPASS_WDT_TIME_SHIFT;
arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask |= BYPASS_WDT_VALUE_M;
break;
case 8:
arg = BYPASS_WDT_8 << BYPASS_WDT_TIME_SHIFT;
arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask |= BYPASS_WDT_VALUE_M;
break;
case 16:
arg = BYPASS_WDT_16 << BYPASS_WDT_TIME_SHIFT;
arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask |= BYPASS_WDT_VALUE_M;
break;
case 32:
arg = BYPASS_WDT_32 << BYPASS_WDT_TIME_SHIFT;
arg |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
mask |= BYPASS_WDT_VALUE_M;
break;
default:
return (EINVAL);
case 0: /* disables the timer */
arg = BYPASS_PAGE_CTL0;
mask = BYPASS_WDT_ENABLE_M;
break;
case 1:
arg |= BYPASS_WDT_1_5 << BYPASS_WDT_TIME_SHIFT;
break;
case 2:
arg |= BYPASS_WDT_2 << BYPASS_WDT_TIME_SHIFT;
break;
case 3:
arg |= BYPASS_WDT_3 << BYPASS_WDT_TIME_SHIFT;
break;
case 4:
arg |= BYPASS_WDT_4 << BYPASS_WDT_TIME_SHIFT;
break;
case 8:
arg |= BYPASS_WDT_8 << BYPASS_WDT_TIME_SHIFT;
break;
case 16:
arg |= BYPASS_WDT_16 << BYPASS_WDT_TIME_SHIFT;
break;
case 32:
arg |= BYPASS_WDT_32 << BYPASS_WDT_TIME_SHIFT;
break;
default:
return (EINVAL);
}
/* Set the new watchdog */
ixgbe_bypass_mutex_enter(adapter);
error = hw->mac.ops.bypass_set(hw, BYPASS_PAGE_CTL0, mask, arg);
@ -541,7 +531,8 @@ ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
error = IXGBE_BYPASS_FW_WRITE_FAILURE;
break;
}
if (hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd)) {
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd);
if (error != 0) {
error = IXGBE_ERR_INVALID_ARGUMENT;
break;
}
@ -615,7 +606,7 @@ ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
&data);
ixgbe_bypass_mutex_clear(adapter);
if (error)
return (-EINVAL);
return (EINVAL);
eeprom[count].logs += data << (8 * i);
}
@ -624,7 +615,7 @@ ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
log_off + i, &eeprom[count].actions);
ixgbe_bypass_mutex_clear(adapter);
if (error)
return (-EINVAL);
return (EINVAL);
/* Quit if not an unread log */
if (!(eeprom[count].logs & BYPASS_LOG_CLEAR_M))
@ -696,21 +687,21 @@ ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
ixgbe_bypass_mutex_clear(adapter);
if (error)
return (-EINVAL);
return (EINVAL);
}
status = 0; /* reset */
/* Another log command can now run */
while (atomic_cmpset_int(&adapter->bypass.log, 1, 0) == 0)
usec_delay(3000);
return(error);
return (error);
unlock_err:
ixgbe_bypass_mutex_clear(adapter);
status = 0; /* reset */
while (atomic_cmpset_int(&adapter->bypass.log, 1, 0) == 0)
usec_delay(3000);
return (-EINVAL);
return (EINVAL);
} /* ixgbe_bp_log */
/************************************************************************
@ -802,7 +793,5 @@ ixgbe_bypass_init(struct adapter *adapter)
adapter, 0, ixgbe_bp_wd_reset, "S", "Bypass WD Reset");
adapter->feat_en |= IXGBE_FEATURE_BYPASS;
return;
} /* ixgbe_bypass_init */

View File

@ -50,10 +50,11 @@ ixgbe_init_fdir(struct adapter *adapter)
} /* ixgbe_init_fdir */
void
ixgbe_reinit_fdir(void *context, int pending)
ixgbe_reinit_fdir(void *context)
{
struct adapter *adapter = context;
struct ifnet *ifp = adapter->ifp;
if_ctx_t ctx = context;
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
if (!(adapter->feat_en & IXGBE_FEATURE_FDIR))
return;
@ -146,9 +147,9 @@ ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
/* TASK_INIT needs this function defined regardless if it's enabled */
void
ixgbe_reinit_fdir(void *context, int pending)
ixgbe_reinit_fdir(void *context)
{
UNREFERENCED_2PARAMETER(context, pending);
UNREFERENCED_PARAMETER(context);
} /* ixgbe_reinit_fdir */
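
Note that ixgbe_reinit_fdir() loses its second (int pending) argument: under iflib the handler is driven by a grouptask rather than a legacy taskqueue task, and grouptask functions take only a context pointer. A hedged sketch of the assumed registration pattern follows; the setup helper is hypothetical, and the adapter's fdir_task grouptask field appears later in the ixgbe.h diff.

/*
 * Hedged sketch of the taskqueue -> grouptask change implied by the new
 * ixgbe_reinit_fdir() signature; the setup helper below is hypothetical.
 */
#include <sys/gtaskqueue.h>
#include <net/iflib.h>
#include "ixgbe.h"

static void
ixgbe_fdir_task_setup(if_ctx_t ctx, struct adapter *adapter)
{
	/* Old style: TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); */
	iflib_config_gtask_init(ctx, &adapter->fdir_task,
	    ixgbe_reinit_fdir, "fdir_task");

	/* Later, e.g. from the admin interrupt path:
	 *	GROUPTASK_ENQUEUE(&adapter->fdir_task);
	 */
}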
void

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -33,6 +33,7 @@
/*$FreeBSD$*/
#include "ixgbe.h"
#include "ixgbe_sriov.h"
#ifdef PCI_IOV
@ -80,10 +81,14 @@ ixgbe_align_all_queue_indices(struct adapter *adapter)
int i;
int index;
for (i = 0; i < adapter->num_queues; i++) {
for (i = 0; i < adapter->num_rx_queues; i++) {
index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
adapter->rx_rings[i].me = index;
adapter->tx_rings[i].me = index;
adapter->rx_queues[i].rxr.me = index;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
adapter->tx_queues[i].txr.me = index;
}
}
@ -233,7 +238,7 @@ ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
if (tag == 0) {
/* Accept non-vlan tagged traffic. */
//vmolr |= IXGBE_VMOLR_AUPE;
vmolr |= IXGBE_VMOLR_AUPE;
/* Allow VM to tag outgoing traffic; no default tag. */
vmvir = 0;
@ -269,7 +274,7 @@ ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
* frames on either the PF or the VF.
*/
if (adapter->max_frame_size > ETHER_MAX_LEN ||
vf->max_frame_size > ETHER_MAX_LEN)
vf->maximum_frame_size > ETHER_MAX_LEN)
return (FALSE);
return (TRUE);
@ -281,7 +286,7 @@ ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
* 1.1 or later VF versions always work if they aren't using
* jumbo frames.
*/
if (vf->max_frame_size <= ETHER_MAX_LEN)
if (vf->maximum_frame_size <= ETHER_MAX_LEN)
return (TRUE);
/*
@ -292,7 +297,6 @@ ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
return (TRUE);
return (FALSE);
}
} /* ixgbe_vf_frame_size_compatible */
@ -451,7 +455,7 @@ ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
}
/* It is illegal to enable vlan tag 0. */
if (tag == 0 && enable != 0){
if (tag == 0 && enable != 0) {
ixgbe_send_vf_nack(adapter, vf, msg[0]);
return;
}
@ -484,8 +488,8 @@ ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
return;
}
vf->max_frame_size = vf_max_size;
ixgbe_update_max_frame(adapter, vf->max_frame_size);
vf->maximum_frame_size = vf_max_size;
ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
/*
* We might have to disable reception to this VF if the frame size is
@ -565,8 +569,12 @@ ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
struct adapter *adapter = iflib_get_softc(ctx);
#ifdef KTR
struct ifnet *ifp = iflib_get_ifp(ctx);
#endif
struct ixgbe_hw *hw;
uint32_t msg[IXGBE_VFMAILBOX_SIZE];
int error;
@ -578,8 +586,8 @@ ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
if (error != 0)
return;
CTR3(KTR_MALLOC, "%s: received msg %x from %d",
adapter->ifp->if_xname, msg[0], vf->pool);
CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname,
msg[0], vf->pool);
if (msg[0] == IXGBE_VF_RESET) {
ixgbe_vf_reset_msg(adapter, vf, msg);
return;
@ -620,17 +628,16 @@ ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
ixgbe_handle_mbx(void *context)
{
struct adapter *adapter;
if_ctx_t ctx = context;
struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw;
struct ixgbe_vf *vf;
int i;
adapter = context;
hw = &adapter->hw;
IXGBE_CORE_LOCK(adapter);
for (i = 0; i < adapter->num_vfs; i++) {
vf = &adapter->vfs[i];
@ -639,22 +646,21 @@ ixgbe_handle_mbx(void *context, int pending)
ixgbe_process_vf_reset(adapter, vf);
if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
ixgbe_process_vf_msg(adapter, vf);
ixgbe_process_vf_msg(ctx, vf);
if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
ixgbe_process_vf_ack(adapter, vf);
}
}
IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
struct adapter *adapter;
int retval = 0;
adapter = device_get_softc(dev);
adapter = iflib_get_softc(ctx);
adapter->iov_mode = IXGBE_NO_VM;
if (num_vfs == 0) {
@ -682,45 +688,38 @@ ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
goto err_init_iov;
}
IXGBE_CORE_LOCK(adapter);
adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
M_NOWAIT | M_ZERO);
if (adapter->vfs == NULL) {
retval = ENOMEM;
IXGBE_CORE_UNLOCK(adapter);
goto err_init_iov;
}
adapter->num_vfs = num_vfs;
adapter->init_locked(adapter);
ixgbe_if_init(adapter->ctx);
adapter->feat_en |= IXGBE_FEATURE_SRIOV;
IXGBE_CORE_UNLOCK(adapter);
return retval;
return (retval);
err_init_iov:
adapter->num_vfs = 0;
adapter->pool = 0;
adapter->iov_mode = IXGBE_NO_VM;
return retval;
} /* ixgbe_init_iov */
return (retval);
} /* ixgbe_if_iov_init */
void
ixgbe_uninit_iov(device_t dev)
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
struct ixgbe_hw *hw;
struct adapter *adapter;
uint32_t pf_reg, vf_reg;
adapter = device_get_softc(dev);
adapter = iflib_get_softc(ctx);
hw = &adapter->hw;
IXGBE_CORE_LOCK(adapter);
/* Enable rx/tx for the PF and disable it for all VFs. */
pf_reg = IXGBE_VF_INDEX(adapter->pool);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
@ -739,9 +738,7 @@ ixgbe_uninit_iov(device_t dev)
adapter->vfs = NULL;
adapter->num_vfs = 0;
adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */
} /* ixgbe_if_iov_uninit */
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
@ -749,8 +746,6 @@ ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
struct ixgbe_hw *hw;
uint32_t vf_index, pfmbimr;
IXGBE_CORE_LOCK_ASSERT(adapter);
hw = &adapter->hw;
if (!(vf->flags & IXGBE_VF_ACTIVE))
@ -786,8 +781,6 @@ ixgbe_initialize_iov(struct adapter *adapter)
if (adapter->iov_mode == IXGBE_NO_VM)
return;
IXGBE_CORE_LOCK_ASSERT(adapter);
/* RMW appropriate registers based on IOV mode */
/* Read... */
mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
@ -844,36 +837,33 @@ ixgbe_recalculate_max_frame(struct adapter *adapter)
{
struct ixgbe_vf *vf;
IXGBE_CORE_LOCK_ASSERT(adapter);
for (int i = 0; i < adapter->num_vfs; i++) {
vf = &adapter->vfs[i];
if (vf->flags & IXGBE_VF_ACTIVE)
ixgbe_update_max_frame(adapter, vf->max_frame_size);
ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
}
} /* ixgbe_recalculate_max_frame */
int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
struct adapter *adapter;
struct ixgbe_vf *vf;
const void *mac;
adapter = device_get_softc(dev);
adapter = iflib_get_softc(ctx);
KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
vfnum, adapter->num_vfs));
IXGBE_CORE_LOCK(adapter);
vf = &adapter->vfs[vfnum];
vf->pool= vfnum;
/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
vf->rar_index = vfnum + 1;
vf->default_vlan = 0;
vf->max_frame_size = ETHER_MAX_LEN;
ixgbe_update_max_frame(adapter, vf->max_frame_size);
vf->maximum_frame_size = ETHER_MAX_LEN;
ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
if (nvlist_exists_binary(config, "mac-addr")) {
mac = nvlist_get_binary(config, "mac-addr", NULL);
@ -890,25 +880,16 @@ ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
vf->flags |= IXGBE_VF_ACTIVE;
ixgbe_init_vf(adapter, vf);
IXGBE_CORE_UNLOCK(adapter);
return (0);
} /* ixgbe_add_vf */
} /* ixgbe_if_iov_vf_add */
#else
void
ixgbe_handle_mbx(void *context, int pending)
ixgbe_handle_mbx(void *context)
{
UNREFERENCED_2PARAMETER(context, pending);
UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
UNREFERENCED_2PARAMETER(mode, vfnum);
return num;
} /* ixgbe_vf_que_index */
#endif

File diff suppressed because it is too large

View File

@ -58,20 +58,13 @@
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/iflib.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <sys/bus.h>
#include <machine/bus.h>
@ -85,7 +78,7 @@
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/gtaskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
@ -106,7 +99,7 @@
* bytes. Performance tests have shown the 2K value to be optimal for top
* performance.
*/
#define DEFAULT_TXD 1024
#define DEFAULT_TXD 2048
#define PERFORM_TXD 2048
#define MAX_TXD 4096
#define MIN_TXD 64
@ -121,7 +114,7 @@
* against the system mbuf pool limit, you can tune nmbclusters
* to adjust for this.
*/
#define DEFAULT_RXD 1024
#define DEFAULT_RXD 2048
#define PERFORM_RXD 2048
#define MAX_RXD 4096
#define MIN_RXD 64
@ -219,6 +212,11 @@
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
#define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \
IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \
IFCAP_HWSTATS | IFCAP_VLAN_HWFILTER | IFCAP_WOL)
/* Backward compatibility items for very old versions */
#ifndef pci_find_cap
#define pci_find_cap pci_find_extcap
@ -241,7 +239,6 @@
IXGBE_EITR_ITR_INT_MASK)
/************************************************************************
* vendor_info_array
*
@ -262,23 +259,8 @@ struct ixgbe_bp_data {
u32 log;
};
struct ixgbe_tx_buf {
union ixgbe_adv_tx_desc *eop;
struct mbuf *m_head;
bus_dmamap_t map;
};
struct ixgbe_rx_buf {
struct mbuf *buf;
struct mbuf *fmp;
bus_dmamap_t pmap;
u_int flags;
#define IXGBE_RX_COPY 0x01
uint64_t addr;
};
/*
* Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free
*/
struct ixgbe_dma_alloc {
bus_addr_t dma_paddr;
@ -295,47 +277,19 @@ struct ixgbe_mc_addr {
u32 vmdq;
};
/*
* Driver queue struct: this is the interrupt container
* for the associated tx and rx ring.
*/
struct ix_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSI-X vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
u32 me;
struct resource *res;
void *tag;
int busy;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
/*
* The transmit ring, one per queue
*/
struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
u32 tail;
int busy;
union ixgbe_adv_tx_desc *tx_base;
struct ixgbe_tx_buf *tx_buffers;
struct ixgbe_dma_alloc txdma;
volatile u16 tx_avail;
u16 next_avail_desc;
u16 next_to_clean;
u16 num_desc;
u32 txd_cmd;
bus_dma_tag_t txtag;
char mtx_name[16];
struct buf_ring *br;
struct task txq_task;
uint64_t tx_paddr;
u32 tail;
qidx_t *tx_rsq;
qidx_t tx_rs_cidx;
qidx_t tx_rs_pidx;
qidx_t tx_cidx_processed;
uint8_t me;
/* Flow Director */
u16 atr_sample;
@ -345,9 +299,6 @@ struct tx_ring {
u32 packets;
/* Soft Stats */
u64 tso_tx;
u64 no_tx_map_avail;
u64 no_tx_dma_setup;
u64 no_desc_avail;
u64 total_packets;
};
@ -356,22 +307,14 @@ struct tx_ring {
* The Receive ring, one per rx queue
*/
struct rx_ring {
struct ix_rx_queue *que;
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
u32 tail;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
struct lro_ctrl lro;
bool lro_enabled;
bool hw_rsc;
bool vtag_strip;
u16 next_to_refresh;
u16 next_to_check;
u16 num_desc;
u16 mbuf_sz;
char mtx_name[16];
struct ixgbe_rx_buf *rx_buffers;
uint64_t rx_paddr;
bus_dma_tag_t ptag;
u32 bytes; /* Used for AIM calc */
@ -389,12 +332,35 @@ struct rx_ring {
u64 flm;
};
/*
* Driver queue struct: this is the interrupt container
* for the associated tx and rx ring.
*/
struct ix_rx_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
struct resource *res;
void *tag;
int busy;
struct rx_ring rxr;
struct if_irq que_irq;
u64 irqs;
};
struct ix_tx_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
struct tx_ring txr;
};
#define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
struct ixgbe_vf {
u_int pool;
u_int rar_index;
u_int max_frame_size;
u_int maximum_frame_size;
uint32_t flags;
uint8_t ether_addr[ETHER_ADDR_LEN];
uint16_t mc_hash[IXGBE_MAX_VF_MC];
@ -408,33 +374,32 @@ struct ixgbe_vf {
struct adapter {
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
if_ctx_t ctx;
if_softc_ctx_t shared;
#define num_tx_queues shared->isc_ntxqsets
#define num_rx_queues shared->isc_nrxqsets
#define max_frame_size shared->isc_max_frame_size
#define intr_type shared->isc_intr
device_t dev;
struct ifnet *ifp;
struct resource *pci_mem;
struct resource *msix_mem;
/*
* Interrupt resources: this set is
* either used for legacy, or for Link
* when doing MSI-X
*/
struct if_irq irq;
void *tag;
struct resource *res;
struct ifmedia media;
struct callout timer;
int link_rid;
struct ifmedia *media;
int if_flags;
struct mtx core_mtx;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
int msix;
u16 num_vlans;
u16 num_queues;
/*
* Shadow VFTA table, this is needed because
@ -446,9 +411,7 @@ struct adapter {
/* Info about the interface */
int advertise; /* link speeds */
int enable_aim; /* adaptive interrupt moderation */
bool link_active;
u16 max_frame_size;
u16 num_segs;
u32 link_speed;
bool link_up;
@ -465,17 +428,16 @@ struct adapter {
/* Support for pluggable optics */
bool sfp_probe;
struct task link_task; /* Link tasklet */
struct task mod_task; /* SFP tasklet */
struct task msf_task; /* Multispeed Fiber */
struct task mbx_task; /* VF -> PF mailbox interrupt */
struct grouptask mod_task; /* SFP tasklet */
struct grouptask msf_task; /* Multispeed Fiber */
struct grouptask mbx_task; /* VF -> PF mailbox interrupt */
int sfp_reinit;
/* Flow Director */
int fdir_reinit;
struct task fdir_task;
struct grouptask fdir_task;
struct task phy_task; /* PHY intr tasklet */
struct taskqueue *tq;
struct grouptask phy_task; /* PHY intr tasklet */
/*
* Queues:
@ -483,24 +445,9 @@ struct adapter {
* and RX/TX pair or rings associated
* with it.
*/
struct ix_queue *queues;
/*
* Transmit rings
* Allocated at run time, an array of rings
*/
struct tx_ring *tx_rings;
u32 num_tx_desc;
u32 tx_process_limit;
/*
* Receive rings
* Allocated at run time, an array of rings
*/
struct rx_ring *rx_rings;
u64 active_queues;
u32 num_rx_desc;
u32 rx_process_limit;
struct ix_tx_queue *tx_queues;
struct ix_rx_queue *rx_queues;
u64 active_queues;
/* Multicast array memory */
struct ixgbe_mc_addr *mta;
@ -514,13 +461,8 @@ struct adapter {
/* Bypass */
struct ixgbe_bp_data bypass;
/* Netmap */
void (*init_locked)(struct adapter *);
void (*stop_locked)(void *);
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long watchdog_events;
@ -547,29 +489,12 @@ struct adapter {
u32 feat_en;
};
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
#define IXGBE_ADVTXD_TSTAMP 0x00080000
#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
#define IXGBE_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define IXGBE_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define IXGBE_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IXGBE_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define IXGBE_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IXGBE_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define IXGBE_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define IXGBE_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
/* For backward compatibility */
#if !defined(PCIER_LINK_STA)
#define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA
@ -626,34 +551,13 @@ static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (1);
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (1);
#endif
return (!buf_ring_empty(br));
return (!buf_ring_empty(br));
}
#endif
/*
* Find the number of unrefreshed RX descriptors
*/
static inline u16
ixgbe_rx_unrefreshed(struct rx_ring *rxr)
{
if (rxr->next_to_check > rxr->next_to_refresh)
return (rxr->next_to_check - rxr->next_to_refresh - 1);
else
return ((rxr->num_desc + rxr->next_to_check) -
rxr->next_to_refresh - 1);
}
static inline int
ixgbe_legacy_ring_empty(struct ifnet *ifp, struct buf_ring *dummy)
{
UNREFERENCED_1PARAMETER(dummy);
return IFQ_DRV_IS_EMPTY(&ifp->if_snd);
}
/*
* This checks for a zero mac addr, something that will be likely
* unless the Admin on the Host has created one.
@ -671,25 +575,16 @@ ixv_check_ether_addr(u8 *addr)
}
/* Shared Prototypes */
void ixgbe_legacy_start(struct ifnet *);
int ixgbe_legacy_start_locked(struct ifnet *, struct tx_ring *);
int ixgbe_mq_start(struct ifnet *, struct mbuf *);
int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixgbe_qflush(struct ifnet *);
void ixgbe_deferred_mq_start(void *, int);
int ixgbe_allocate_queues(struct adapter *);
int ixgbe_setup_transmit_structures(struct adapter *);
void ixgbe_free_transmit_structures(struct adapter *);
int ixgbe_setup_receive_structures(struct adapter *);
void ixgbe_free_receive_structures(struct adapter *);
void ixgbe_txeof(struct tx_ring *);
bool ixgbe_rxeof(struct ix_queue *);
int ixgbe_get_regs(SYSCTL_HANDLER_ARGS);
#include "ixgbe_bypass.h"
#include "ixgbe_sriov.h"
#include "ixgbe_fdir.h"
#include "ixgbe_rss.h"
#include "ixgbe_netmap.h"
#endif /* _IXGBE_H_ */
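
As the #define aliases near the top of the adapter structure show, per-driver queue counts and the maximum frame size now live in the iflib shared softc context rather than in dedicated adapter fields. A hedged illustration of how those aliases resolve; the helper below is hypothetical and not part of the commit.

/*
 * Hypothetical helper, for illustration only: num_tx_queues, num_rx_queues
 * and max_frame_size all dereference the iflib shared softc context.
 */
#include "ixgbe.h"

static void
ixgbe_print_queue_layout(struct adapter *adapter)
{
	if_softc_ctx_t scctx = adapter->shared;

	device_printf(adapter->dev, "%d TX queue(s), %d RX queue(s), max frame %u bytes\n",
	    scctx->isc_ntxqsets, scctx->isc_nrxqsets, scctx->isc_max_frame_size);
}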

View File

@ -223,6 +223,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
"Device %x does not support flow control autoneg",
hw->device_id);
return supported;
}
@ -2001,7 +2002,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
usec_delay(5);
ixgbe_standby_eeprom(hw);
};
}
/*
* On some parts, SPI write time could vary from 0-20mSec on 3.3V
@ -2087,7 +2088,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
* EEPROM
*/
mask = mask >> 1;
};
}
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
@ -3518,7 +3519,6 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
if (index > 3)
return IXGBE_ERR_PARAM;
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val != IXGBE_SUCCESS)
goto out;
@ -3715,7 +3715,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
* @vmdq: VMDq pool to assign
*
* Puts an ethernet address into a receive address register, or
* finds the rar that it is aleady in; adds to the pool list
* finds the rar that it is already in; adds to the pool list
**/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
@ -4126,7 +4126,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
}
return IXGBE_SUCCESS;

View File

@ -52,7 +52,7 @@ void ixgbe_init_fdir(struct adapter *);
#endif
void ixgbe_reinit_fdir(void *, int);
void ixgbe_reinit_fdir(void *);
void ixgbe_atr(struct tx_ring *, struct mbuf *);
#endif /* _IXGBE_FDIR_H_ */

View File

@ -1,521 +0,0 @@
/******************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
/*
* Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* $FreeBSD$
*
* netmap support for: ixgbe
*
* This file is meant to be a reference on how to implement
* netmap support for a network driver.
* This file contains code but only static or inline functions used
* by a single driver. To avoid replication of code we just #include
* it near the beginning of the standard driver.
*/
#ifdef DEV_NETMAP
/*
* Some drivers may need the following headers. Others
* already include them by default
#include <vm/vm.h>
#include <vm/pmap.h>
*/
#include "ixgbe.h"
/*
* device-specific sysctl variables:
*
* ix_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
* During regular operations the CRC is stripped, but on some
* hardware reception of frames not multiple of 64 is slower,
* so using crcstrip=0 helps in benchmarks.
*
* ix_rx_miss, ix_rx_miss_bufs:
* count packets that might be missed due to lost interrupts.
*/
SYSCTL_DECL(_dev_netmap);
static int ix_rx_miss, ix_rx_miss_bufs;
int ix_crcstrip;
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss,
CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs,
CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs");
static void
set_crcstrip(struct ixgbe_hw *hw, int onoff)
{
/* crc stripping is set in two places:
* IXGBE_HLREG0 (modified on init_locked and hw reset)
* IXGBE_RDRXCTL (set by the original driver in
* ixgbe_setup_hw_rsc() called in init_locked.
* We disable the setting when netmap is compiled in).
* We update the values here, but also in ixgbe.c because
* init_locked sometimes is called outside our control.
*/
uint32_t hl, rxc;
hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
if (netmap_verbose)
D("%s read HLREG 0x%x rxc 0x%x",
onoff ? "enter" : "exit", hl, rxc);
/* hw requirements ... */
rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
rxc |= IXGBE_RDRXCTL_RSCACKC;
if (onoff && !ix_crcstrip) {
/* keep the crc. Fast rx */
hl &= ~IXGBE_HLREG0_RXCRCSTRP;
rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
} else {
/* reset default mode */
hl |= IXGBE_HLREG0_RXCRCSTRP;
rxc |= IXGBE_RDRXCTL_CRCSTRIP;
}
if (netmap_verbose)
D("%s write HLREG 0x%x rxc 0x%x",
onoff ? "enter" : "exit", hl, rxc);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
}
/*
* Register/unregister. We are already under netmap lock.
* Only called on the first register or the last unregister.
*/
static int
ixgbe_netmap_reg(struct netmap_adapter *na, int onoff)
{
struct ifnet *ifp = na->ifp;
struct adapter *adapter = ifp->if_softc;
IXGBE_CORE_LOCK(adapter);
adapter->stop_locked(adapter);
set_crcstrip(&adapter->hw, onoff);
/* enable or disable flags and callbacks in na and ifp */
if (onoff) {
nm_set_native_flags(na);
} else {
nm_clear_native_flags(na);
}
adapter->init_locked(adapter); /* also enables intr */
set_crcstrip(&adapter->hw, onoff); // XXX why twice ?
IXGBE_CORE_UNLOCK(adapter);
return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}
/*
* Reconcile kernel and user view of the transmit ring.
*
* All information is in the kring.
* Userspace wants to send packets up to the one before kring->rhead,
* kernel knows kring->nr_hwcur is the first unsent packet.
*
* Here we push packets out (as many as possible), and possibly
* reclaim buffers from previously completed transmission.
*
* The caller (netmap) guarantees that there is only one instance
* running at any time. Any interference with other driver
* methods should be handled by the individual drivers.
*/
static int
ixgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
struct ifnet *ifp = na->ifp;
struct netmap_ring *ring = kring->ring;
u_int nm_i; /* index into the netmap ring */
u_int nic_i; /* index into the NIC ring */
u_int n;
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = kring->rhead;
/*
* interrupts on every tx packet are expensive so request
* them every half ring, or where NS_REPORT is set
*/
u_int report_frequency = kring->nkr_num_slots >> 1;
/* device-specific */
struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];
int reclaim_tx;
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_POSTREAD);
/*
* First part: process new packets to send.
* nm_i is the current index in the netmap ring,
* nic_i is the corresponding index in the NIC ring.
* The two numbers differ because upon a *_init() we reset
* the NIC ring but leave the netmap ring unchanged.
* For the transmit ring, we have
*
* nm_i = kring->nr_hwcur
* nic_i = IXGBE_TDT (not tracked in the driver)
* and
* nm_i == (nic_i + kring->nkr_hwofs) % ring_size
*
* In this driver kring->nkr_hwofs >= 0, but for other
* drivers it might be negative as well.
*/
/*
* If we have packets to send (kring->nr_hwcur != kring->rhead)
* iterate over the netmap ring, fetch length and update
* the corresponding slot in the NIC ring. Some drivers also
* need to update the buffer's physical address in the NIC slot
* even NS_BUF_CHANGED is not set (PNMB computes the addresses).
*
* The netmap_reload_map() calls is especially expensive,
* even when (as in this case) the tag is 0, so do only
* when the buffer has actually changed.
*
* If possible do not set the report/intr bit on all slots,
* but only a few times per ring or when NS_REPORT is set.
*
* Finally, on 10G and faster drivers, it might be useful
* to prefetch the next slot and txr entry.
*/
nm_i = kring->nr_hwcur;
if (nm_i != head) { /* we have new packets to send */
nic_i = netmap_idx_k2n(kring, nm_i);
__builtin_prefetch(&ring->slot[nm_i]);
__builtin_prefetch(&txr->tx_buffers[nic_i]);
for (n = 0; nm_i != head; n++) {
struct netmap_slot *slot = &ring->slot[nm_i];
u_int len = slot->len;
uint64_t paddr;
void *addr = PNMB(na, slot, &paddr);
/* device-specific */
union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i];
struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i];
int flags = (slot->flags & NS_REPORT ||
nic_i == 0 || nic_i == report_frequency) ?
IXGBE_TXD_CMD_RS : 0;
/* prefetch for next round */
__builtin_prefetch(&ring->slot[nm_i + 1]);
__builtin_prefetch(&txr->tx_buffers[nic_i + 1]);
NM_CHECK_ADDR_LEN(na, addr, len);
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
netmap_reload_map(na, txr->txtag, txbuf->map, addr);
}
slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
/* Fill the slot in the NIC ring. */
/* Use legacy descriptor, they are faster? */
curr->read.buffer_addr = htole64(paddr);
curr->read.olinfo_status = 0;
curr->read.cmd_type_len = htole32(len | flags |
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP);
/* make sure changes to the buffer are synced */
bus_dmamap_sync(txr->txtag, txbuf->map,
BUS_DMASYNC_PREWRITE);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
kring->nr_hwcur = head;
/* synchronize the NIC ring */
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* (re)start the tx unit up to slot nic_i (excluded) */
IXGBE_WRITE_REG(&adapter->hw, txr->tail, nic_i);
}
/*
* Second part: reclaim buffers for completed transmissions.
* Because this is expensive (we read a NIC register etc.)
* we only do it in specific cases (see below).
*/
if (flags & NAF_FORCE_RECLAIM) {
reclaim_tx = 1; /* forced reclaim */
} else if (!nm_kr_txempty(kring)) {
reclaim_tx = 0; /* have buffers, no reclaim */
} else {
/*
* No buffers available. Locate previous slot with
* REPORT_STATUS set.
* If the slot has DD set, we can reclaim space,
* otherwise wait for the next interrupt.
* This enables interrupt moderation on the tx
* side though it might reduce throughput.
*/
struct ixgbe_legacy_tx_desc *txd =
(struct ixgbe_legacy_tx_desc *)txr->tx_base;
nic_i = txr->next_to_clean + report_frequency;
if (nic_i > lim)
nic_i -= lim + 1;
// round to the closest with dd set
nic_i = (nic_i < kring->nkr_num_slots / 4 ||
nic_i >= kring->nkr_num_slots*3/4) ?
0 : report_frequency;
reclaim_tx = txd[nic_i].upper.fields.status & IXGBE_TXD_STAT_DD; // XXX cpu_to_le32 ?
}
if (reclaim_tx) {
/*
* Record completed transmissions.
* We (re)use the driver's txr->next_to_clean to keep
* track of the most recently completed transmission.
*
* The datasheet discourages the use of TDH to find
* out the number of sent packets, but we only set
* REPORT_STATUS in a few slots so TDH is the only
* good way.
*/
nic_i = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(kring->ring_id));
if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
D("TDH wrap %d", nic_i);
nic_i -= kring->nkr_num_slots;
}
if (nic_i != txr->next_to_clean) {
/* some tx completed, increment avail */
txr->next_to_clean = nic_i;
kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
}
}
return 0;
}
/*
* Reconcile kernel and user view of the receive ring.
* Same as for the txsync, this routine must be efficient.
* The caller guarantees a single invocations, but races against
* the rest of the driver should be handled here.
*
* On call, kring->rhead is the first packet that userspace wants
* to keep, and kring->rcur is the wakeup point.
* The kernel has previously reported packets up to kring->rtail.
*
* If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
* of whether or not we received an interrupt.
*/
static int
ixgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
struct ifnet *ifp = na->ifp;
struct netmap_ring *ring = kring->ring;
u_int nm_i; /* index into the netmap ring */
u_int nic_i; /* index into the NIC ring */
u_int n;
u_int const lim = kring->nkr_num_slots - 1;
u_int const head = kring->rhead;
int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
/* device-specific */
struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id];
if (head > lim)
return netmap_ring_reinit(kring);
/* XXX check sync modes */
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/*
* First part: import newly received packets.
*
* nm_i is the index of the next free slot in the netmap ring,
* nic_i is the index of the next received packet in the NIC ring,
* and they may differ in case if_init() has been called while
* in netmap mode. For the receive ring we have
*
* nic_i = rxr->next_to_check;
* nm_i = kring->nr_hwtail (previous)
* and
* nm_i == (nic_i + kring->nkr_hwofs) % ring_size
*
* rxr->next_to_check is set to 0 on a ring reinit
*/
if (netmap_no_pendintr || force_update) {
int crclen = (ix_crcstrip) ? 0 : 4;
uint16_t slot_flags = kring->nkr_slot_flags;
nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
nm_i = netmap_idx_n2k(kring, nic_i);
for (n = 0; ; n++) {
union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
uint32_t staterr = le32toh(curr->wb.upper.status_error);
if ((staterr & IXGBE_RXD_STAT_DD) == 0)
break;
ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
ring->slot[nm_i].flags = slot_flags;
bus_dmamap_sync(rxr->ptag,
rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
if (n) { /* update the state variables */
if (netmap_no_pendintr && !force_update) {
/* diagnostics */
ix_rx_miss ++;
ix_rx_miss_bufs += n;
}
rxr->next_to_check = nic_i;
kring->nr_hwtail = nm_i;
}
kring->nr_kflags &= ~NKR_PENDINTR;
}
/*
* Second part: skip past packets that userspace has released.
* (kring->nr_hwcur to kring->rhead excluded),
* and make the buffers available for reception.
* As usual nm_i is the index in the netmap ring,
* nic_i is the index in the NIC ring, and
* nm_i == (nic_i + kring->nkr_hwofs) % ring_size
*/
nm_i = kring->nr_hwcur;
if (nm_i != head) {
nic_i = netmap_idx_k2n(kring, nm_i);
for (n = 0; nm_i != head; n++) {
struct netmap_slot *slot = &ring->slot[nm_i];
uint64_t paddr;
void *addr = PNMB(na, slot, &paddr);
union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[nic_i];
if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
goto ring_reset;
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
slot->flags &= ~NS_BUF_CHANGED;
}
curr->wb.upper.status_error = 0;
curr->read.pkt_addr = htole64(paddr);
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
BUS_DMASYNC_PREREAD);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
kring->nr_hwcur = head;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
* IMPORTANT: we must leave one free slot in the ring,
* so move nic_i back by one unit
*/
nic_i = nm_prev(nic_i, lim);
IXGBE_WRITE_REG(&adapter->hw, rxr->tail, nic_i);
}
return 0;
ring_reset:
return netmap_ring_reinit(kring);
}
/*
* The attach routine, called near the end of ixgbe_attach(),
* fills the parameters for netmap_attach() and calls it.
* It cannot fail, in the worst case (such as no memory)
* netmap mode will be disabled and the driver will only
* operate in standard mode.
*/
void
ixgbe_netmap_attach(struct adapter *adapter)
{
struct netmap_adapter na;
bzero(&na, sizeof(na));
na.ifp = adapter->ifp;
na.na_flags = NAF_BDG_MAYSLEEP;
na.num_tx_desc = adapter->num_tx_desc;
na.num_rx_desc = adapter->num_rx_desc;
na.nm_txsync = ixgbe_netmap_txsync;
na.nm_rxsync = ixgbe_netmap_rxsync;
na.nm_register = ixgbe_netmap_reg;
na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
netmap_attach(&na);
}
#endif /* DEV_NETMAP */
/* end of file */

View File

@ -1,59 +0,0 @@
/******************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXGBE_NETMAP_H_
#define _IXGBE_NETMAP_H_
#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
extern int ix_crcstrip;
/*
* ixgbe_netmap.c contains functions for netmap
* support that extend the standard driver. See additional
* comments in ixgbe_netmap.c.
*/
void ixgbe_netmap_attach(struct adapter *adapter);
#else
#define ixgbe_netmap_attach(a)
#define netmap_detach(a)
#endif /* DEV_NETMAP */
#endif /* _IXGBE_NETMAP_H_ */

View File

@ -1491,21 +1491,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_sfp_intel;
break;
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type =
ixgbe_phy_sfp_passive_unknown;
else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type =
ixgbe_phy_sfp_active_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
/* Allow any DA cable vendor */
if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
IXGBE_SFF_DA_ACTIVE_CABLE)) {
IXGBE_SFF_DA_ACTIVE_CABLE)) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_sfp_passive_unknown;
else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type = ixgbe_phy_sfp_active_unknown;
status = IXGBE_SUCCESS;
goto out;
}

View File

@ -41,6 +41,7 @@
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#include <net/iflib.h>
#include "ixgbe_mbx.h"
#define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
@ -66,15 +67,16 @@
#define IXGBE_32_VM 32
#define IXGBE_64_VM 64
int ixgbe_add_vf(device_t, u16, const nvlist_t *);
int ixgbe_init_iov(device_t, u16, const nvlist_t *);
void ixgbe_uninit_iov(device_t);
int ixgbe_if_iov_vf_add(if_ctx_t, u16, const nvlist_t *);
int ixgbe_if_iov_init(if_ctx_t, u16, const nvlist_t *);
void ixgbe_if_iov_uninit(if_ctx_t);
void ixgbe_initialize_iov(struct adapter *);
void ixgbe_recalculate_max_frame(struct adapter *);
void ixgbe_ping_all_vfs(struct adapter *);
int ixgbe_pci_iov_detach(device_t);
void ixgbe_define_iov_schemas(device_t, int *);
void ixgbe_align_all_queue_indices(struct adapter *);
int ixgbe_vf_que_index(int, int, int);
u32 ixgbe_get_mtqc(int);
u32 ixgbe_get_mrqc(int);
@ -91,12 +93,13 @@ u32 ixgbe_get_mrqc(int);
#define ixgbe_pci_iov_detach(_a) 0
#define ixgbe_define_iov_schemas(_a,_b)
#define ixgbe_align_all_queue_indices(_a)
#define ixgbe_vf_que_index(_a, _b, _c) (_c)
#define ixgbe_get_mtqc(_a) IXGBE_MTQC_64Q_1PB
#define ixgbe_get_mrqc(_a) 0
#endif /* PCI_IOV */
void ixgbe_handle_mbx(void *, int);
int ixgbe_vf_que_index(int, int, int);
void ixgbe_if_init(if_ctx_t ctx);
void ixgbe_handle_mbx(void *);
#endif
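
The SR-IOV entry points above are renamed to the ifdi_iov_* naming because, with iflib, PCI IOV requests reach the driver through iflib's device-method layer. A hedged sketch of how the renamed functions are presumably wired into the driver's ifdi method table, extending the table sketched after the commit message (the full if_ix.c diff is not shown in this view):

/* Sketch of the assumed method-table wiring, not the committed code. */
static device_method_t ixgbe_if_methods[] = {
	/* ... other ifdi_* methods ... */
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init,   ixgbe_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
#endif
	DEVMETHOD_END
};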

View File

@ -1563,7 +1563,7 @@ struct ixgbe_dmac_config {
#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */
#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */
#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */
#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */
#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */

View File

@ -3,10 +3,9 @@
.PATH: ${SRCTOP}/sys/dev/ixgbe
KMOD = if_ix
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += if_ix.c if_bypass.c if_fdir.c if_sriov.c ix_txrx.c ixgbe_osdep.c
SRCS += ixgbe_netmap.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c

View File

@ -3,9 +3,9 @@
.PATH: ${SRCTOP}/sys/dev/ixgbe
KMOD = if_ixv
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += if_ixv.c if_fdir.c if_sriov.c ix_txrx.c ixgbe_osdep.c ixgbe_netmap.c
SRCS += if_ixv.c if_fdir.c ix_txrx.c ixgbe_osdep.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c