ixl: Update to 1.4.24-k.

Changes by author:

Eric Joyner		ixl: Fix compile error when IXL_DEBUG is defined.
Eric Joyner		ixl: Fix taskqueues created in init() not being freed in stop().
Eric Joyner		ixl: Add additional debug sysctls, for Tx and Rx queue stats.
Eric Joyner		ixl: Enable dynamic itr by default.
Eric Joyner		ixl: Edit spacing, comments, function signatures (to conform to style(9)).
Eric Joyner		ixl: Check for errors when tearing down msix interrupts.
Eric Joyner		ixl: Remove unnecessary register reads/writes.
Eric Joyner		ixl: Remove admin queue interrupt enable from general interrupt enable.
Eric Joyner		ixl: Update switch config after teardown/reset flow in init().
Eric Joyner		ixl: Add additional admin queue error code output to admin queue call errors.
Eric Joyner		ixl: Don't destroy i40e spinlock if it's already uninitialized.
Shannon Nelson		i40e-shared: clean event descriptor before use
Anjali Singhai Jain	i40e-shared: When in promisc mode apply promisc mode to Tx Traffic as well
Kevin Scott		i40e_shared: Increase timeout when checking GLGEN_RSTAT_DEVSTATE bit
Eric Joyner		ixlv: Fix IXL_DEBUG compile issue.
Eric Joyner		ixlv: Attempt to fix panic/other issues when rapidly unloading/loading driver.
Eric Joyner		ixl/ixlv: Revert m_collapse() in ixl_xmit() to m_defrag().
Deepthi Kavalur		i40e_shared: Trace logging HW capabilities
Eric Joyner		ixlv: Correctly unlock/relock around init() call in vc_completion().
Eric Joyner		ixl: Stop preventing changing flow control mode for CR4 media.
Eric Joyner		ixl: Set IPv6 TCP offload flag when doing TSO.

Differential Revision:  https://reviews.freebsd.org/D6211
Reviewed by:    sbruno, kmacy, jeffrey.e.pieper@intel.com
MFC after:      2 weeks
Sponsored by:   Intel Corporation
This commit is contained in:
Eric Joyner 2016-05-12 18:21:52 +00:00
parent 6c42605965
commit 6d011ad5f6
10 changed files with 337 additions and 114 deletions

View File

@ -993,6 +993,9 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
u16 flags;
u16 ntu;
/* pre-clean the event info */
i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
/* take the lock before we start messing with the ring */
i40e_acquire_spinlock(&hw->aq.arq_spinlock);
@ -1065,13 +1068,6 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
i40e_release_spinlock(&hw->aq.arq_spinlock);
if (i40e_is_nvm_update_op(&e->desc)) {
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
@ -1092,6 +1088,13 @@ clean_arq_element_err:
}
}
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
i40e_release_spinlock(&hw->aq.arq_spinlock);
return ret_code;
}

View File

@ -1087,6 +1087,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;

View File

@ -1103,7 +1103,11 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 10; cnt++) {
/* It can take upto 15 secs for GRST steady state */
grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@ -2012,12 +2016,19 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (set)
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
(hw->aq.api_maj_ver > 1))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
(hw->aq.api_maj_ver > 1))
cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
cmd->seid = CPU_TO_LE16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@ -3367,41 +3378,73 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
switch (id) {
case I40E_AQ_CAP_ID_SWITCH_MODE:
p->switch_mode = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Switch mode = %d\n",
p->switch_mode);
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
break;
case I40E_AQ_CAP_ID_NPAR_ACTIVE:
p->npar_enable = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: NPAR enable = %d\n",
p->npar_enable);
break;
case I40E_AQ_CAP_ID_OS2BMC_CAP:
p->os2bmc = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: OS2BMC = %d\n", p->os2bmc);
break;
case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
p->valid_functions = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Valid Functions = %d\n",
p->valid_functions);
break;
case I40E_AQ_CAP_ID_SRIOV:
if (number == 1)
p->sr_iov_1_1 = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: SR-IOV = %d\n",
p->sr_iov_1_1);
break;
case I40E_AQ_CAP_ID_VF:
p->num_vfs = number;
p->vf_base_id = logical_id;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: VF count = %d\n",
p->num_vfs);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: VF base_id = %d\n",
p->vf_base_id);
break;
case I40E_AQ_CAP_ID_VMDQ:
if (number == 1)
p->vmdq = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: VMDQ = %d\n", p->vmdq);
break;
case I40E_AQ_CAP_ID_8021QBG:
if (number == 1)
p->evb_802_1_qbg = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: 802.1Qbg = %d\n", number);
break;
case I40E_AQ_CAP_ID_8021QBR:
if (number == 1)
p->evb_802_1_qbh = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: 802.1Qbh = %d\n", number);
break;
case I40E_AQ_CAP_ID_VSI:
p->num_vsis = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: VSI count = %d\n",
p->num_vsis);
break;
case I40E_AQ_CAP_ID_DCB:
if (number == 1) {
@ -3409,33 +3452,68 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->enabled_tcmap = logical_id;
p->maxtc = phys_id;
}
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: DCB = %d\n", p->dcb);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: TC Mapping = %d\n",
logical_id);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: TC Max = %d\n", p->maxtc);
break;
case I40E_AQ_CAP_ID_FCOE:
if (number == 1)
p->fcoe = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: FCOE = %d\n", p->fcoe);
break;
case I40E_AQ_CAP_ID_ISCSI:
if (number == 1)
p->iscsi = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: iSCSI = %d\n", p->iscsi);
break;
case I40E_AQ_CAP_ID_RSS:
p->rss = TRUE;
p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: RSS = %d\n", p->rss);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: RSS table size = %d\n",
p->rss_table_size);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: RSS table width = %d\n",
p->rss_table_entry_width);
break;
case I40E_AQ_CAP_ID_RXQ:
p->num_rx_qp = number;
p->base_queue = phys_id;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Rx QP = %d\n", number);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: base_queue = %d\n",
p->base_queue);
break;
case I40E_AQ_CAP_ID_TXQ:
p->num_tx_qp = number;
p->base_queue = phys_id;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Tx QP = %d\n", number);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: base_queue = %d\n",
p->base_queue);
break;
case I40E_AQ_CAP_ID_MSIX:
p->num_msix_vectors = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MSIX vector count = %d\n",
p->num_msix_vectors_vf);
break;
case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MSIX VF vector count = %d\n",
p->num_msix_vectors_vf);
break;
case I40E_AQ_CAP_ID_FLEX10:
if (major_rev == 1) {
@ -3452,41 +3530,72 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
}
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Flex10 mode = %d\n",
p->flex10_mode);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Flex10 status = %d\n",
p->flex10_status);
break;
case I40E_AQ_CAP_ID_CEM:
if (number == 1)
p->mgmt_cem = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: CEM = %d\n", p->mgmt_cem);
break;
case I40E_AQ_CAP_ID_IWARP:
if (number == 1)
p->iwarp = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: iWARP = %d\n", p->iwarp);
break;
case I40E_AQ_CAP_ID_LED:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->led[phys_id] = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: LED - PIN %d\n", phys_id);
break;
case I40E_AQ_CAP_ID_SDP:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->sdp[phys_id] = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: SDP - PIN %d\n", phys_id);
break;
case I40E_AQ_CAP_ID_MDIO:
if (number == 1) {
p->mdio_port_num = phys_id;
p->mdio_port_mode = logical_id;
}
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MDIO port number = %d\n",
p->mdio_port_num);
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MDIO port mode = %d\n",
p->mdio_port_mode);
break;
case I40E_AQ_CAP_ID_1588:
if (number == 1)
p->ieee_1588 = TRUE;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: IEEE 1588 = %d\n",
p->ieee_1588);
break;
case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
p->fd = TRUE;
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Flow Director = 1\n");
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Guaranteed FD filters = %d\n",
p->fd_filters_guaranteed);
break;
case I40E_AQ_CAP_ID_WSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: wr_csr_prot = 0x%llX\n\n",
(p->wr_csr_prot & 0xffff));
break;
default:
break;

View File

@ -155,7 +155,8 @@ i40e_release_spinlock(struct i40e_spinlock *lock)
void
i40e_destroy_spinlock(struct i40e_spinlock *lock)
{
mtx_destroy(&lock->mutex);
if (mtx_initialized(&lock->mutex))
mtx_destroy(&lock->mutex);
}
/*

View File

@ -48,7 +48,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixl_driver_version[] = "1.4.20-k";
char ixl_driver_version[] = "1.4.24-k";
/*********************************************************************
* PCI Device ID Table
@ -211,6 +211,10 @@ static int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
static void ixl_handle_empr_reset(struct ixl_pf *);
static int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
/* Debug helper functions */
#ifdef IXL_DEBUG
static void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
#endif
#ifdef PCI_IOV
static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
@ -303,12 +307,12 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
** - true/false for dynamic adjustment
** - default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
int ixl_dynamic_rx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
int ixl_dynamic_tx_itr = 0;
int ixl_dynamic_tx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
@ -1103,7 +1107,7 @@ ixl_init_locked(struct ixl_pf *pf)
int ret;
mtx_assert(&pf->pf_mtx, MA_OWNED);
INIT_DEBUGOUT("ixl_init: begin");
INIT_DEBUGOUT("ixl_init_locked: begin");
ixl_stop_locked(pf);
@ -1261,6 +1265,7 @@ ixl_reset(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
u8 set_fc_err_mask;
int error = 0;
// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
@ -1274,7 +1279,8 @@ ixl_reset(struct ixl_pf *pf)
error = i40e_init_adminq(hw);
if (error) {
device_printf(dev, "init: Admin queue init failure; status code %d", error);
device_printf(dev, "init: Admin queue init failure;"
" status code %d", error);
error = EIO;
goto err_out;
}
@ -1283,26 +1289,35 @@ ixl_reset(struct ixl_pf *pf)
error = ixl_get_hw_capabilities(pf);
if (error) {
device_printf(dev, "init: Error retrieving HW capabilities; status code %d\n", error);
device_printf(dev, "init: Error retrieving HW capabilities;"
" status code %d\n", error);
goto err_out;
}
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
device_printf(dev, "init: LAN HMC init failed; status code %d\n", error);
device_printf(dev, "init: LAN HMC init failed; status code %d\n",
error);
error = EIO;
goto err_out;
}
error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (error) {
device_printf(dev, "init: LAN HMC config failed; status code %d\n", error);
device_printf(dev, "init: LAN HMC config failed; status code %d\n",
error);
error = EIO;
goto err_out;
}
// XXX: need to do switch config here?
// XXX: possible fix for panic, but our failure recovery is still broken
error = ixl_switch_config(pf);
if (error) {
device_printf(dev, "init: ixl_switch_config() failed: %d\n",
error);
goto err_out;
}
error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
NULL);
@ -1313,7 +1328,6 @@ ixl_reset(struct ixl_pf *pf)
goto err_out;
}
u8 set_fc_err_mask;
error = i40e_set_fc(hw, &set_fc_err_mask, true);
if (error) {
device_printf(dev, "init: setting link flow control failed; retcode %d,"
@ -1344,6 +1358,7 @@ static void
ixl_init(void *arg)
{
struct ixl_pf *pf = arg;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
int error = 0;
@ -1366,11 +1381,15 @@ ixl_init(void *arg)
* so this is done outside of init_locked().
*/
if (pf->msix > 1) {
error = ixl_setup_queue_msix(&pf->vsi);
/* Teardown existing interrupts, if they exist */
ixl_teardown_queue_msix(vsi);
ixl_free_queue_tqs(vsi);
/* Then set them up again */
error = ixl_setup_queue_msix(vsi);
if (error)
device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
error);
error = ixl_setup_queue_tqs(&pf->vsi);
error = ixl_setup_queue_tqs(vsi);
if (error)
device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
error);
@ -1385,7 +1404,6 @@ ixl_init(void *arg)
IXL_PF_LOCK(pf);
ixl_init_locked(pf);
IXL_PF_UNLOCK(pf);
return;
}
/*
@ -2032,7 +2050,7 @@ ixl_local_timer(void *arg)
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
for (int i = 0; i < vsi->num_queues; i++,que++) {
for (int i = 0; i < vsi->num_queues; i++, que++) {
/* Any queues with outstanding work get a sw irq */
if (que->busy)
wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
@ -2129,6 +2147,7 @@ ixl_stop(struct ixl_pf *pf)
IXL_PF_UNLOCK(pf);
ixl_teardown_queue_msix(&pf->vsi);
ixl_free_queue_tqs(&pf->vsi);
}
/*********************************************************************
@ -2267,18 +2286,22 @@ ixl_setup_queue_tqs(struct ixl_vsi *vsi)
static void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
if (pf->tq)
if (pf->tq) {
taskqueue_free(pf->tq);
pf->tq = NULL;
}
}
static void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
struct ixl_queue *que = vsi->queues;
struct ixl_queue *que = vsi->queues;
for (int i = 0; i < vsi->num_queues; i++, que++) {
if (que->tq)
if (que->tq) {
taskqueue_free(que->tq);
que->tq = NULL;
}
}
}
@ -2359,7 +2382,7 @@ ixl_setup_queue_msix(struct ixl_vsi *vsi)
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
return (error);
}
error = bus_describe_intr(dev, que->res, que->tag, "que%d", i);
error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
if (error) {
device_printf(dev, "bus_describe_intr() for Queue %d"
" interrupt name failed, error %d\n",
@ -2627,12 +2650,11 @@ ixl_configure_legacy(struct ixl_pf *pf)
| (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
| (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(0), reg);
}
/*
* Set the Initial ITR state
* Get initial ITR values from tunable values.
*/
static void
ixl_configure_itr(struct ixl_pf *pf)
@ -2642,11 +2664,7 @@ ixl_configure_itr(struct ixl_pf *pf)
struct ixl_queue *que = vsi->queues;
vsi->rx_itr_setting = ixl_rx_itr;
if (ixl_dynamic_rx_itr)
vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
vsi->tx_itr_setting = ixl_tx_itr;
if (ixl_dynamic_tx_itr)
vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
@ -2656,6 +2674,7 @@ ixl_configure_itr(struct ixl_pf *pf)
vsi->rx_itr_setting);
rxr->itr = vsi->rx_itr_setting;
rxr->latency = IXL_AVE_LATENCY;
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
vsi->tx_itr_setting);
txr->itr = vsi->tx_itr_setting;
@ -2732,23 +2751,33 @@ ixl_teardown_queue_msix(struct ixl_vsi *vsi)
{
struct ixl_queue *que = vsi->queues;
device_t dev = vsi->dev;
int rid;
int rid, error = 0;
/* We may get here before stations are setup */
if ((!ixl_enable_msix) || (que == NULL))
return (0);
/* Release all MSIX queue resources */
// TODO: Check for errors from bus_teardown_intr
// TODO: Check for errors from bus_release_resource
for (int i = 0; i < vsi->num_queues; i++, que++) {
rid = que->msix + 1;
if (que->tag != NULL) {
bus_teardown_intr(dev, que->res, que->tag);
error = bus_teardown_intr(dev, que->res, que->tag);
if (error) {
device_printf(dev, "bus_teardown_intr() for"
" Queue %d interrupt failed\n",
que->me);
// return (ENXIO);
}
que->tag = NULL;
}
if (que->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
if (error) {
device_printf(dev, "bus_release_resource() for"
" Queue %d interrupt failed [rid=%d]\n",
que->me, rid);
// return (ENXIO);
}
que->res = NULL;
}
}
@ -3018,8 +3047,8 @@ ixl_switch_config(struct ixl_pf *pf)
ret = i40e_aq_get_switch_config(hw, sw_config,
sizeof(aq_buf), &next, NULL);
if (ret) {
device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
ret);
device_printf(dev, "aq_get_switch_config() failed, error %d,"
" aq_error %d\n", ret, pf->hw.aq.asq_last_status);
return (ret);
}
#ifdef IXL_DEBUG
@ -3066,7 +3095,8 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
ctxt.pf_num = hw->pf_id;
err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
if (err) {
device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d\n", err);
device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
" aq_error %d\n", err, hw->aq.asq_last_status);
return (err);
}
#ifdef IXL_DEBUG
@ -3202,7 +3232,6 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
device_printf(dev, "Fail in init_rx_ring %d\n", i);
break;
}
wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
#ifdef DEV_NETMAP
/* preserve queue */
if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
@ -3419,7 +3448,6 @@ ixl_set_queue_rx_itr(struct ixl_queue *que)
u16 rx_latency = 0;
int rx_bytes;
/* Idle, do nothing */
if (rxr->bytes == 0)
return;
@ -3571,6 +3599,52 @@ ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
}
#ifdef IXL_DEBUG
/**
 * ixl_sysctl_qtx_tail_handler
 *
 * Sysctl handler that reads the live I40E_QTX_TAIL register value
 * for one Tx queue and reports it. Effectively read-only: any value
 * written via the sysctl is accepted by sysctl_handle_int() but
 * never pushed back to hardware.
 */
static int
ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
int error;
u32 val;
/* The queue pointer is stashed in oid_arg1 by SYSCTL_ADD_PROC */
que = ((struct ixl_queue *)oidp->oid_arg1);
if (!que) return 0;
/* Read the queue's Tx tail register directly from hardware */
val = rd32(que->vsi->hw, que->txr.tail);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
return (0);
}
/**
 * ixl_sysctl_qrx_tail_handler
 *
 * Sysctl handler that reads the live I40E_QRX_TAIL register value
 * for one Rx queue and reports it. Effectively read-only: any value
 * written via the sysctl is accepted by sysctl_handle_int() but
 * never pushed back to hardware.
 */
static int
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
struct ixl_queue *que;
int error;
u32 val;
/* The queue pointer is stashed in oid_arg1 by SYSCTL_ADD_PROC */
que = ((struct ixl_queue *)oidp->oid_arg1);
if (!que) return 0;
/* Read the queue's Rx tail register directly from hardware */
val = rd32(que->vsi->hw, que->rxr.tail);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
return (0);
}
#endif
static void
ixl_add_hw_stats(struct ixl_pf *pf)
{
@ -3615,9 +3689,6 @@ ixl_add_hw_stats(struct ixl_pf *pf)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
"m_defrag() failed");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
CTLFLAG_RD, &(queues[q].dropped_pkts),
"Driver dropped packets");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(queues[q].irqs),
"irqs on this queue");
@ -3642,6 +3713,45 @@ ixl_add_hw_stats(struct ixl_pf *pf)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
CTLFLAG_RD, &(rxr->desc_errs),
"Queue Rx Descriptor Errors");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
CTLFLAG_RD, &(rxr->itr), 0,
"Queue Rx ITR Interval");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
// Not actual latency; just a calculated value to put in a register
// TODO: Put in better descriptions here
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_latency",
CTLFLAG_RD, &(rxr->latency), 0,
"Queue Rx ITRL Average Interval");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_latency",
CTLFLAG_RD, &(txr->latency), 0,
"Queue Tx ITRL Average Interval");
#ifdef IXL_DEBUG
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
CTLFLAG_RD, &(rxr->not_done),
"Queue Rx Descriptors not Done");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
CTLFLAG_RD, &(rxr->next_refresh), 0,
"Queue Rx Descriptors not Done");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
CTLFLAG_RD, &(rxr->next_check), 0,
"Queue Rx Descriptors not Done");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixl_sysctl_qtx_tail_handler, "IU",
"Queue Transmit Descriptor Tail");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
sizeof(struct ixl_queue),
ixl_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
#endif
}
/* MAC stats */
@ -3747,7 +3857,8 @@ ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
** ixl_config_rss - setup RSS
** - note this is done for the single vsi
*/
static void ixl_config_rss(struct ixl_vsi *vsi)
static void
ixl_config_rss(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
@ -4302,7 +4413,8 @@ ixl_disable_rings(struct ixl_vsi *vsi)
* Called from interrupt handler to identify possibly malicious vfs
* (But also detects events from the PF, as well)
**/
static void ixl_handle_mdd_event(struct ixl_pf *pf)
static void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
@ -4375,7 +4487,6 @@ ixl_enable_intr(struct ixl_vsi *vsi)
struct ixl_queue *que = vsi->queues;
if (ixl_enable_msix) {
ixl_enable_adminq(hw);
for (int i = 0; i < vsi->num_queues; i++, que++)
ixl_enable_queue(hw, que->me);
} else
@ -4678,7 +4789,6 @@ ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
}
ixl_configure_intr0_msix(pf);
ixl_enable_adminq(hw);
/* setup hmc */
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
@ -4798,7 +4908,8 @@ ixl_do_adminq(void *context, int pending)
/**
* Update VSI-specific ethernet statistics counters.
**/
void ixl_update_eth_stats(struct ixl_vsi *vsi)
void
ixl_update_eth_stats(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = &pf->hw;
@ -4996,10 +5107,11 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
#if 0
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "rx_itr", CTLFLAG_RW,
&ixl_rx_itr, IXL_ITR_8K, "RX ITR");
&ixl_rx_itr, 0, "RX ITR");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
@ -5009,12 +5121,13 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "tx_itr", CTLFLAG_RW,
&ixl_tx_itr, IXL_ITR_4K, "TX ITR");
&ixl_tx_itr, 0, "TX ITR");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
#endif
#ifdef IXL_DEBUG_SYSCTL
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
@ -5095,17 +5208,6 @@ ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
/*
** Changing flow control mode currently does not work on
** 40GBASE-CR4 PHYs
*/
if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
|| hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
device_printf(dev, "Changing flow control mode unsupported"
" on 40GBase-CR4 media.\n");
return (ENODEV);
}
/* Set fc ability for port */
hw->fc.requested_mode = requested_fc;
aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
@ -5365,7 +5467,6 @@ ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
break;
}
device_printf(dev,"PCI Express Bus: Speed %s %s\n",
((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
(hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
@ -5402,7 +5503,8 @@ ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
return 0;
}
inline void
#ifdef IXL_DEBUG
static void
ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
{
if ((nvma->command == I40E_NVM_READ) &&
@ -5434,6 +5536,7 @@ ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
}
}
#endif
static int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
@ -5691,18 +5794,10 @@ ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
sbuf_cat(buf, "\n");
sbuf_printf(buf, "# of entries: %d\n", num_entries);
sbuf_printf(buf,
#if 0
"Type | Guaranteed | Total | Used | Un-allocated\n"
" | (this) | (all) | (this) | (all) \n");
#endif
" Type | Guaranteed | Total | Used | Un-allocated\n"
" | (this) | (all) | (this) | (all) \n");
for (int i = 0; i < num_entries; i++) {
sbuf_printf(buf,
#if 0
"%#4x | %10d %5d %6d %12d",
resp[i].resource_type,
#endif
"%25s | %10d %5d %6d %12d",
ixl_switch_res_type_string(resp[i].resource_type),
resp[i].guaranteed,

View File

@ -48,7 +48,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixlv_driver_version[] = "1.2.7-k";
char ixlv_driver_version[] = "1.2.10-k";
/*********************************************************************
* PCI Device ID Table
@ -670,7 +670,7 @@ ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
error = EINVAL;
IOCTL_DBG_IF(ifp, "mtu too large");
} else {
IOCTL_DBG_IF2(ifp, "mtu: %u -> %d", ifp->if_mtu, ifr->ifr_mtu);
IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
// ERJ: Interestingly enough, these types don't match
ifp->if_mtu = (u_long)ifr->ifr_mtu;
vsi->max_frame_size =
@ -941,12 +941,12 @@ ixlv_init(void *arg)
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
&& ++retries < 100) {
i40e_msec_delay(10);
&& ++retries < IXLV_AQ_MAX_ERR) {
i40e_msec_delay(25);
}
if (retries >= IXLV_AQ_MAX_ERR)
if_printf(vsi->ifp,
"Init failed to complete in alloted time!\n");
"Init failed to complete in allotted time!\n");
}
/*
@ -2598,6 +2598,7 @@ ixlv_config_rss(struct ixlv_sc *sc)
wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
// TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
/* Populate the LUT with max no. of queues in round robin fashion */
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
if (j == vsi->num_queues)

View File

@ -399,8 +399,8 @@ struct tx_ring {
u16 next_to_clean;
u16 atr_rate;
u16 atr_count;
u16 itr;
u16 latency;
u32 itr;
u32 latency;
struct ixl_tx_buf *buffers;
volatile u16 avail;
u32 cmd;
@ -432,10 +432,10 @@ struct rx_ring {
bool lro_enabled;
bool hdr_split;
bool discard;
u16 next_refresh;
u16 next_check;
u16 itr;
u16 latency;
u32 next_refresh;
u32 next_check;
u32 itr;
u32 latency;
char mtx_name[16];
struct ixl_rx_buf *buffers;
u32 mbuf_sz;
@ -451,7 +451,7 @@ struct rx_ring {
u64 split;
u64 rx_packets;
u64 rx_bytes;
u64 discarded;
u64 desc_errs;
u64 not_done;
};
@ -504,8 +504,8 @@ struct ixl_vsi {
u16 msix_base; /* station base MSIX vector */
u16 first_queue;
u16 num_queues;
u16 rx_itr_setting;
u16 tx_itr_setting;
u32 rx_itr_setting;
u32 tx_itr_setting;
struct ixl_queue *queues; /* head of queues */
bool link_active;
u16 seid;

View File

@ -247,7 +247,6 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
bus_dma_tag_t tag;
bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];
cmd = off = 0;
m_head = *m_headp;
@ -286,7 +285,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
if (error == EFBIG) {
struct mbuf *m;
m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
m = m_defrag(*m_headp, M_NOWAIT);
if (m == NULL) {
que->mbuf_defrag_failed++;
m_freem(*m_headp);
@ -390,7 +389,6 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
++txr->total_packets;
wr32(hw, txr->tail, i);
ixl_flush(hw);
/* Mark outstanding work */
if (que->busy == 0)
que->busy = 1;
@ -631,7 +629,6 @@ ixl_tx_setup_offload(struct ixl_queue *que,
u8 ipproto = 0;
bool tso = FALSE;
/* Set up the TSO context descriptor if required */
if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
tso = ixl_tso_setup(que, mp);
@ -769,6 +766,12 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
tcp_hlen = th->th_off << 2;
/*
* The corresponding flag is set by the stack in the IPv4
* TSO case, but not in IPv6 (at least in FreeBSD 10.2).
* So, set it here because the rest of the flow requires it.
*/
mp->m_pkthdr.csum_flags |= CSUM_TCP_IPV6;
break;
#endif
#ifdef INET
@ -1579,7 +1582,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
** error results.
*/
if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
rxr->discarded++;
rxr->desc_errs++;
ixl_rx_discard(rxr, i);
goto next_desc;
}

View File

@ -38,7 +38,7 @@
#include "ixlv_vc_mgr.h"
#define IXLV_AQ_MAX_ERR 100
#define IXLV_AQ_MAX_ERR 200
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)

View File

@ -70,7 +70,8 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
break;
case I40E_VIRTCHNL_OP_RESET_VF:
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
valid_len = 0;
// TODO: valid length in api v1.0 is 0, v1.1 is 4
valid_len = 4;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
@ -224,29 +225,34 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
goto out;
}
do {
for (;;) {
if (++retries > IXLV_AQ_MAX_ERR)
goto out_alloc;
/* NOTE: initial delay is necessary */
/* Initial delay here is necessary */
i40e_msec_delay(100);
err = i40e_clean_arq_element(hw, &event, NULL);
} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
if (err)
goto out_alloc;
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
continue;
else if (err) {
err = EIO;
goto out_alloc;
}
err = (i40e_status)le32toh(event.desc.cookie_low);
if (err) {
err = EIO;
goto out_alloc;
}
if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_VERSION) {
DDPRINTF(dev, "Received unexpected op response: %d\n",
le32toh(event.desc.cookie_high));
/* Don't stop looking for expected response */
continue;
}
if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_VERSION) {
DDPRINTF(dev, "Received unexpected op response: %d\n",
le32toh(event.desc.cookie_high));
err = EIO;
goto out_alloc;
err = (i40e_status)le32toh(event.desc.cookie_low);
if (err) {
err = EIO;
goto out_alloc;
} else
break;
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
@ -266,7 +272,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
out_alloc:
free(event.msg_buf, M_DEVBUF);
out:
return err;
return (err);
}
/*
@ -871,7 +877,9 @@ ixlv_vc_completion(struct ixlv_sc *sc,
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
sc->init_state = IXLV_RESET_PENDING;
ixlv_init(sc);
mtx_unlock(&sc->mtx);
ixlv_init(vsi);
mtx_lock(&sc->mtx);
break;
default:
device_printf(dev, "%s: Unknown event %d from AQ\n",
@ -954,9 +962,11 @@ ixlv_vc_completion(struct ixlv_sc *sc,
v_retval);
break;
default:
#ifdef IXL_DEBUG
device_printf(dev,
"%s: Received unexpected message %d from PF.\n",
__func__, v_opcode);
#endif
break;
}
return;