This delta introduces SR-IOV support; thanks to Ryan Stone of Sandvine for
adding this major feature to the driver. Second, this updates the base
driver with new 20G device support; the new firmware levels also required
some changes to link handling and initialization.

MFC after: 1 week
jfv 2015-03-10 19:17:40 +00:00
parent ea04b2d2c9
commit 1d8f4435d6
11 changed files with 2001 additions and 473 deletions
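For context on how this lands in the tree: SR-IOV support on FreeBSD plugs into the pci_iov(9) framework, where the PF driver registers nvlist-based configuration schemas up front and then receives per-VF callbacks as virtual functions are created and destroyed. Below is a minimal sketch of that registration pattern only; the helper name ixl_iov_init() and the schema keys are illustrative assumptions, not necessarily what this commit implements.

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>

/* Hypothetical sketch: register PF/VF config schemas with pci_iov(9). */
static int
ixl_iov_init(device_t dev)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	/* Per-VF parameters the host may set (keys are illustrative). */
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, true);

	/*
	 * Hand both schemas to the framework; it validates iovctl(8)
	 * input against them and calls back into the driver per VF.
	 */
	return (pci_iov_attach(dev, pf_schema, vf_schema));
}

VFs are then typically brought up from userland with iovctl(8) pointed at a configuration file naming the PF device.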

View File

@@ -42,7 +42,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR 0x0004
#define I40E_FW_API_VERSION_MINOR 0x0002
struct i40e_aq_desc {
__le16 flags;
@@ -140,7 +140,12 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
i40e_aqc_opc_set_cppm_configuration = 0x0103,
i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -265,6 +270,7 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
i40e_aqc_opc_tunnel_key_structure = 0x0B10,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -276,6 +282,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
i40e_aqc_opc_debug_get_deviceid = 0xFF00,
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -509,8 +517,7 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
#define I40E_AQC_ADDR_VALID_MASK 0x1F0
#define I40E_AQC_ADDR_VALID_MASK 0xf0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -533,9 +540,7 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
#define I40E_AQC_WRITE_TYPE_MASK 0xC000
#define I40E_AQC_WRITE_TYPE_MASK 0xc000
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -1071,7 +1076,6 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -2066,12 +2070,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA
#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;

View File

@@ -866,7 +866,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
return media;
}
#define I40E_PF_RESET_WAIT_COUNT 110
#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -1108,11 +1108,9 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
/* ignore gpio LED src mode entries related to the activity
* LEDs
*/
current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
/* ignore gpio LED src mode entries related to the activity LEDs */
current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
switch (current_mode) {
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
@@ -1156,11 +1154,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
/* ignore gpio LED src mode entries related to the activity
* LEDs
*/
current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
/* ignore gpio LED src mode entries related to the activity LEDs */
current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
switch (current_mode) {
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
@@ -1529,6 +1525,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_set_phy_int_mask
* @hw: pointer to the hw struct
@@ -2816,13 +2813,12 @@ enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
#define I40E_DEV_FUNC_CAP_CEM 0xF2
#define I40E_DEV_FUNC_CAP_IWARP 0x51
#define I40E_DEV_FUNC_CAP_LED 0x61
#define I40E_DEV_FUNC_CAP_SDP 0x62
#define I40E_DEV_FUNC_CAP_MDIO 0x63
#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64
/**
* i40e_parse_discover_capabilities
@@ -2840,7 +2836,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
struct i40e_aqc_list_capabilities_element_resp *cap;
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
u8 major_rev;
struct i40e_hw_capabilities *p;
u32 i = 0;
u16 id;
@@ -2859,7 +2854,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
number = LE32_TO_CPU(cap->number);
logical_id = LE32_TO_CPU(cap->logical_id);
phys_id = LE32_TO_CPU(cap->phys_id);
major_rev = cap->major_rev;
switch (id) {
case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2934,21 +2928,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
case I40E_DEV_FUNC_CAP_MSIX_VF:
p->num_msix_vectors_vf = number;
break;
case I40E_DEV_FUNC_CAP_FLEX10:
if (major_rev == 1) {
if (number == 1) {
p->flex10_enable = TRUE;
p->flex10_capable = TRUE;
}
} else {
/* Capability revision >= 2 */
if (number & 1)
p->flex10_enable = TRUE;
if (number & 2)
p->flex10_capable = TRUE;
}
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
case I40E_DEV_FUNC_CAP_MFP_MODE_1:
if (number == 1)
p->mfp_mode_1 = TRUE;
break;
case I40E_DEV_FUNC_CAP_CEM:
if (number == 1)
@@ -2981,18 +2963,11 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
default:
break;
}
}
if (p->fcoe)
i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
/* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
p->fcoe = FALSE;
@@ -4947,63 +4922,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
}
}
/**
* i40e_aq_debug_dump
* @hw: pointer to the hardware structure
* @cluster_id: specific cluster to dump
* @table_id: table id within cluster
* @start_index: index of line in the block to read
* @buff_size: dump buffer size
* @buff: dump buffer
* @ret_buff_size: actual buffer size returned
* @ret_next_table: next block to read
* @ret_next_index: next index to read
*
* Dump internal FW/HW data for debug purposes.
*
**/
enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
u8 table_id, u32 start_index, u16 buff_size,
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_dump_internals *cmd =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
struct i40e_aqc_debug_dump_internals *resp =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
enum i40e_status_code status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_debug_dump_internals);
/* Indirect Command */
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
if (buff_size > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
cmd->cluster_id = cluster_id;
cmd->table_id = table_id;
cmd->idx = CPU_TO_LE32(start_index);
desc.datalen = CPU_TO_LE16(buff_size);
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
if (ret_buff_size != NULL)
*ret_buff_size = LE16_TO_CPU(desc.datalen);
if (ret_next_table != NULL)
*ret_next_table = resp->table_id;
if (ret_next_index != NULL)
*ret_next_index = LE32_TO_CPU(resp->idx);
}
return status;
}
/**
* i40e_read_bw_from_alt_ram
* @hw: pointer to the hardware structure

View File

@@ -445,9 +445,4 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
u8 table_id, u32 start_index, u16 buff_size,
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */

View File

@@ -287,17 +287,7 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
bool flex10_enable;
bool flex10_capable;
u32 flex10_mode;
#define I40E_FLEX10_MODE_UNKNOWN 0x0
#define I40E_FLEX10_MODE_DCC 0x1
#define I40E_FLEX10_MODE_DCI 0x2
u32 flex10_status;
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
bool mfp_mode_1;
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -326,7 +316,6 @@ struct i40e_hw_capabilities {
u8 rx_buf_chain_len;
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
};
struct i40e_mac_info {
@@ -573,7 +562,7 @@ struct i40e_hw {
u32 debug_mask;
};
static INLINE bool i40e_is_vf(struct i40e_hw *hw)
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
return hw->mac.type == I40E_MAC_VF;
}
@@ -1274,9 +1263,6 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
u64 fd_atr_tunnel_match;
u32 fd_atr_status;
u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;

File diff suppressed because it is too large

View File

@@ -48,7 +48,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixlv_driver_version[] = "1.2.1";
char ixlv_driver_version[] = "1.2.4";
/*********************************************************************
* PCI Device ID Table
@@ -398,7 +398,7 @@ ixlv_attach(device_t dev)
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
vsi->link_up = TRUE;
sc->link_up = TRUE;
/* This allocates the memory and early settings */
if (ixlv_setup_queues(sc) != 0) {
@@ -480,7 +480,7 @@ ixlv_detach(device_t dev)
/* Make sure VLANS are not using driver */
if (vsi->ifp->if_vlantrunk != NULL) {
device_printf(dev, "Vlan in use, detach first\n");
if_printf(vsi->ifp, "Vlan in use, detach first\n");
INIT_DBG_DEV(dev, "end");
return (EBUSY);
}
@@ -893,7 +893,7 @@ ixlv_init_locked(struct ixlv_sc *sc)
ixl_init_tx_ring(que);
if (vsi->max_frame_size <= 2048)
if (vsi->max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
@@ -1383,7 +1383,7 @@ ixlv_assign_msix(struct ixlv_sc *sc)
struct tx_ring *txr;
int error, rid, vector = 1;
#ifdef RSS
cpuset_t cpu_mask;
cpuset_t cpu_mask;
#endif
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
@@ -1413,7 +1413,7 @@ ixlv_assign_msix(struct ixlv_sc *sc)
#endif
bus_bind_intr(dev, que->res, cpu_id);
que->msix = vector;
vsi->que_mask |= (u64)(1 << que->msix);
vsi->que_mask |= (u64)(1 << que->msix);
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixlv_handle_que, que);
que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
@@ -1718,12 +1718,12 @@ ixlv_setup_queues(struct ixlv_sc *sc)
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ixl_vsi *vsi = arg;
struct ixlv_sc *sc = vsi->back;
struct ixlv_vlan_filter *v;
if (ifp->if_softc != arg) /* Not our event */
if (ifp->if_softc != arg) /* Not our event */
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
@@ -1755,12 +1755,12 @@ ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
struct ixl_vsi *vsi = ifp->if_softc;
struct ixl_vsi *vsi = arg;
struct ixlv_sc *sc = vsi->back;
struct ixlv_vlan_filter *v;
int i = 0;
int i = 0;
if (ifp->if_softc != arg)
if (ifp->if_softc != arg)
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
@@ -2154,7 +2154,7 @@ ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
if (!vsi->link_up) {
if (!sc->link_up) {
mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end: link not up");
return;
@@ -2395,7 +2395,7 @@ ixlv_local_timer(void *arg)
} else {
/* Check if we've come back from hung */
if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
vsi->active_queues |= ((u64)1 << que->me);
vsi->active_queues |= ((u64)1 << que->me);
}
if (que->busy >= IXL_MAX_TX_BUSY) {
device_printf(dev,"Warning queue %d "
@@ -2426,20 +2426,19 @@ ixlv_update_link_status(struct ixlv_sc *sc)
{
struct ixl_vsi *vsi = &sc->vsi;
struct ifnet *ifp = vsi->ifp;
device_t dev = sc->dev;
if (vsi->link_up){
if (sc->link_up){
if (vsi->link_active == FALSE) {
if (bootverbose)
device_printf(dev,"Link is Up, %d Gbps\n",
(vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
if_printf(ifp,"Link is Up, %d Gbps\n",
(sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
vsi->link_active = TRUE;
if_link_state_change(ifp, LINK_STATE_UP);
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
if (bootverbose)
device_printf(dev,"Link is Down\n");
if_printf(ifp,"Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
vsi->link_active = FALSE;
}
@@ -2657,7 +2656,6 @@ static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
struct ixlv_mac_filter *f;
device_t dev = sc->dev;
/* Does one already exist? */
f = ixlv_find_mac_filter(sc, macaddr);
@@ -2670,7 +2668,7 @@ ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
/* If not, get a new empty filter */
f = ixlv_get_mac_filter(sc);
if (f == NULL) {
device_printf(dev, "%s: no filters available!!\n",
if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
__func__);
return (ENOMEM);
}
@@ -2836,7 +2834,7 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
struct ixl_sysctl_info *entry = ctls;
while (entry->stat != 0)
{
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
CTLFLAG_RD, entry->stat,
entry->description);
entry++;
@@ -2852,34 +2850,34 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
txr = &(queues[q].txr);
rxr = &(queues[q].rxr);
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
"m_defrag() failed");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
CTLFLAG_RD, &(queues[q].dropped_pkts),
"Driver dropped packets");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(queues[q].irqs),
"irqs on this queue");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &(queues[q].tso),
"TSO");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
CTLFLAG_RD, &(queues[q].tx_dma_setup),
"Driver tx dma failure in xmit");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
"Queue No Descriptor Available");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &(txr->total_packets),
"Queue Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
CTLFLAG_RD, &(txr->tx_bytes),
"Queue Bytes Transmitted");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &(rxr->rx_packets),
"Queue Packets Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");

View File

@@ -90,6 +90,11 @@
#include <sys/smp.h>
#include <machine/smp.h>
#ifdef PCI_IOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#endif
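The two headers above carry the VF configuration path: parameters that iovctl(8) supplies are validated against the driver's schema and handed over as an nvlist when each VF is added. A hedged sketch of the consuming side, using the pci_iov(9) add_vf method shape and the struct ixl_vf fields this commit introduces in ixl_pf.h; the key names are hypothetical, mirroring the earlier schema sketch.

static int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	struct ixl_vf vf = { .vf_num = vfnum };	/* stand-in for real state */
	size_t size;

	if (nvlist_exists_binary(params, "mac-addr")) {
		const void *mac = nvlist_get_binary(params, "mac-addr", &size);
		memcpy(vf.mac, mac, ETHER_ADDR_LEN);
	}
	if (nvlist_get_bool(params, "mac-anti-spoof"))
		vf.vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
	return (0);
}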
#include "i40e_type.h"
#include "i40e_prototype.h"
@@ -224,6 +229,10 @@
#define IXL_QUEUE_HUNG 0x80000000
#define IXL_KEYSZ 10
#define IXL_VF_MAX_BUFFER 0x3F80
#define IXL_VF_MAX_HDR_BUFFER 0x840
#define IXL_VF_MAX_FRAME 0x3FFF
/* ERJ: hardware can support ~1.5k filters between all functions */
#define IXL_MAX_FILTERS 256
#define IXL_MAX_TX_BUSY 10
@@ -265,6 +274,35 @@
#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
#define IXL_VF_RESET_TIMEOUT 100
#define IXL_VSI_DATA_PORT 0x01
#define IXLV_MAX_QUEUES 16
#define IXL_MAX_VSI_QUEUES (2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1))
#define IXL_RX_CTX_BASE_UNITS 128
#define IXL_TX_CTX_BASE_UNITS 128
#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \
I40E_VPINT_LNKLSTN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \
I40E_VFINT_DYN_CTLN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
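The (vector) - 1 in these two macros reflects the VF interrupt layout: vector 0 is the VF's mailbox/admin vector and is addressed through separate registers, so each VF owns a contiguous run of (num_msix_vectors_vf - 1) queue vectors in the global register file. A worked example plus a hedged use (the surrounding names are assumptions):

/*
 * With hw->func_caps.num_msix_vectors_vf == 5:
 *   IXL_VPINT_LNKLSTN_REG(hw, 1, 2) == I40E_VPINT_LNKLSTN((1 - 1) + 4 * 2)
 *                                   == I40E_VPINT_LNKLSTN(8)
 */
wr32(hw, IXL_VPINT_LNKLSTN_REG(hw, 1, vf->vf_num), first_qidx);
/* real code would also set the queue-type field in the register value */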
#define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA
#define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20
#define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf) ((glb_vf) / 32)
#define IXL_GLGEN_VFLRSTAT_MASK(glb_vf) (1 << ((glb_vf) % 32))
#define IXL_MAX_ITR_IDX 3
#define IXL_END_OF_INTR_LNKLST 0x7FF
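The IXL_GLGEN_VFLRSTAT_INDEX/MASK macros above split a global VF number into a 32-bit register index and a bit mask, since VFLR (VF function-level reset) status is kept as a bitmap spread over several registers. A hedged sketch of how such a bit is typically tested and acknowledged; the helper name is assumed, and the bits are treated as write-1-to-clear as in other i40e-derived drivers:

/* Illustrative: check for and acknowledge a pending VF reset event. */
static bool
ixl_vf_flr_pending(struct i40e_hw *hw, uint32_t global_vf)
{
	uint32_t idx = IXL_GLGEN_VFLRSTAT_INDEX(global_vf);
	uint32_t mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf);

	if ((rd32(hw, I40E_GLGEN_VFLRSTAT(idx)) & mask) == 0)
		return (false);
	wr32(hw, I40E_GLGEN_VFLRSTAT(idx), mask);	/* write 1 to clear */
	return (true);
}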
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@@ -461,19 +499,22 @@ struct ixl_vsi {
struct ifmedia media;
u64 que_mask;
int id;
u16 vsi_num;
u16 msix_base; /* station base MSIX vector */
u16 first_queue;
u16 num_queues;
u16 rx_itr_setting;
u16 tx_itr_setting;
struct ixl_queue *queues; /* head of queues */
bool link_active;
u16 seid;
u16 uplink_seid;
u16 downlink_seid;
u16 max_frame_size;
u32 link_speed;
bool link_up;
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
struct i40e_aqc_vsi_properties_data info;
@@ -505,6 +546,7 @@ struct ixl_vsi {
/* Misc. */
u64 active_queues;
u64 flags;
struct sysctl_oid *vsi_node;
};
/*
@@ -543,7 +585,7 @@
** Compare two ethernet addresses
*/
static inline bool
cmp_etheraddr(u8 *ea1, u8 *ea2)
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
bool cmp = FALSE;

View File

@@ -36,6 +36,22 @@
#ifndef _IXL_PF_H_
#define _IXL_PF_H_
#define VF_FLAG_ENABLED 0x01
#define VF_FLAG_SET_MAC_CAP 0x02
#define VF_FLAG_VLAN_CAP 0x04
#define VF_FLAG_PROMISC_CAP 0x08
#define VF_FLAG_MAC_ANTI_SPOOF 0x10
struct ixl_vf {
struct ixl_vsi vsi;
uint32_t vf_flags;
uint8_t mac[ETHER_ADDR_LEN];
uint16_t vf_num;
struct sysctl_ctx_list ctx;
};
/* Physical controller structure */
struct ixl_pf {
struct i40e_hw hw;
@@ -64,15 +80,18 @@ struct ixl_pf {
struct task adminq;
struct taskqueue *tq;
bool link_up;
u32 link_speed;
int advertised_speed;
int fc; /* local flow ctrl setting */
/*
** VSI - Stations:
** Network interfaces
** These are the traffic class holders, and
** will have a stack interface and queues
** associated with them.
** NOTE: for now using just one, so embed it.
** NOTE: The PF has only a single interface,
** so it is embedded in the PF struct.
*/
struct ixl_vsi vsi;
@@ -84,8 +103,31 @@ struct ixl_pf {
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
bool stat_offsets_loaded;
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
struct task vflr_task;
int vc_debug_lvl;
};
#define IXL_SET_ADVERTISE_HELP \
"Control link advertise speed:\n" \
"\tFlags:\n" \
"\t\t0x1 - advertise 100 Mb\n" \
"\t\t0x2 - advertise 1G\n" \
"\t\t0x4 - advertise 10G\n" \
"\t\t0x8 - advertise 20G\n\n" \
"\tDoes not work on 40G devices."
#define I40E_VC_DEBUG(pf, level, ...) \
do { \
if ((pf)->vc_debug_lvl >= (level)) \
device_printf((pf)->dev, __VA_ARGS__); \
} while (0)
#define i40e_send_vf_nack(pf, vf, op, st) \
ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
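Two small idioms worth noting here: the do { } while (0) wrapper lets I40E_VC_DEBUG() behave as a single statement under an unbraced if/else, and i40e_send_vf_nack() captures __FILE__/__LINE__ so the underlying ixl_send_vf_nack_msg() can report where a virtchnl request was rejected. A hedged usage sketch; the opcode and status values are just examples from the shared i40e headers:

	if (vf->vsi.seid == 0) {
		I40E_VC_DEBUG(pf, 1, "VF %d has no VSI\n", vf->vf_num);
		i40e_send_vf_nack(pf, vf,
		    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
		return;
	}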
#define IXL_PF_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)

View File

@@ -62,13 +62,9 @@ static __inline void ixl_rx_discard(struct rx_ring *, int);
static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
struct mbuf *, u8);
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
/*
** Multiqueue Transmit driver
**
*/
int
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
@@ -112,7 +108,7 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
err = drbr_enqueue(ifp, txr->br, m);
if (err)
return(err);
return (err);
if (IXL_TX_TRYLOCK(txr)) {
ixl_mq_start_locked(ifp, txr);
IXL_TX_UNLOCK(txr);
@@ -488,22 +484,12 @@ ixl_allocate_tx_data(struct ixl_queue *que)
void
ixl_init_tx_ring(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
struct ixl_tx_buf *buf;
#ifdef DEV_NETMAP
struct netmap_adapter *na = NA(que->vsi->ifp);
struct netmap_slot *slot;
#endif /* DEV_NETMAP */
struct tx_ring *txr = &que->txr;
struct ixl_tx_buf *buf;
/* Clear the old ring contents */
IXL_TX_LOCK(txr);
#ifdef DEV_NETMAP
/*
* (under lock): if in netmap mode, do some consistency
* checks and set slot to entry 0 of the netmap ring.
*/
slot = netmap_reset(na, NR_TX, que->me, 0);
#endif /* DEV_NETMAP */
bzero((void *)txr->base,
(sizeof(struct i40e_tx_desc)) * que->num_desc);
@@ -528,19 +514,6 @@ ixl_init_tx_ring(struct ixl_queue *que)
m_freem(buf->m_head);
buf->m_head = NULL;
}
#ifdef DEV_NETMAP
/*
* In netmap mode, set the map for the packet buffer.
* NOTE: Some drivers (not this one) also need to set
* the physical buffer address in the NIC ring.
* netmap_idx_n2k() maps a nic index, i, into the corresponding
* netmap slot index, si
*/
if (slot) {
int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
}
#endif /* DEV_NETMAP */
/* Clear the EOP index */
buf->eop_index = -1;
}
@@ -852,11 +825,6 @@ ixl_txeof(struct ixl_queue *que)
mtx_assert(&txr->mtx, MA_OWNED);
#ifdef DEV_NETMAP
// XXX todo: implement moderation
if (netmap_tx_irq(que->vsi->ifp, que->me))
return FALSE;
#endif /* DEF_NETMAP */
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
@@ -1158,16 +1126,8 @@ ixl_init_rx_ring(struct ixl_queue *que)
struct ixl_rx_buf *buf;
bus_dma_segment_t pseg[1], hseg[1];
int rsize, nsegs, error = 0;
#ifdef DEV_NETMAP
struct netmap_adapter *na = NA(que->vsi->ifp);
struct netmap_slot *slot;
#endif /* DEV_NETMAP */
IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
/* same as in ixl_init_tx_ring() */
slot = netmap_reset(na, NR_RX, que->me, 0);
#endif /* DEV_NETMAP */
/* Clear the ring contents */
rsize = roundup2(que->num_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
@@ -1201,27 +1161,6 @@ ixl_init_rx_ring(struct ixl_queue *que)
struct mbuf *mh, *mp;
buf = &rxr->buffers[j];
#ifdef DEV_NETMAP
/*
* In netmap mode, fill the map and set the buffer
* address in the NIC ring, considering the offset
* between the netmap and NIC rings (see comment in
* ixgbe_setup_transmit_ring() ). No need to allocate
* an mbuf, so end the block with a continue;
*/
if (slot) {
int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
uint64_t paddr;
void *addr;
addr = PNMB(na, slot + sj, &paddr);
netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
/* Update descriptor and the cached value */
rxr->base[j].read.pkt_addr = htole64(paddr);
rxr->base[j].read.hdr_addr = 0;
continue;
}
#endif /* DEV_NETMAP */
/*
** Don't allocate mbufs if not
@@ -1522,12 +1461,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
if (netmap_rx_irq(ifp, que->me, &count)) {
IXL_RX_UNLOCK(rxr);
return (FALSE);
}
#endif /* DEV_NETMAP */
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;

View File

@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2014, Intel Corporation
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -119,6 +119,9 @@ struct ixlv_sc {
int msix;
int if_flags;
bool link_up;
u32 link_speed;
struct mtx mtx;
u32 qbase;

View File

@@ -362,7 +362,7 @@ ixlv_configure_queues(struct ixlv_sc *sc)
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
pairs = vsi->num_queues;
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
@@ -788,14 +788,11 @@ ixlv_request_stats(struct ixlv_sc *sc)
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
struct ixl_vsi *vsi;
struct ixl_vsi *vsi = &sc->vsi;
uint64_t tx_discards;
int i;
vsi = &sc->vsi;
tx_discards = es->tx_discards;
for (i = 0; i < sc->vsi.num_queues; i++)
for (int i = 0; i < vsi->num_queues; i++)
tx_discards += sc->vsi.queues[i].txr.br->br_drops;
/* Update ifnet stats */
@@ -816,7 +813,7 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
IXL_SET_COLLISIONS(vsi, 0);
sc->vsi.eth_stats = *es;
vsi->eth_stats = *es;
}
/*
@@ -845,9 +842,9 @@ ixlv_vc_completion(struct ixlv_sc *sc,
vpe->event_data.link_event.link_status,
vpe->event_data.link_event.link_speed);
#endif
vsi->link_up =
sc->link_up =
vpe->event_data.link_event.link_status;
vsi->link_speed =
sc->link_speed =
vpe->event_data.link_event.link_speed;
ixlv_update_link_status(sc);
break;