ixl(4): Update to use iflib

Update the driver to use iflib in order to bring performance,
maintainability, and (hopefully) stability benefits to the driver.

The driver currently isn't completely ported; features that are missing:

- VF driver (ixlv)
- SR-IOV host support
- RDMA support

The plan is to have these re-added to the driver before the next FreeBSD release.

Reviewed by:	gallatin@
Contributions by: gallatin@, mmacy@, krzysztof.galazka@intel.com
Tested by:	jeffrey.e.pieper@intel.com
MFC after:	1 month
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D15577
This commit is contained in:
Eric Joyner 2018-06-18 20:12:54 +00:00
parent 52666d3675
commit 1031d839aa
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=335338
18 changed files with 3892 additions and 5598 deletions

View File

@ -240,8 +240,8 @@ device em # Intel PRO/1000 Gigabit Ethernet Family
device ix # Intel PRO/10GbE PCIE PF Ethernet
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel XL710 40Gbe PCIE Ethernet
options IXL_IW # Enable iWARP Client Interface in ixl(4)
device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
#options IXL_IW # Enable iWARP Client Interface in ixl(4)
#device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device ti # Alteon Networks Tigon I/II gigabit Ethernet
device txp # 3Com 3cR990 (``Typhoon'')

View File

@ -270,10 +270,10 @@ dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_i2c.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_iw.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/if_ixlv.c optional ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
#dev/ixl/ixl_iw.c optional ixl pci \
# compile-with "${NORMAL_C} -I$S/dev/ixl"
#dev/ixl/if_ixlv.c optional ixlv pci \
# compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixlvc.c optional ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_txrx.c optional ixl pci | ixlv pci \

View File

@ -132,7 +132,7 @@ i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
bus_dmamap_unload(mem->tag, mem->map);
bus_dmamem_free(mem->tag, mem->va, mem->map);
bus_dma_tag_destroy(mem->tag);
return (0);
return (I40E_SUCCESS);
}
void

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -61,6 +61,7 @@
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/iflib.h>
#include <net/bpf.h>
#include <net/if_types.h>
@ -102,80 +103,13 @@
#include <netinet/in_rss.h>
#endif
#include "ifdi_if.h"
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "ixl_debug.h"
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
#ifdef IXL_DEBUG
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
/* Defines for printing generic debug information */
#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
/* Defines for printing specific debug information */
#define DEBUG_INIT 1
#define DEBUG_IOCTL 1
#define DEBUG_HW 1
#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
if_printf(ifp, S "\n", ##__VA_ARGS__)
#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
#else /* no IXL_DEBUG */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define DPRINTF(...)
#define DDPRINTF(...)
#define IDPRINTF(...)
#define INIT_DEBUGOUT(...)
#define INIT_DBG_DEV(...)
#define INIT_DBG_IF(...)
#define IOCTL_DEBUGOUT(...)
#define IOCTL_DBG_IF2(...)
#define IOCTL_DBG_IF(...)
#define HW_DEBUGOUT(...)
#endif /* IXL_DEBUG */
enum ixl_dbg_mask {
IXL_DBG_INFO = 0x00000001,
IXL_DBG_EN_DIS = 0x00000002,
IXL_DBG_AQ = 0x00000004,
IXL_DBG_NVMUPD = 0x00000008,
IXL_DBG_IOCTL_KNOWN = 0x00000010,
IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
IXL_DBG_IOCTL_ALL = 0x00000030,
I40E_DEBUG_RSS = 0x00000100,
IXL_DBG_IOV = 0x00001000,
IXL_DBG_IOV_VC = 0x00002000,
IXL_DBG_SWITCH_INFO = 0x00010000,
IXL_DBG_I2C = 0x00020000,
IXL_DBG_ALL = 0xFFFFFFFF
};
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - " IXL_DRIVER_VERSION_STRING)
/* Tunables */
@ -195,48 +129,27 @@ enum ixl_dbg_mask {
#define IXL_AQ_LEN 256
#define IXL_AQ_LEN_MAX 1024
/*
** Default number of entries in Tx queue buf_ring.
*/
#define DEFAULT_TXBRSZ 4096
/* Alignment for rings */
#define DBA_ALIGN 128
/*
* This is the max watchdog interval, ie. the time that can
* pass between any two TX clean operations, such only happening
* when the TX hardware is functioning.
*
* XXX: Watchdog currently counts down in units of (hz)
* Set this to just (hz) if you want queues to hang under a little bit of stress
*/
#define IXL_WATCHDOG (10 * hz)
/*
* This parameters control when the driver calls the routine to reclaim
* transmit descriptors.
*/
#define IXL_TX_CLEANUP_THRESHOLD (que->num_tx_desc / 8)
#define IXL_TX_OP_THRESHOLD (que->num_tx_desc / 32)
#define MAX_MULTICAST_ADDR 128
#define IXL_MSIX_BAR 3
#define IXL_ADM_LIMIT 2
#define IXL_TSO_SIZE 65535
// TODO: Find out which TSO_SIZE to use
//#define IXL_TSO_SIZE 65535
#define IXL_TSO_SIZE ((255*1024)-1)
#define IXL_TX_BUF_SZ ((u32) 1514)
#define IXL_AQ_BUF_SZ ((u32) 4096)
#define IXL_RX_HDR 128
#define IXL_RX_LIMIT 512
#define IXL_RX_ITR 0
#define IXL_TX_ITR 1
#define IXL_ITR_NONE 3
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_RX_SEGS 5
#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 7
#define IXL_QUEUE_HUNG 0x80000000
#define IXL_MIN_TSO_MSS 64
#define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
@ -252,7 +165,6 @@ enum ixl_dbg_mask {
/* ERJ: hardware can support ~2k (SW5+) filters between all functions */
#define IXL_MAX_FILTERS 256
#define IXL_MAX_TX_BUSY 10
#define IXL_NVM_VERSION_LO_SHIFT 0
#define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT)
@ -288,12 +200,6 @@ enum ixl_dbg_mask {
#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)
/* Misc flags for ixl_vsi.flags */
#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
#define IXL_FLAGS_USES_MSIX (1 << 2)
#define IXL_FLAGS_IS_VF (1 << 3)
#define IXL_VF_RESET_TIMEOUT 100
#define IXL_VSI_DATA_PORT 0x01
@ -304,6 +210,7 @@ enum ixl_dbg_mask {
#define IXL_RX_CTX_BASE_UNITS 128
#define IXL_TX_CTX_BASE_UNITS 128
#if 0
#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \
I40E_VPINT_LNKLSTN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
@ -311,6 +218,7 @@ enum ixl_dbg_mask {
#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \
I40E_VFINT_DYN_CTLN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
#endif
#define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA
@ -347,15 +255,20 @@ enum ixl_dbg_mask {
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
#define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx)
#define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED)
#define IXL_CAPS \
(IFCAP_TSO4 | IFCAP_TSO6 | \
IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)
#define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
#define IXL_CSUM_TCP \
(CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
#define IXL_CSUM_UDP \
(CSUM_IP_UDP|CSUM_IP6_UDP)
#define IXL_CSUM_SCTP \
(CSUM_IP_SCTP|CSUM_IP6_SCTP)
/* Pre-11 counter(9) compatibility */
#if __FreeBSD_version >= 1100036
@ -386,6 +299,9 @@ enum ixl_dbg_mask {
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
#define IXL_DEV_ERR(_dev, _format, ...) \
device_printf(_dev, "%s: " _format " (%s:%d)\n", __func__, ##__VA_ARGS__, __FILE__, __LINE__)
/*
*****************************************************************************
* vendor_info_array
@ -403,22 +319,6 @@ typedef struct _ixl_vendor_info_t {
unsigned int index;
} ixl_vendor_info_t;
struct ixl_tx_buf {
u32 eop_index;
struct mbuf *m_head;
bus_dmamap_t map;
bus_dma_tag_t tag;
};
struct ixl_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
struct mbuf *fmp;
bus_dmamap_t hmap;
bus_dmamap_t pmap;
};
/*
** This struct has multiple uses, multicast
** addresses, vlans, and mac filters all use it.
@ -434,34 +334,30 @@ struct ixl_mac_filter {
* The Transmit ring control struct
*/
struct tx_ring {
struct ixl_queue *que;
struct mtx mtx;
struct ixl_tx_queue *que;
u32 tail;
struct i40e_tx_desc *base;
struct i40e_dma_mem dma;
u16 next_avail;
u16 next_to_clean;
u16 atr_rate;
u16 atr_count;
u32 itr;
struct i40e_tx_desc *tx_base;
u64 tx_paddr;
u32 latency;
struct ixl_tx_buf *buffers;
volatile u16 avail;
u32 cmd;
bus_dma_tag_t tx_tag;
bus_dma_tag_t tso_tag;
char mtx_name[16];
struct buf_ring *br;
s32 watchdog_timer;
u32 packets;
u32 me;
/*
* For reporting completed packet status
* in descriptor writeback mode
*/
qidx_t *tx_rsq;
qidx_t tx_rs_cidx;
qidx_t tx_rs_pidx;
qidx_t tx_cidx_processed;
/* Used for Dynamic ITR calculation */
u32 packets;
u32 itr;
u32 bytes;
/* Soft Stats */
u64 tx_bytes;
u64 no_desc;
u64 total_packets;
u64 tx_packets;
u64 mss_too_small;
};
@ -469,68 +365,46 @@ struct tx_ring {
* The Receive ring control struct
*/
struct rx_ring {
struct ixl_queue *que;
struct mtx mtx;
union i40e_rx_desc *base;
struct i40e_dma_mem dma;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
struct ixl_rx_queue *que;
union i40e_rx_desc *rx_base;
uint64_t rx_paddr;
bool discard;
u32 next_refresh;
u32 next_check;
u32 itr;
u32 latency;
char mtx_name[16];
struct ixl_rx_buf *buffers;
u32 mbuf_sz;
u32 tail;
bus_dma_tag_t htag;
bus_dma_tag_t ptag;
u32 me;
/* Used for Dynamic ITR calculation */
u32 packets;
u32 bytes;
/* Soft stats */
u64 split;
u64 rx_packets;
u64 rx_bytes;
u64 desc_errs;
u64 not_done;
};
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring pair.
** Driver queue structs
*/
struct ixl_queue {
struct ixl_tx_queue {
struct ixl_vsi *vsi;
u32 me;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
struct resource *res;
void *tag;
int num_tx_desc; /* both tx and rx */
int num_rx_desc; /* both tx and rx */
#ifdef DEV_NETMAP
int num_desc; /* for compatibility with current netmap code in kernel */
#endif
struct tx_ring txr;
struct rx_ring rxr;
struct task task;
struct task tx_task;
struct taskqueue *tq;
/* Queue stats */
struct if_irq que_irq;
u32 msix;
/* Stats */
u64 irqs;
u64 tso;
u64 mbuf_defrag_failed;
u64 mbuf_hdr_failed;
u64 mbuf_pkt_failed;
u64 tx_dmamap_failed;
u64 dropped_pkts;
u64 mss_too_small;
};
struct ixl_rx_queue {
struct ixl_vsi *vsi;
struct rx_ring rxr;
struct if_irq que_irq;
u32 msix; /* This queue's MSIX vector */
/* Stats */
u64 irqs;
};
/*
@ -538,29 +412,35 @@ struct ixl_queue {
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
struct ixl_vsi {
void *back;
if_ctx_t ctx;
if_softc_ctx_t shared;
struct ifnet *ifp;
device_t dev;
//device_t dev;
struct i40e_hw *hw;
struct ifmedia media;
struct ifmedia *media;
#define num_rx_queues shared->isc_nrxqsets
#define num_tx_queues shared->isc_ntxqsets
void *back;
enum i40e_vsi_type type;
// TODO: Remove?
u64 que_mask;
int id;
u16 num_queues;
int num_tx_desc;
int num_rx_desc;
u32 rx_itr_setting;
u32 tx_itr_setting;
u16 max_frame_size;
bool enable_head_writeback;
struct ixl_queue *queues; /* head of queues */
u16 vsi_num;
bool link_active;
u16 seid;
u16 uplink_seid;
u16 downlink_seid;
struct ixl_tx_queue *tx_queues; /* TX queue array */
struct ixl_rx_queue *rx_queues; /* RX queue array */
struct if_irq irq;
u32 link_speed;
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
@ -568,8 +448,6 @@ struct ixl_vsi {
/* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u16 num_vlans;
/* Per-VSI stats from hardware */
@ -595,37 +473,27 @@ struct ixl_vsi {
/* Misc. */
u64 flags;
/* Stats sysctls for this VSI */
struct sysctl_oid *vsi_node;
};
/*
** Find the number of unrefreshed RX descriptors
*/
static inline u16
ixl_rx_unrefreshed(struct ixl_queue *que)
{
struct rx_ring *rxr = &que->rxr;
if (rxr->next_check > rxr->next_refresh)
return (rxr->next_check - rxr->next_refresh - 1);
else
return ((que->num_rx_desc + rxr->next_check) -
rxr->next_refresh - 1);
}
/*
** Find the next available unused filter
** Creates new filter with given MAC address and VLAN ID
*/
static inline struct ixl_mac_filter *
ixl_get_filter(struct ixl_vsi *vsi)
ixl_new_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f;
/* create a new empty filter */
f = malloc(sizeof(struct ixl_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (f)
if (f) {
SLIST_INSERT_HEAD(&vsi->ftl, f, next);
bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
f->vlan = vlan;
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
}
return (f);
}
@ -636,14 +504,7 @@ ixl_get_filter(struct ixl_vsi *vsi)
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
bool cmp = FALSE;
if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
(ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
(ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
cmp = TRUE;
return (cmp);
return (bcmp(ea1, ea2, 6) == 0);
}
/*
@ -679,34 +540,11 @@ struct ixl_sysctl_info {
extern const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN];
/*********************************************************************
* TXRX Function prototypes
*********************************************************************/
int ixl_allocate_tx_data(struct ixl_queue *);
int ixl_allocate_rx_data(struct ixl_queue *);
void ixl_init_tx_ring(struct ixl_queue *);
int ixl_init_rx_ring(struct ixl_queue *);
bool ixl_rxeof(struct ixl_queue *, int);
bool ixl_txeof(struct ixl_queue *);
void ixl_free_que_tx(struct ixl_queue *);
void ixl_free_que_rx(struct ixl_queue *);
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
void ixl_vsi_setup_rings_size(struct ixl_vsi *, int, int);
int ixl_queue_hang_check(struct ixl_vsi *);
void ixl_free_vsi(struct ixl_vsi *);
void ixl_qflush(struct ifnet *);
/* Common function prototypes between PF/VF driver */
#if __FreeBSD_version >= 1100000
uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
#endif
void ixl_get_default_rss_key(u32 *);
void ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que);
void ixl_set_queue_rx_itr(struct ixl_rx_queue *que);
void ixl_get_default_rss_key(u32 *);
const char * i40e_vc_stat_str(struct i40e_hw *hw,
enum virtchnl_status_code stat_err);
void ixl_set_busmaster(device_t);
void ixl_set_msix_enable(device_t);
u64 ixl_max_aq_speed_to_value(u8);
#endif /* _IXL_H_ */

110
sys/dev/ixl/ixl_debug.h Normal file
View File

@ -0,0 +1,110 @@
/******************************************************************************
Copyright (c) 2013-2016, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXL_DEBUG_H_
#define _IXL_DEBUG_H_
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
#ifdef IXL_DEBUG
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
/* Defines for printing generic debug information */
#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
/* Defines for printing specific debug information */
#define DEBUG_INIT 1
#define DEBUG_IOCTL 1
#define DEBUG_HW 1
#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
if_printf(ifp, S "\n", ##__VA_ARGS__)
#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
#else /* no IXL_DEBUG */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define DPRINTF(...)
#define DDPRINTF(...)
#define IDPRINTF(...)
#define INIT_DEBUGOUT(...)
#define INIT_DBG_DEV(...)
#define INIT_DBG_IF(...)
#define IOCTL_DEBUGOUT(...)
#define IOCTL_DBG_IF2(...)
#define IOCTL_DBG_IF(...)
#define HW_DEBUGOUT(...)
#endif /* IXL_DEBUG */
enum ixl_dbg_mask {
IXL_DBG_INFO = 0x00000001,
IXL_DBG_EN_DIS = 0x00000002,
IXL_DBG_AQ = 0x00000004,
IXL_DBG_NVMUPD = 0x00000008,
IXL_DBG_IOCTL_KNOWN = 0x00000010,
IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
IXL_DBG_IOCTL_ALL = 0x00000030,
I40E_DEBUG_RSS = 0x00000100,
IXL_DBG_IOV = 0x00001000,
IXL_DBG_IOV_VC = 0x00002000,
IXL_DBG_SWITCH_INFO = 0x00010000,
IXL_DBG_I2C = 0x00020000,
IXL_DBG_ALL = 0xFFFFFFFF
};
#endif /* _IXL_DEBUG_H_ */

View File

@ -45,16 +45,41 @@
#define VF_FLAG_PROMISC_CAP 0x08
#define VF_FLAG_MAC_ANTI_SPOOF 0x10
#define IXL_PF_STATE_EMPR_RESETTING (1 << 0)
#define IXL_PF_STATE_FW_LLDP_DISABLED (1 << 1)
#define IXL_ICR0_CRIT_ERR_MASK \
(I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
I40E_PFINT_ICR0_ECC_ERR_MASK | \
I40E_PFINT_ICR0_PE_CRITERR_MASK)
/* VF Interrupts */
#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \
I40E_VPINT_LNKLSTN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \
I40E_VFINT_DYN_CTLN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
/* Used in struct ixl_pf's state field */
enum ixl_pf_state {
IXL_PF_STATE_ADAPTER_RESETTING = (1 << 0),
IXL_PF_STATE_MDD_PENDING = (1 << 1),
IXL_PF_STATE_PF_RESET_REQ = (1 << 2),
IXL_PF_STATE_VF_RESET_REQ = (1 << 3),
IXL_PF_STATE_PF_CRIT_ERR = (1 << 4),
IXL_PF_STATE_CORE_RESET_REQ = (1 << 5),
IXL_PF_STATE_GLOB_RESET_REQ = (1 << 6),
IXL_PF_STATE_EMP_RESET_REQ = (1 << 7),
IXL_PF_STATE_FW_LLDP_DISABLED = (1 << 8),
};
struct ixl_vf {
struct ixl_vsi vsi;
uint32_t vf_flags;
u32 vf_flags;
u32 num_mdd_events;
uint8_t mac[ETHER_ADDR_LEN];
uint16_t vf_num;
uint32_t version;
u8 mac[ETHER_ADDR_LEN];
u16 vf_num;
u32 version;
struct ixl_pf_qtag qtag;
struct sysctl_ctx_list ctx;
@ -62,31 +87,24 @@ struct ixl_vf {
/* Physical controller structure */
struct ixl_pf {
/*
* This is first so that iflib_get_softc can return
* either the VSI or the PF structures.
*/
struct ixl_vsi vsi;
struct i40e_hw hw;
struct i40e_osdep osdep;
device_t dev;
struct ixl_vsi vsi;
struct resource *pci_mem;
struct resource *msix_mem;
/*
* Interrupt resources: this set is
* either used for legacy, or for Link
* when doing MSIX
*/
void *tag;
struct resource *res;
struct callout timer;
int msix;
#ifdef IXL_IW
int iw_msix;
bool iw_enabled;
#endif
int if_flags;
int state;
bool init_in_progress;
u32 state;
u8 supported_speeds;
struct ixl_pf_qmgr qmgr;
@ -101,13 +119,6 @@ struct ixl_pf {
int tx_itr;
int rx_itr;
struct mtx pf_mtx;
u32 qbase;
u32 admvec;
struct task adminq;
struct taskqueue *tq;
bool link_up;
u32 link_speed;
int advertised_speed;
@ -124,6 +135,13 @@ struct ixl_pf {
struct i40e_hw_port_stats stats_offsets;
bool stat_offsets_loaded;
/* I2C access methods */
u8 i2c_access_method;
s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
/* SR-IOV */
struct ixl_vf *vfs;
int num_vfs;
@ -177,13 +195,38 @@ struct ixl_pf {
#define IXL_SYSCTL_HELP_LINK_STATUS \
"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \
" the response." \
" the response."
#define IXL_SYSCTL_HELP_FW_LLDP \
"\nFW LLDP engine:\n" \
"\t0 - disable\n" \
"\t1 - enable\n"
#define IXL_SYSCTL_HELP_READ_I2C \
"\nRead a byte from I2C bus\n" \
"Input: 32-bit value\n" \
"\tbits 0-7: device address (0xA0 or 0xA2)\n" \
"\tbits 8-15: offset (0-255)\n" \
"\tbits 16-31: unused\n" \
"Output: 8-bit value read"
#define IXL_SYSCTL_HELP_WRITE_I2C \
"\nWrite a byte to the I2C bus\n" \
"Input: 32-bit value\n" \
"\tbits 0-7: device address (0xA0 or 0xA2)\n" \
"\tbits 8-15: offset (0-255)\n" \
"\tbits 16-23: value to write\n" \
"\tbits 24-31: unused\n" \
"Output: 8-bit value written"
#define IXL_SYSCTL_HELP_I2C_METHOD \
"\nI2C access method that driver will use:\n" \
"\t0 - best available method\n" \
"\t1 - bit bang via I2CPARAMS register\n" \
"\t2 - register read/write via I2CCMD register\n" \
"\t3 - Use Admin Queue command (best)\n" \
"Using the Admin Queue is only supported on 710 devices with FW version 1.7 or higher"
extern const char * const ixl_fc_string[6];
MALLOC_DECLARE(M_IXL);
@ -199,13 +242,6 @@ MALLOC_DECLARE(M_IXL);
#define i40e_send_vf_nack(pf, vf, op, st) \
ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
#define IXL_PF_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)
#define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx)
#define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
/* Debug printing */
#define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__)
void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
@ -216,11 +252,8 @@ void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
/* For netmap(4) compatibility */
#define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi)
/*
* PF-only function declarations
*/
int ixl_setup_interface(device_t, struct ixl_vsi *);
/* PF-only function declarations */
int ixl_setup_interface(device_t, struct ixl_pf *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
@ -230,9 +263,9 @@ void ixl_init(void *);
void ixl_local_timer(void *);
void ixl_register_vlan(void *, struct ifnet *, u16);
void ixl_unregister_vlan(void *, struct ifnet *, u16);
void ixl_intr(void *);
void ixl_msix_que(void *);
void ixl_msix_adminq(void *);
int ixl_intr(void *);
int ixl_msix_que(void *);
int ixl_msix_adminq(void *);
void ixl_do_adminq(void *, int);
int ixl_res_alloc_cmp(const void *, const void *);
@ -264,7 +297,6 @@ void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_c
int ixl_get_hw_capabilities(struct ixl_pf *);
void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
int ixl_allocate_pci_resources(struct ixl_pf *);
int ixl_setup_stations(struct ixl_pf *);
int ixl_switch_config(struct ixl_pf *);
void ixl_stop_locked(struct ixl_pf *);
@ -304,8 +336,8 @@ void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up);
void ixl_set_queue_rx_itr(struct ixl_queue *);
void ixl_set_queue_tx_itr(struct ixl_queue *);
void ixl_set_queue_rx_itr(struct ixl_rx_queue *);
void ixl_set_queue_tx_itr(struct ixl_tx_queue *);
void ixl_add_filter(struct ixl_vsi *, const u8 *, s16 vlan);
void ixl_del_filter(struct ixl_vsi *, const u8 *, s16 vlan);
@ -345,19 +377,31 @@ void ixl_free_mac_filters(struct ixl_vsi *vsi);
void ixl_update_vsi_stats(struct ixl_vsi *);
void ixl_vsi_reset_stats(struct ixl_vsi *);
int ixl_vsi_setup_queues(struct ixl_vsi *vsi);
void ixl_vsi_free_queues(struct ixl_vsi *vsi);
void ixl_if_init(if_ctx_t ctx);
void ixl_if_stop(if_ctx_t ctx);
/*
* I2C Function prototypes
*/
int ixl_find_i2c_interface(struct ixl_pf *);
s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
s32 ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
s32 ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
int ixl_get_fw_lldp_status(struct ixl_pf *pf);
int ixl_attach_get_link_status(struct ixl_pf *);
u64 ixl_max_aq_speed_to_value(u8);
void ixl_handle_vflr(void *, int);
#endif /* _IXL_PF_H_ */

View File

@ -46,9 +46,9 @@
#define IXL_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXL_I2C_REG(_hw) \
I40E_GLGEN_I2CPARAMS(((struct i40e_osdep *)(_hw)->back)->i2c_intfc_num)
I40E_GLGEN_I2CPARAMS(_hw->func_caps.mdio_port_num)
/* I2C bit-banging functions */
static s32 ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data);
static bool ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl);
static void ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl);
@ -62,6 +62,8 @@ static void ixl_i2c_bus_clear(struct ixl_pf *pf);
static void ixl_i2c_start(struct ixl_pf *pf);
static void ixl_i2c_stop(struct ixl_pf *pf);
static s32 ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum);
/**
* ixl_i2c_bus_clear - Clears the I2C bus
* @hw: pointer to hardware structure
@ -449,10 +451,10 @@ ixl_i2c_start(struct ixl_pf *pf)
}
/**
* ixl_read_i2c_byte - Reads 8 bit word over I2C
* ixl_read_i2c_byte_bb - Reads 8 bit word over I2C
**/
s32
ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data)
{
struct i40e_hw *hw = &pf->hw;
@ -523,9 +525,9 @@ ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
i40e_msec_delay(100);
retry++;
if (retry < max_retry)
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying.\n");
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying\n");
else
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error.\n");
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n");
} while (retry < max_retry);
done:
@ -538,10 +540,10 @@ ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
}
/**
* ixl_write_i2c_byte - Writes 8 bit word over I2C
* ixl_write_i2c_byte_bb - Writes 8 bit word over I2C
**/
s32
ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data)
{
struct i40e_hw *hw = &pf->hw;
@ -589,9 +591,9 @@ ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
i40e_msec_delay(100);
retry++;
if (retry < max_retry)
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying.\n");
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying\n");
else
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error.\n");
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n");
} while (retry < max_retry);
write_byte_out:
@ -603,3 +605,139 @@ ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
return status;
}
/**
 * ixl_read_i2c_byte_reg - Reads 8 bit word over I2C using the I2CCMD register
 *
 * @pf: PF softc; the target hw is pf->hw
 * @byte_offset: register offset within the I2C device to read
 * @dev_addr: I2C device address (e.g. 0xA0/0xA2 for SFP EEPROM)
 * @data: out parameter; receives the byte read (zeroed on entry)
 *
 * Builds an I2C read command in GLGEN_I2CCMD for this function's MDIO
 * port, waits for completion, then extracts the low data byte.
 * Returns the status from ixl_wait_for_i2c_completion(); note that
 * *data is populated from the register even on error.
 **/
s32
ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data)
{
struct i40e_hw *hw = &pf->hw;
u32 reg = 0;
s32 status;
*data = 0;
/* Encode offset, 7-bit device address, and the read opcode */
reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT);
reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT);
reg |= I40E_GLGEN_I2CCMD_OP_MASK;
wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg);
status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num);
/* Get data from I2C register */
reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));
/* Retrieve data read from EEPROM */
*data = (u8)(reg & 0xff);
if (status)
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n");
return status;
}
/**
 * ixl_write_i2c_byte_reg - Writes 8 bit word over I2C using the I2CCMD register
 *
 * @pf: PF softc; the target hw is pf->hw
 * @byte_offset: register offset within the I2C device to write
 * @dev_addr: I2C device address (e.g. 0xA0/0xA2 for SFP EEPROM)
 * @data: byte value to write
 *
 * The I2CCMD data field is 16 bits wide, so this first reads the byte at
 * byte_offset + 1 and rewrites it alongside the new byte to avoid
 * clobbering the neighboring location. Returns the completion status.
 **/
s32
ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data)
{
struct i40e_hw *hw = &pf->hw;
s32 status = I40E_SUCCESS;
u32 reg = 0;
u8 upperbyte = 0;
u16 datai2c = 0;
/* Preserve the adjacent byte: the command writes a full 16-bit word */
status = ixl_read_i2c_byte_reg(pf, byte_offset + 1, dev_addr, &upperbyte);
datai2c = ((u16)upperbyte << 8) | (u16)data;
reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));
/* Form write command */
reg &= ~I40E_GLGEN_I2CCMD_PHYADD_MASK;
reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT);
reg &= ~I40E_GLGEN_I2CCMD_REGADD_MASK;
reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT);
reg &= ~I40E_GLGEN_I2CCMD_DATA_MASK;
reg |= (datai2c << I40E_GLGEN_I2CCMD_DATA_SHIFT);
/* Clearing the OP bit selects a write operation */
reg &= ~I40E_GLGEN_I2CCMD_OP_MASK;
/* Write command to registers controlling I2C - data and address. */
wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg);
status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num);
if (status)
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n");
return status;
}
/**
 * ixl_wait_for_i2c_completion - Poll for I2CCMD transaction completion
 *
 * @hw: pointer to hardware structure
 * @portnum: MDIO port number used to index I40E_GLGEN_I2CCMD
 *
 * Polls the GLGEN_I2CCMD ready bit for the given port, delaying 10us
 * between reads, for up to 100 iterations (~1ms).
 *
 * Returns I40E_SUCCESS once the ready bit asserts, or I40E_ERR_TIMEOUT
 * if it never does.
 **/
static s32
ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum)
{
	u32 timeout = 100;
	u32 reg;

	do {
		reg = rd32(hw, I40E_GLGEN_I2CCMD(portnum));
		/* Hardware sets the R bit when the transaction is done */
		if ((reg & I40E_GLGEN_I2CCMD_R_MASK) != 0)
			return (I40E_SUCCESS);
		i40e_usec_delay(10);
	} while (--timeout > 0);

	/*
	 * Fix: the previous "while (timeout-- > 0)" post-decrement left the
	 * unsigned counter at 0xFFFFFFFF on expiry, so the subsequent
	 * "if (timeout == 0)" never reported I40E_ERR_TIMEOUT on a real
	 * timeout, and a success break on the final iteration could be
	 * misreported as a timeout. Returning directly from each exit path
	 * removes both failure modes.
	 */
	return (I40E_ERR_TIMEOUT);
}
/**
 * ixl_read_i2c_byte_aq - Reads 8 bit word over I2C using the Admin Queue
 *
 * @pf: PF softc; the target hw is pf->hw
 * @byte_offset: register offset within the I2C device to read
 * @dev_addr: I2C device address (e.g. 0xA0/0xA2 for SFP EEPROM)
 * @data: out parameter; receives the byte read on success
 *
 * Issues a "get PHY register" AQ command against the external module
 * address space. On failure *data is left unmodified and the AQ status
 * is logged. Returns the AQ command status.
 **/
s32
ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data)
{
struct i40e_hw *hw = &pf->hw;
s32 status = I40E_SUCCESS;
u32 reg;
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
dev_addr,
byte_offset,
&reg, NULL);
if (status)
ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
else
*data = (u8)reg;
return status;
}
/**
 * ixl_write_i2c_byte_aq - Writes 8 bit word over I2C via an admin queue command
 *
 * Hands the external-module register write off to firmware rather than
 * driving the I2C controller directly.  Returns the admin queue status.
 **/
s32
ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
		      u8 dev_addr, u8 data)
{
	struct i40e_hw *hw = &pf->hw;
	s32 status;

	status = i40e_aq_set_phy_register(hw,
	    I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
	    dev_addr,
	    byte_offset,
	    data, NULL);
	if (status != I40E_SUCCESS)
		ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));

	return status;
}

View File

@ -83,15 +83,13 @@ static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
void
ixl_initialize_sriov(struct ixl_pf *pf)
{
return;
#if 0
device_t dev = pf->dev;
struct i40e_hw *hw = &pf->hw;
nvlist_t *pf_schema, *vf_schema;
int iov_error;
/* SR-IOV is only supported when MSI-X is in use. */
if (pf->msix <= 1)
return;
pf_schema = pci_iov_schema_alloc_node();
vf_schema = pci_iov_schema_alloc_node();
pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
@ -114,8 +112,10 @@ ixl_initialize_sriov(struct ixl_pf *pf)
device_printf(dev, "SR-IOV ready\n");
pf->vc_debug_lvl = 1;
#endif
}
/*
* Allocate the VSI for a VF.
*/
@ -165,15 +165,17 @@ ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
vsi_ctx.info.tc_mapping[0] = htole16(
(0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
if (code != I40E_SUCCESS)
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
vf->vsi.seid = vsi_ctx.seid;
vf->vsi.vsi_num = vsi_ctx.vsi_number;
// vf->vsi.first_queue = vf->qtag.qidx[0];
vf->vsi.num_queues = vf->qtag.num_active;
// TODO: How to deal with num tx queues / num rx queues split?
// I don't think just assigning this variable is going to work
vf->vsi.num_rx_queues = vf->qtag.num_active;
vf->vsi.num_tx_queues = vf->qtag.num_active;
code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
if (code != I40E_SUCCESS)
@ -204,7 +206,7 @@ ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
vf->vsi.hw_filters_add = 0;
vf->vsi.hw_filters_del = 0;
ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
// ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
ixl_reconfigure_filters(&vf->vsi);
return (0);
@ -253,7 +255,7 @@ ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
/* Program index of each VF queue into PF queue space
* (This is only needed if QTABLE is enabled) */
for (i = 0; i < vf->vsi.num_queues; i++) {
for (i = 0; i < vf->vsi.num_tx_queues; i++) {
qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
I40E_VPLAN_QTABLE_QINDEX_SHIFT;
@ -266,7 +268,7 @@ ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
/* Map queues allocated to VF to its VSI;
* This mapping matches the VF-wide mapping since the VF
* is only given a single VSI */
for (i = 0; i < vf->vsi.num_queues; i++)
for (i = 0; i < vf->vsi.num_tx_queues; i++)
ixl_vf_map_vsi_queue(hw, vf, i,
ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
@ -335,7 +337,8 @@ ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
ixl_vf_unregister_intr(hw, vpint_reg);
}
vf->vsi.num_queues = 0;
vf->vsi.num_tx_queues = 0;
vf->vsi.num_rx_queues = 0;
}
static int
@ -533,13 +536,13 @@ ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
VIRTCHNL_VF_OFFLOAD_VLAN);
reply.num_vsis = 1;
reply.num_queue_pairs = vf->vsi.num_queues;
reply.num_queue_pairs = vf->vsi.num_tx_queues;
reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
reply.rss_key_size = 52;
reply.rss_lut_size = 64;
reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
@ -674,9 +677,9 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
}
info = msg;
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
@ -705,7 +708,7 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (pair->txq.vsi_id != vf->vsi.vsi_num ||
pair->rxq.vsi_id != vf->vsi.vsi_num ||
pair->txq.queue_id != pair->rxq.queue_id ||
pair->txq.queue_id >= vf->vsi.num_queues) {
pair->txq.queue_id >= vf->vsi.num_tx_queues) {
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
@ -854,7 +857,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (vector->rxq_map != 0) {
largest_rxq = fls(vector->rxq_map) - 1;
if (largest_rxq >= vf->vsi.num_queues) {
if (largest_rxq >= vf->vsi.num_rx_queues) {
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
@ -864,7 +867,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (vector->txq_map != 0) {
largest_txq = fls(vector->txq_map) - 1;
if (largest_txq >= vf->vsi.num_queues) {
if (largest_txq >= vf->vsi.num_tx_queues) {
i40e_send_vf_nack(pf, vf,
VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
@ -911,7 +914,7 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->tx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
if (i >= vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@ -936,7 +939,7 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->rx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
if (i >= vf->vsi.num_rx_queues) {
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@ -990,7 +993,7 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->tx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
if (i >= vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@ -1016,7 +1019,7 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
for (int i = 0; i < 32; i++) {
if ((1 << i) & select->rx_queues) {
/* Warn if queue is out of VF allocation range */
if (i >= vf->vsi.num_queues) {
if (i >= vf->vsi.num_rx_queues) {
device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
vf->vf_num, i);
break;
@ -1058,6 +1061,8 @@ ixl_zero_mac(const uint8_t *addr)
static bool
ixl_bcast_mac(const uint8_t *addr)
{
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
return (cmp_etheraddr(addr, ixl_bcast_addr));
}
@ -1634,7 +1639,7 @@ ixl_handle_vflr(void *arg, int pending)
pf = arg;
hw = &pf->hw;
IXL_PF_LOCK(pf);
/* TODO: May need to lock this */
for (i = 0; i < pf->num_vfs; i++) {
global_vf_num = hw->func_caps.vf_base_id + i;
@ -1653,12 +1658,13 @@ ixl_handle_vflr(void *arg, int pending)
}
}
atomic_clear_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
ixl_flush(hw);
IXL_PF_UNLOCK(pf);
// IXL_PF_UNLOCK()
}
static int
@ -1728,7 +1734,7 @@ ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
hw = &pf->hw;
pf_vsi = &pf->vsi;
IXL_PF_LOCK(pf);
//IXL_PF_LOCK(pf);
pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
M_ZERO);
@ -1750,13 +1756,13 @@ ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
}
pf->num_vfs = num_vfs;
IXL_PF_UNLOCK(pf);
//IXL_PF_UNLOCK(pf);
return (0);
fail:
free(pf->vfs, M_IXL);
pf->vfs = NULL;
IXL_PF_UNLOCK(pf);
//IXL_PF_UNLOCK(pf);
return (error);
}
@ -1775,7 +1781,7 @@ ixl_iov_uninit(device_t dev)
vsi = &pf->vsi;
ifp = vsi->ifp;
IXL_PF_LOCK(pf);
//IXL_PF_LOCK(pf);
for (i = 0; i < pf->num_vfs; i++) {
if (pf->vfs[i].vsi.seid != 0)
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
@ -1796,7 +1802,7 @@ ixl_iov_uninit(device_t dev)
pf->vfs = NULL;
pf->num_vfs = 0;
IXL_PF_UNLOCK(pf);
//IXL_PF_UNLOCK(pf);
/* Do this after the unlock as sysctl_ctx_free might sleep. */
for (i = 0; i < num_vfs; i++)
@ -1849,7 +1855,7 @@ ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
pf = device_get_softc(dev);
vf = &pf->vfs[vfnum];
IXL_PF_LOCK(pf);
//IXL_PF_LOCK(pf);
vf->vf_num = vfnum;
vf->vsi.back = pf;
@ -1889,7 +1895,7 @@ ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
ixl_reset_vf(pf, vf);
out:
IXL_PF_UNLOCK(pf);
//IXL_PF_UNLOCK(pf);
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);

File diff suppressed because it is too large Load Diff

View File

@ -83,8 +83,8 @@ ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qt
qtag->qmgr = qmgr;
qtag->type = IXL_PF_QALLOC_CONTIGUOUS;
qtag->qidx[0] = block_start;
qtag->num_allocated = num;
qtag->num_active = alloc_size;
qtag->num_allocated = alloc_size;
qtag->num_active = num;
return (0);
}

File diff suppressed because it is too large Load Diff

View File

@ -38,8 +38,7 @@
#include "ixlv_vc_mgr.h"
#define IXLV_AQ_MAX_ERR 30
#define IXLV_MAX_INIT_WAIT 120
#define IXLV_AQ_MAX_ERR 200
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
@ -79,6 +78,8 @@
"\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \
"\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF"
static MALLOC_DEFINE(M_IXLV, "ixlv", "ixlv driver allocations");
/* Driver state */
enum ixlv_state_t {
IXLV_START,
@ -144,9 +145,10 @@ struct ixlv_sc {
u32 qbase;
u32 admvec;
struct timeout_task timeout;
#ifdef notyet
struct task aq_irq;
struct task aq_sched;
struct taskqueue *tq;
#endif
struct ixl_vsi vsi;
@ -186,7 +188,6 @@ struct ixlv_sc {
u8 aq_buffer[IXL_AQ_BUF_SZ];
};
#define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
/*
** This checks for a zero mac addr, something that will be likely
** unless the Admin on the Host has created one.
@ -205,6 +206,8 @@ ixlv_check_ether_addr(u8 *addr)
/*
** VF Common function prototypes
*/
void ixlv_if_init(if_ctx_t ctx);
int ixlv_send_api_ver(struct ixlv_sc *);
int ixlv_verify_api_ver(struct ixlv_sc *);
int ixlv_send_vf_config_msg(struct ixlv_sc *);

View File

@ -386,7 +386,9 @@ ixlv_configure_queues(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
+ if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
+ struct ixl_tx_queue *tx_que = vsi->tx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
struct tx_ring *txr;
struct rx_ring *rxr;
int len, pairs;
@ -394,7 +396,9 @@ ixlv_configure_queues(struct ixlv_sc *sc)
struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi;
pairs = vsi->num_queues;
+ /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX
+ * queues of a pair need to be configured */
+ pairs = max(vsi->num_tx_queues, vsi->num_rx_queues);
len = sizeof(struct virtchnl_vsi_queue_config_info) +
(sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
@ -409,25 +413,24 @@ ixlv_configure_queues(struct ixlv_sc *sc)
/* Size check is not needed here - HW max is 16 queue pairs, and we
* can fit info for 31 of them into the AQ buffer before it overflows.
*/
for (int i = 0; i < pairs; i++, que++, vqpi++) {
txr = &que->txr;
rxr = &que->rxr;
+ for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) {
+ txr = &tx_que->txr;
+ rxr = &rx_que->rxr;
+
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = que->num_tx_desc;
vqpi->txq.dma_ring_addr = txr->dma.pa;
+ vqpi->txq.ring_len = scctx->isc_ntxd[0];
+ vqpi->txq.dma_ring_addr = txr->tx_paddr;
/* Enable Head writeback */
if (vsi->enable_head_writeback) {
vqpi->txq.headwb_enabled = 1;
vqpi->txq.dma_headwb_addr = txr->dma.pa +
(que->num_tx_desc * sizeof(struct i40e_tx_desc));
}
vqpi->txq.headwb_enabled = 0;
vqpi->txq.dma_headwb_addr = 0;
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = que->num_rx_desc;
vqpi->rxq.dma_ring_addr = rxr->dma.pa;
vqpi->rxq.max_pkt_size = vsi->max_frame_size;
+ vqpi->rxq.ring_len = scctx->isc_nrxd[0];
+ vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
+ vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size;
+ // TODO: Get this value from iflib, somehow
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
vqpi->rxq.splithdr_enabled = 0;
}
@ -448,6 +451,8 @@ ixlv_enable_queues(struct ixlv_sc *sc)
struct virtchnl_queue_select vqs;
vqs.vsi_id = sc->vsi_res->vsi_id;
+ /* XXX: In Linux PF, as long as neither of these is 0,
+ * every queue in VF VSI is enabled. */
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
@ -465,6 +470,8 @@ ixlv_disable_queues(struct ixlv_sc *sc)
struct virtchnl_queue_select vqs;
vqs.vsi_id = sc->vsi_res->vsi_id;
+ /* XXX: In Linux PF, as long as neither of these is 0,
+ * every queue in VF VSI is disabled. */
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
@ -483,27 +490,33 @@ ixlv_map_queues(struct ixlv_sc *sc)
struct virtchnl_irq_map_info *vm;
int i, q, len;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
+ if_softc_ctx_t scctx = vsi->shared;
+ device_t dev = sc->dev;
+
+ // XXX: What happens if we only get 1 MSI-X vector?
+ MPASS(scctx->isc_vectors > 1);
/* How many queue vectors, adminq uses one */
q = sc->msix - 1;
+ // XXX: How do we know how many interrupt vectors we have?
+ q = scctx->isc_vectors - 1;
len = sizeof(struct virtchnl_irq_map_info) +
(sc->msix * sizeof(struct virtchnl_vector_map));
+ (scctx->isc_vectors * sizeof(struct i40e_virtchnl_vector_map));
vm = malloc(len, M_DEVBUF, M_NOWAIT);
if (!vm) {
printf("%s: unable to allocate memory\n", __func__);
+ device_printf(dev, "%s: unable to allocate memory\n", __func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
vm->num_vectors = sc->msix;
+ vm->num_vectors = scctx->isc_vectors;
/* Queue vectors first */
for (i = 0; i < q; i++, que++) {
+ for (i = 0; i < q; i++, rx_que++) {
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
vm->vecmap[i].vector_id = i + 1; /* first is adminq */
vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << que->me);
// vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me);
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 1;
}
@ -811,8 +824,10 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
uint64_t tx_discards;
tx_discards = es->tx_discards;
#if 0
for (int i = 0; i < vsi->num_queues; i++)
tx_discards += sc->vsi.queues[i].txr.br->br_drops;
#endif
/* Update ifnet stats */
IXL_SET_IPACKETS(vsi, es->rx_unicast +
@ -875,8 +890,12 @@ void
ixlv_set_rss_hena(struct ixlv_sc *sc)
{
struct virtchnl_rss_hena hena;
+ struct i40e_hw *hw = &sc->hw;
hena.hena = IXL_DEFAULT_RSS_HENA_X722;
+ if (hw->mac.type == I40E_MAC_X722_VF)
+ hena.hena = IXL_DEFAULT_RSS_HENA_X722;
+ else
+ hena.hena = IXL_DEFAULT_RSS_HENA_XL710;
ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&hena, sizeof(hena));
@ -912,9 +931,9 @@ ixlv_config_rss_lut(struct ixlv_sc *sc)
* num_queues.)
*/
que_id = rss_get_indirection_to_bucket(i);
que_id = que_id % sc->vsi.num_queues;
+ que_id = que_id % sc->vsi.num_rx_queues;
#else
que_id = i % sc->vsi.num_queues;
+ que_id = i % sc->vsi.num_rx_queues;
#endif
lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
rss_lut_msg->lut[i] = lut;
@ -961,9 +980,9 @@ ixlv_vc_completion(struct ixlv_sc *sc,
case VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
sc->init_state = IXLV_RESET_PENDING;
mtx_unlock(&sc->mtx);
ixlv_init(vsi);
mtx_lock(&sc->mtx);
+ // mtx_unlock(&sc->mtx);
+ ixlv_if_init(sc->vsi.ctx);
+ // mtx_lock(&sc->mtx);
break;
default:
device_printf(dev, "%s: Unknown event %d from AQ\n",
@ -1023,7 +1042,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* Turn on all interrupts */
ixlv_enable_intr(vsi);
/* And inform the stack we're ready */
vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
// vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
/* TODO: Clear a state flag, so we know we're ready to run init again */
}
break;
@ -1161,7 +1180,6 @@ ixl_vc_cmd_timeout(void *arg)
{
struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
IXLV_CORE_LOCK_ASSERT(mgr->sc);
ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
}
@ -1170,7 +1188,6 @@ ixl_vc_cmd_retry(void *arg)
{
struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
IXLV_CORE_LOCK_ASSERT(mgr->sc);
ixl_vc_send_current(mgr);
}
@ -1213,8 +1230,6 @@ void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
IXLV_CORE_LOCK_ASSERT(mgr->sc);
if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
if (mgr->current == cmd)
mgr->current = NULL;
@ -1236,7 +1251,6 @@ ixl_vc_flush(struct ixl_vc_mgr *mgr)
{
struct ixl_vc_cmd *cmd;
IXLV_CORE_LOCK_ASSERT(mgr->sc);
KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
("ixlv: pending commands waiting but no command in progress"));

View File

@ -206,7 +206,6 @@ SUBDIR= \
${_ix} \
${_ixv} \
${_ixl} \
${_ixlv} \
jme \
joy \
kbdmux \

View File

@ -3,11 +3,10 @@
.PATH: ${SRCTOP}/sys/dev/ixl
KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
SRCS += ixl_iw.c
SRCS.PCI_IOV= pci_iov_if.h ixl_pf_iov.c
SRCS.PCI_IOV = pci_iov_if.h ixl_pf_iov.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c i40e_dcb.c
@ -15,4 +14,7 @@ SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c i40e
# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG
#CFLAGS += -DIXL_IW
#SRCS += ixl_iw.c
.include <bsd.kmod.mk>

View File

@ -3,8 +3,8 @@
.PATH: ${SRCTOP}/sys/dev/ixl
KMOD = if_ixlv
SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source