Update the new 40G XL710 driver to Release version 1.0.0

parent e403cfdc57
commit 9d052f904b
@@ -92,13 +92,23 @@
#include "i40e_prototype.h"

#ifdef I40E_DEBUG
#include <sys/sbuf.h>

#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
	(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
	(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")

#define DPRINTF(...) printf(__VA_ARGS__)
#define DDPRINTF(dev, ...) device_printf(dev, __VA_ARGS__)
#define IDPRINTF(ifp, ...) if_printf(ifp, __VA_ARGS__)

// static void i40e_dump_desc(void *, u8, u16);
#else
#define DPRINTF(...)
#define DDPRINTF(...)
#define IDPRINTF(...)
#endif

/* Tunables */
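As a standalone illustration (not part of the commit itself), the MAC_FORMAT/MAC_FORMAT_ARGS pair above is designed so one macro supplies the format string and the other expands to the six matching arguments of a single printf-style call:

#include <stdio.h>

typedef unsigned char u8;

#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
	(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
	(mac_addr)[4], (mac_addr)[5]

int main(void)
{
	u8 mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	/* prints "MAC: 00:1b:21:aa:bb:cc" */
	printf("MAC: " MAC_FORMAT "\n", MAC_FORMAT_ARGS(mac));
	return (0);
}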
@@ -173,10 +183,21 @@
#define I40E_ITR_NONE 3
#define I40E_QUEUE_EOL 0x7FF
#define I40E_MAX_FRAME 0x2600
#define I40E_MAX_SEGS 32
#define I40E_MAX_FILTERS 256 /* This is artificial */
#define I40E_MAX_TX_SEGS 8
#define I40E_MAX_TSO_SEGS 66
#define I40E_SPARSE_CHAIN 6
#define I40E_QUEUE_HUNG 0x80000000

/* ERJ: hardware can support ~1.5k filters between all functions */
#define I40E_MAX_FILTERS 256
#define I40E_MAX_TX_BUSY 10

#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)


/*
 * Interrupt Moderation parameters
 */
@@ -200,7 +221,9 @@
/* used in the vlan field of the filter when not a vlan */
#define I40E_VLAN_ANY -1

#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)

/* Misc flags for i40e_vsi.flags */
#define I40E_FLAGS_KEEP_TSO4 (1 << 0)
@@ -238,6 +261,7 @@ struct i40e_tx_buf {
	u32 eop_index;
	struct mbuf *m_head;
	bus_dmamap_t map;
	bus_dma_tag_t tag;
};

struct i40e_rx_buf {
@@ -248,15 +272,6 @@ struct i40e_rx_buf {
	bus_dmamap_t pmap;
};

struct i40e_pkt_info {
	u16 etype;
	u32 elen;
	u32 iplen;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
};

/*
** This struct has multiple uses, multicast
** addresses, vlans, and mac filters all use it.
@@ -275,7 +290,7 @@ struct i40e_mac_filter {
struct tx_ring {
	struct i40e_queue *que;
	struct mtx mtx;
	int watchdog;
	u32 tail;
	struct i40e_tx_desc *base;
	struct i40e_dma_mem dma;
	u16 next_avail;
@@ -287,7 +302,8 @@ struct tx_ring {
	struct i40e_tx_buf *buffers;
	volatile u16 avail;
	u32 cmd;
	bus_dma_tag_t tag;
	bus_dma_tag_t tx_tag;
	bus_dma_tag_t tso_tag;
	char mtx_name[16];
	struct buf_ring *br;

@@ -318,6 +334,7 @@ struct rx_ring {
	char mtx_name[16];
	struct i40e_rx_buf *buffers;
	u32 mbuf_sz;
	u32 tail;
	bus_dma_tag_t htag;
	bus_dma_tag_t ptag;

@@ -407,6 +424,7 @@ struct i40e_vsi {
	u64 hw_filters_add;

	/* Misc. */
	u64 active_queues;
	u64 flags;
};

@@ -433,8 +451,9 @@ i40e_get_filter(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	// create a new empty filter
	f = malloc(sizeof(struct i40e_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	/* create a new empty filter */
	f = malloc(sizeof(struct i40e_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INSERT_HEAD(&vsi->ftl, f, next);

	return (f);
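Worth noting in passing: with M_NOWAIT the malloc() in this helper can return NULL, and the result goes onto the list unchecked. A userspace sketch of the same helper with the check added (illustrative only; this is not what the commit does):

#include <stdlib.h>
#include <sys/queue.h>

struct i40e_mac_filter {
	SLIST_ENTRY(i40e_mac_filter) next;
};

SLIST_HEAD(filter_head, i40e_mac_filter);

/* Userspace analogue of i40e_get_filter() with a NULL check added. */
static struct i40e_mac_filter *
get_filter(struct filter_head *ftl)
{
	struct i40e_mac_filter *f;

	f = calloc(1, sizeof(*f));	/* may fail, like M_NOWAIT */
	if (f == NULL)
		return (NULL);
	SLIST_INSERT_HEAD(ftl, f, next);
	return (f);
}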
@@ -467,6 +486,25 @@ struct i40e_sysctl_info {

extern int i40e_atr_rate;

/*
** i40e_fw_version_str - format the FW and NVM version strings
*/
static inline char *
i40e_fw_version_str(struct i40e_hw *hw)
{
	static char buf[32];

	snprintf(buf, sizeof(buf),
	    "f%d.%d a%d.%d n%02x.%02x e%08x",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
	    I40E_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
	    I40E_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack);
	return buf;
}

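The NVM masks used here carve a 16-bit version word into a 4-bit major field (bits 15:12) and an 8-bit minor field (bits 7:0). A small standalone sketch of the extraction that i40e_fw_version_str performs:

#include <stdio.h>

#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)

int main(void)
{
	unsigned int version = 0x1234;	/* example NVM version word */

	/* hi = bits 15:12, lo = bits 7:0, so this prints "n01.34" */
	printf("n%02x.%02x\n",
	    (version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT,
	    (version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT);
	return (0);
}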
/*********************************************************************
 *  TXRX Function prototypes

@@ -61,16 +61,37 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
#ifdef I40E_QV
	} else if (hw->aq_dbg_ena) {
		hw->aq.asq.tail = I40E_GL_ATQT;
		hw->aq.asq.head = I40E_GL_ATQH;
		hw->aq.asq.len = I40E_GL_ATQLEN;
		hw->aq.asq.bal = I40E_GL_ATQBAL;
		hw->aq.asq.bah = I40E_GL_ATQBAH;
		hw->aq.arq.tail = I40E_GL_ARQT;
		hw->aq.arq.head = I40E_GL_ARQH;
		hw->aq.arq.len = I40E_GL_ARQLEN;
		hw->aq.arq.bal = I40E_GL_ARQBAL;
		hw->aq.arq.bah = I40E_GL_ARQBAH;
#endif
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.asq.bal = I40E_PF_ATQBAL;
		hw->aq.asq.bah = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
		hw->aq.arq.bal = I40E_PF_ARQBAL;
		hw->aq.arq.bah = I40E_PF_ARQBAH;
	}
}

@@ -148,6 +169,10 @@ void i40e_free_adminq_arq(struct i40e_hw *hw)
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
#ifdef I40E_QV
	struct i40e_aq_desc qv_desc;
	struct i40e_aq_desc *qv_desc_on_ring;
#endif
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
@@ -176,6 +201,13 @@ static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
#ifdef I40E_QV
		/* swap the descriptor with userspace version */
		i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
		qv_desc_on_ring = desc;
		desc = &qv_desc;
#endif

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
@@ -194,6 +226,11 @@ static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
		    CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
#ifdef I40E_QV
		/* put the initialized descriptor back to the ring */
		i40e_memcpy(qv_desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);
#endif
	}

alloc_arq_bufs:
@@ -306,27 +343,14 @@ static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1,
		    I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQBAL1,
		    I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
		    I40E_VF_ATQLEN1_ATQENABLE_MASK));
		reg = rd32(hw, I40E_VF_ATQBAL1);
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH,
		    I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQBAL,
		    I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
		    I40E_PF_ATQLEN_ATQENABLE_MASK));
		reg = rd32(hw, I40E_PF_ATQBAL);
	}
	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
	    I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

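A design note on this refactor: i40e_adminq_init_regs() now records the per-function register offsets (tail/head/len/bal/bah) in hw->aq once, so i40e_config_asq_regs() and its peers can drop their VF/PF branches and write through hw->aq.asq.* directly. A minimal standalone sketch of the pattern, with hypothetical register values:

#include <stdio.h>
#include <stdint.h>

/* Illustrative register offsets (hypothetical values). */
#define PF_ATQT 0x00080400
#define VF_ATQT 0x00008400

struct aq_ring {
	uint32_t tail;	/* holds a register offset, not an index */
};

/* Select the offsets once, at init time... */
static void init_regs(struct aq_ring *asq, int is_vf)
{
	asq->tail = is_vf ? VF_ATQT : PF_ATQT;
}

/* ...so all later accesses are branch-free. */
int main(void)
{
	struct aq_ring asq;

	init_regs(&asq, 0);
	printf("writing tail via offset 0x%08x\n", asq.tail);
	return (0);
}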
@@ -348,30 +372,17 @@ static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1,
		    I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQBAL1,
		    I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
		    I40E_VF_ARQLEN1_ARQENABLE_MASK));
		reg = rd32(hw, I40E_VF_ARQBAL1);
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH,
		    I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQBAL,
		    I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
		    I40E_PF_ARQLEN_ARQENABLE_MASK));
		reg = rd32(hw, I40E_PF_ARQBAL);
	}
	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
	    I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

@@ -510,9 +521,22 @@ enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
#ifdef I40E_QV
	/* Do not reset registers, as Tools AQ is shared resource for QV */
	if (!hw->aq_dbg_ena) {
		wr32(hw, hw->aq.asq.head, 0);
		wr32(hw, hw->aq.asq.tail, 0);
		wr32(hw, hw->aq.asq.len, 0);
		wr32(hw, hw->aq.asq.bal, 0);
		wr32(hw, hw->aq.asq.bah, 0);
	}
#else
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);
#endif

	/* make sure spinlock is available */
	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
@@ -541,9 +565,22 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
#ifdef I40E_QV
	/* Do not reset registers, as Tools AQ is shared resource for QV */
	if (!hw->aq_dbg_ena) {
		wr32(hw, hw->aq.arq.head, 0);
		wr32(hw, hw->aq.arq.tail, 0);
		wr32(hw, hw->aq.arq.len, 0);
		wr32(hw, hw->aq.arq.bal, 0);
		wr32(hw, hw->aq.arq.bah, 0);
	}
#else
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);
#endif

	/* make sure spinlock is available */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
@@ -591,6 +628,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
@@ -627,16 +667,19 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

#ifdef FORTVILLE_A0_SUPPORT
	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
	    !((hw->aq.api_min_ver == I40E_FW_API_VERSION_MINOR) ||
	      (hw->aq.api_min_ver == I40E_FW_API_VERSION_A0_MINOR))) {
#ifdef I40E_QV
	if (!hw->qv_force_init) {
		if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
			ret_code = I40E_ERR_FIRMWARE_API_VERSION;
			goto init_adminq_free_arq;
		}
	}
#else
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
#endif
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}
#endif

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
@@ -671,8 +714,16 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

#ifdef I40E_QV
	/* This command is not supported for Tools AQ */
	if (!hw->aq_dbg_ena) {
		if (i40e_check_asq_alive(hw))
			i40e_aq_queue_shutdown(hw, TRUE);
	}
#else
	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);
#endif

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
@@ -692,6 +743,10 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
#ifdef I40E_QV
	struct i40e_aq_desc qv_desc = {0};
	struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
@@ -700,6 +755,13 @@ u16 i40e_clean_asq(struct i40e_hw *hw)

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
#ifdef I40E_QV
	/* copy the descriptor from ring to userspace buffer */
	i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
	    I40E_DMA_TO_NONDMA);
	qv_desc_on_ring = desc;
	desc = &qv_desc;
#endif /* I40E_QV */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		    "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
@@ -714,11 +776,23 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
#ifdef I40E_QV
		/* copy the descriptor from userspace buffer to ring */
		i40e_memcpy(qv_desc_on_ring, desc,
		    sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
#ifdef I40E_QV
		/* copy the descriptor from ring to userspace buffer */
		i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
		qv_desc_on_ring = desc;
		desc = &qv_desc;
#endif /* I40E_QV */
	}

	asq->next_to_clean = ntc;
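The I40E_QV blocks added throughout this file all implement a single pattern: rather than reading and writing the DMA descriptor ring in place, the descriptor is copied out to a local shadow (qv_desc), the working pointer is swapped to the shadow, and the result is copied back when done. A generic standalone sketch of that shadow-copy idiom, with plain memcpy standing in for i40e_memcpy:

#include <stdio.h>
#include <string.h>

struct desc {
	unsigned short flags;
	unsigned short datalen;
};

int main(void)
{
	struct desc ring[4] = { { 0, 64 } };	/* stands in for DMA memory */
	struct desc shadow, *on_ring, *d;

	/* copy the descriptor from the ring to a local shadow */
	memcpy(&shadow, &ring[0], sizeof(shadow));
	on_ring = &ring[0];
	d = &shadow;

	d->flags |= 0x1;	/* work on the shadow, never on the ring */

	/* put the updated descriptor back on the ring */
	memcpy(on_ring, d, sizeof(*d));
	printf("ring[0].flags = 0x%x\n", ring[0].flags);
	return (0);
}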
@@ -759,6 +833,10 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				u16 buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
#ifdef I40E_QV
	struct i40e_aq_desc qv_desc = {0};
	struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
@@ -855,6 +933,13 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
	    I40E_NONDMA_TO_DMA);
#ifdef I40E_QV
	/* copy the descriptor from ring to userspace buffer */
	i40e_memcpy(&qv_desc, desc_on_ring, sizeof(struct i40e_aq_desc),
	    I40E_DMA_TO_NONDMA);
	qv_desc_on_ring = desc_on_ring;
	desc_on_ring = &qv_desc;
#endif /* I40E_QV */

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
@@ -871,11 +956,17 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
		    CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
		    CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
#ifdef I40E_QV
		/* copy the descriptor from userspace buffer to ring */
		i40e_memcpy(qv_desc_on_ring, desc_on_ring,
		    sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
	    buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
@@ -890,6 +981,11 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
		u32 delay_len = 10;

		do {
#ifdef I40E_QV
			/* copy the descriptor from ring to user buffer */
			i40e_memcpy(desc_on_ring, qv_desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
#endif /* I40E_QV */
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
@@ -898,11 +994,15 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
			/* ugh! delay while spin_lock */
			i40e_usec_delay(delay_len);
			total_delay += delay_len;
		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
#ifdef I40E_QV
		/* Swap pointer back */
		desc_on_ring = qv_desc_on_ring;
#endif /* I40E_QV */
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
@@ -926,11 +1026,9 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	if (desc->datalen == buff_size) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		    "AQTX: desc and buffer writeback:\n");
		i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
	}
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
	    "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
@@ -981,6 +1079,10 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
#ifdef I40E_QV
	struct i40e_aq_desc qv_desc = {0};
	struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
@@ -1006,6 +1108,13 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
#ifdef I40E_QV
	/* copy the descriptor from ring to userspace buffer */
	i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
	    I40E_DMA_TO_NONDMA);
	qv_desc_on_ring = desc;
	desc = &qv_desc;
#endif /* I40E_QV */
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
@@ -1017,19 +1126,20 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
		datalen = LE16_TO_CPU(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_size, I40E_DMA_TO_NONDMA);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
	    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_size = min(datalen, e->msg_size);
	if (e->msg_buf != NULL && (e->msg_size != 0))
		i40e_memcpy(e->msg_buf,
		    hw->aq.arq.r.arq_bi[desc_idx].va,
		    e->msg_size, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
	    hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
@@ -1044,6 +1154,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
#ifdef I40E_QV
	/* copy the descriptor from userspace buffer to ring */
	i40e_memcpy(qv_desc_on_ring, desc,
	    sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);

@@ -64,6 +64,8 @@ struct i40e_adminq_ring {
	u32 head;
	u32 tail;
	u32 len;
	u32 bah;
	u32 bal;
};

/* ASQ transaction details */
@@ -90,6 +92,7 @@ struct i40e_arq_event_info {
struct i40e_adminq_info {
	struct i40e_adminq_ring arq;	/* receive queue */
	struct i40e_adminq_ring asq;	/* send queue */
	u32 asq_cmd_timeout;		/* send queue cmd write back timeout*/
	u16 num_arq_entries;		/* receive queue depth */
	u16 num_asq_entries;		/* send queue depth */
	u16 arq_buf_size;		/* receive queue buffer size */
@@ -110,8 +113,8 @@ struct i40e_adminq_info {
};

/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */

void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode);

@@ -43,9 +43,6 @@

#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR 0x0002
#ifdef FORTVILLE_A0_SUPPORT
#define I40E_FW_API_VERSION_A0_MINOR 0x0000
#endif

struct i40e_aq_desc {
	__le16 flags;
@@ -698,9 +695,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
#ifdef FORTVILLE_A0_SUPPORT
#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
#endif
	__le32 addr_high;
	__le32 addr_low;
};
@@ -1223,11 +1217,6 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
#ifdef FORTVILLE_A0_SUPPORT
#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
#endif
/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
/* 0x0002 reserved */
@@ -2012,22 +2001,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);

/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
#ifdef FORTVILLE_A0_SUPPORT
	__le16 udp_port;
	u8 header_len;	/* in DWords, 1 to 15 */
	u8 protocol_type;
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
	u8 variable_udp_length;
#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
	u8 udp_key_index;
#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
	u8 reserved[10];
#else
	__le16 udp_port;
	u8 reserved0[3];
	u8 protocol_type;
@@ -2035,7 +2008,6 @@ struct i40e_aqc_add_udp_tunnel {
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
	u8 reserved1[10];
#endif
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
@@ -2056,13 +2028,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
struct i40e_aqc_remove_udp_tunnel {
	u8 reserved[2];
	u8 index; /* 0 to 15 */
#ifdef FORTVILLE_A0_SUPPORT
	u8 pf_filters;
	u8 total_filters;
	u8 reserved2[11];
#else
	u8 reserved2[13];
#endif
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
@@ -2072,37 +2038,13 @@ struct i40e_aqc_del_udp_tunnel_completion {
	u8 index; /* 0 to 15 */
	u8 multiple_pfs;
	u8 total_filters_used;
#ifdef FORTVILLE_A0_SUPPORT
	u8 reserved;
	u8 tunnels_free;
	u8 reserved1[9];
#else
	u8 reserved1[11];
#endif
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);

/* tunnel key structure 0x0B10 */

#ifdef FORTVILLE_A0_SUPPORT
struct i40e_aqc_tunnel_key_structure_A0 {
	__le16 key1_off;
	__le16 key1_len;
	__le16 key2_off;
	__le16 key2_len;
	__le16 flags;
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
	u8 resreved[6];
};

I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);

#endif
struct i40e_aqc_tunnel_key_structure {
	u8 key1_off;
	u8 key2_off;

@@ -52,9 +52,6 @@ static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)

	if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
		switch (hw->device_id) {
#if defined(FORTVILLE_A0_SUPPORT) || defined(I40E_FPGA_SUPPORT)
		case I40E_DEV_ID_FPGA_A:
#endif
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_A:
@@ -63,9 +60,6 @@ static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
#ifdef FORTVILLE_A0_SUPPORT
		case I40E_DEV_ID_10G_BASE_T:
#endif
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_VF:
@@ -91,13 +85,15 @@ static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer)
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u16 len = LE16_TO_CPU(aq_desc->datalen);
	u8 *aq_buffer = (u8 *)buffer;
	u32 data[4];
	u32 i = 0;
@@ -121,7 +117,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
		i40e_memset(data, 0, sizeof(data), I40E_NONDMA_MEM);
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		for (i = 0; i < LE16_TO_CPU(aq_desc->datalen); i++) {
		if (buf_len < len)
			len = buf_len;
		for (i = 0; i < len; i++) {
			data[((i % 16) / 4)] |=
				((u32)aq_buffer[i]) << (8 * (i % 4));
			if ((i % 16) == 15) {
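The dump loop above packs the buffer into four u32 accumulators per 16-byte row: data[(i % 16) / 4] selects the dword within the row and 8 * (i % 4) the byte lane within that dword; the new buf_len clamp keeps the dump from reading past the caller's buffer when the descriptor's datalen is larger. A standalone sketch of the same packing:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void dump(const uint8_t *buf, uint16_t datalen, uint16_t buf_len)
{
	uint32_t data[4];
	uint16_t len = datalen, i;

	if (buf_len < len)	/* clamp to the caller's buffer */
		len = buf_len;
	memset(data, 0, sizeof(data));
	for (i = 0; i < len; i++) {
		data[(i % 16) / 4] |= (uint32_t)buf[i] << (8 * (i % 4));
		if ((i % 16) == 15) {	/* full 16-byte row: print and reset */
			printf("0x%08x 0x%08x 0x%08x 0x%08x\n",
			    data[0], data[1], data[2], data[3]);
			memset(data, 0, sizeof(data));
		}
	}
}

int main(void)
{
	uint8_t buf[16] = "0123456789abcdef";

	dump(buf, sizeof(buf), sizeof(buf));
	return (0);
}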
@@ -572,7 +570,6 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
		break;
	}

	hw->phy.get_link_info = TRUE;
@@ -712,8 +709,10 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128)
	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
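The added braces matter here: each I40E_GLLAN_TXPRE_QDIS register covers 128 queues, so an absolute queue index has to be split into a register block (index / 128) and an index within that block (index % 128). A quick standalone check of the mapping:

#include <stdio.h>

int main(void)
{
	unsigned int abs_queue_idx = 300, reg_block = 0;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}
	/* prints "block 2, index 44" */
	printf("block %u, index %u\n", reg_block, abs_queue_idx);
	return (0);
}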
@@ -762,6 +761,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
		media = I40E_MEDIA_TYPE_FIBER;
@@ -797,11 +798,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
	return media;
}

#ifndef FORTVILLE_A0_SUPPORT
#define I40E_PF_RESET_WAIT_COUNT 100
#else
#define I40E_PF_RESET_WAIT_COUNT 200
#endif
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
@@ -877,6 +874,99 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
	return I40E_SUCCESS;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and vfs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	i40e_usec_delay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	i40e_usec_delay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
@@ -886,16 +976,8 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
#if defined(FORTVILLE_A0_SUPPORT) || defined(I40E_FPGA_SUPPORT)
	u32 reg;

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);
	wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
#else
	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);
#endif
}

/**
@@ -1120,7 +1202,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
	status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
	    NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET1;
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

@@ -1145,31 +1227,19 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,

	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Get the abilities to set hw->fc.current_mode correctly */
	status = i40e_aq_get_phy_capabilities(hw, FALSE, false,
	    &abilities, NULL);
	if (status) {
		/* Wait a little bit and try once more */
		i40e_msec_delay(1000);
		status = i40e_aq_get_phy_capabilities(hw, FALSE, false,
		    &abilities, NULL);
	}
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET2;
		return status;
	}
	}
	/* Copy the what was returned from get capabilities into fc */
	if ((abilities.abilities & I40E_AQ_PHY_FLAG_PAUSE_TX) &&
	    (abilities.abilities & I40E_AQ_PHY_FLAG_PAUSE_RX))
		hw->fc.current_mode = I40E_FC_FULL;
	else if (abilities.abilities & I40E_AQ_PHY_FLAG_PAUSE_TX)
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
	else if (abilities.abilities & I40E_AQ_PHY_FLAG_PAUSE_RX)
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
	else
		hw->fc.current_mode = I40E_FC_NONE;
	/* Update the link info */
	status = i40e_update_link_info(hw, TRUE);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		i40e_msec_delay(1000);
		status = i40e_update_link_info(hw, TRUE);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}
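The PAUSE_TX/PAUSE_RX ability bits map onto the four flow-control modes exactly as the cascade above reads. A small truth-table sketch of the same mapping:

#include <stdio.h>
#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode fc_from_abilities(bool pause_tx, bool pause_rx)
{
	if (pause_tx && pause_rx)
		return (FC_FULL);
	else if (pause_tx)
		return (FC_TX_PAUSE);
	else if (pause_rx)
		return (FC_RX_PAUSE);
	return (FC_NONE);
}

int main(void)
{
	/* prints "3 2 1 0", i.e. FULL, TX_PAUSE, RX_PAUSE, NONE */
	printf("%d %d %d %d\n",
	    fc_from_abilities(true, true), fc_from_abilities(true, false),
	    fc_from_abilities(false, true), fc_from_abilities(false, false));
	return (0);
}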
@@ -1210,7 +1280,6 @@ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,

	return status;
}
#ifndef FORTVILLE_A0_SUPPORT

/**
 * i40e_aq_clear_pxe_mode
@@ -1238,17 +1307,17 @@ enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,

	return status;
}
#endif

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if TRUE: enable link, if FALSE: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
		struct i40e_asq_cmd_details *cmd_details)
		bool enable_link, struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
@@ -1259,6 +1328,10 @@ enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
	    i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

@@ -1859,6 +1932,14 @@ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
		*api_major_version = LE16_TO_CPU(resp->api_major);
		if (api_minor_version != NULL)
			*api_minor_version = LE16_TO_CPU(resp->api_minor);

		/* A workaround to fix the API version in SW */
		if (api_major_version && api_minor_version &&
		    fw_major_version && fw_minor_version &&
		    ((*api_major_version == 1) && (*api_minor_version == 1)) &&
		    (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
		     (*fw_major_version > 4)))
			*api_minor_version = 2;
	}

	return status;
@@ -2270,6 +2351,35 @@ enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
	return status;
}

/**
 * i40e_aq_debug_write_register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write to a register using the admin queue commands
 **/
enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
				u32 reg_addr, u64 reg_val,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_debug_reg_read_write *cmd =
		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);

	cmd->address = CPU_TO_LE32(reg_addr);
	cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
	cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

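i40e_aq_debug_write_register carries its 64-bit value through two little-endian 32-bit descriptor fields; the split and reassembly are plain shift-and-mask, as a standalone sketch shows:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t reg_val = 0x1122334455667788ULL;
	uint32_t value_high = (uint32_t)(reg_val >> 32);
	uint32_t value_low = (uint32_t)(reg_val & 0xFFFFFFFF);

	/* prints high=0x11223344 low=0x55667788 ok=1 */
	printf("high=0x%08x low=0x%08x ok=%d\n", value_high, value_low,
	    ((uint64_t)value_high << 32 | value_low) == reg_val);
	return (0);
}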
/**
 * i40e_aq_get_hmc_resource_profile
 * @hw: pointer to the hw struct
@@ -3065,17 +3175,10 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
 * @filter_index: pointer to filter index
 * @cmd_details: pointer to command details structure or NULL
 **/
#ifdef FORTVILLE_A0_SUPPORT
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
				u16 udp_port, u8 header_len,
				u8 protocol_index, u8 *filter_index,
				struct i40e_asq_cmd_details *cmd_details)
#else
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
				u16 udp_port, u8 protocol_index,
				u8 *filter_index,
				struct i40e_asq_cmd_details *cmd_details)
#endif
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_udp_tunnel *cmd =
@@ -3087,9 +3190,6 @@ enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);

	cmd->udp_port = CPU_TO_LE16(udp_port);
#ifdef FORTVILLE_A0_SUPPORT
	cmd->header_len = header_len;
#endif
	cmd->protocol_type = protocol_index;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -3925,9 +4025,6 @@ static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
	u32 fcoe_cntx_size, fcoe_filt_size;
	u32 pe_cntx_size, pe_filt_size;
	u32 fcoe_fmax;
#ifdef FORTVILLE_A0_SUPPORT
	u32 pe_fmax;
#endif

	u32 val;

@@ -4002,15 +4099,6 @@ static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
	    >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
	if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
		return I40E_ERR_INVALID_SIZE;
#ifdef FORTVILLE_A0_SUPPORT

	/* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
	val = rd32(hw, I40E_GLHMC_PEXFMAX);
	pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
	    >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
	if (pe_filt_size + pe_cntx_size > pe_fmax)
		return I40E_ERR_INVALID_SIZE;
#endif

	return I40E_SUCCESS;
}
@@ -4081,31 +4169,6 @@ enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,

	return I40E_SUCCESS;
}
#ifdef FORTVILLE_A0_SUPPORT

/**
 * i40e_set_tag_alloc_method
 * @hw: pointer to the hardware structure
 * @debug: a bool to indicates if the debug mode tag alloc needs to be set.
 *
 * Note: Enable debug mode tag allocation method if the Extended PCIE Tags are
 * disabled as a workaround to avoid Rx stall when the device comes up on PCI
 * Gen 2 slot or if the Extended Tags are disabled on Gen 3 slot. If the
 * Extended tags are enabled this workaround should not be applied since it
 * would cause unnecessary performance degradation.
 */
void i40e_set_tag_alloc_method(struct i40e_hw *hw, bool debug)
{
	u32 val;
	val = rd32(hw, I40E_GLPCI_PCITEST2);

	if (debug)
		val |= I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK;
	else
		val &= ~I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK;
	wr32(hw, I40E_GLPCI_PCITEST2, val);
}
#endif

/**
 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter

@@ -39,7 +39,6 @@

/* forward-declare the HW struct for the compiler */
struct i40e_hw;
enum i40e_status_code;

#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
#define I40E_HMC_PD_CNT_IN_SD 512
@@ -136,7 +135,7 @@ struct i40e_hmc_info {
		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
	wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
	wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -155,7 +154,7 @@ struct i40e_hmc_info {
		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
	val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
	wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
	wr32((hw), I40E_PFHMC_SDCMD, val3); \

@@ -425,7 +425,6 @@ enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
				break;
			}
		}
	}
@@ -510,7 +509,6 @@ try_type_paged:
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
		    ret_code);
		goto configure_lan_hmc_out;
		break;
	}

	/* Configure and program the FPM registers so objects can be created */
@@ -754,6 +752,381 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u8)1 << ce_info->width) - 1;

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u16)1 << ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

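i40e_write_word above is a masked read-modify-write: build a mask of width bits, align the source value and the mask by the field's bit offset within the byte, clear just those bits in the little-endian destination, and OR the new value in. A host-endian-only standalone sketch of the same insert (assuming a little-endian host, so the driver's CPU_TO_LE16/LE16_TO_CPU swizzles become no-ops here):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Insert 'value' as a 'width'-bit field starting at absolute bit 'lsb'
 * of the buffer. Sketch only; not the driver's exact code.
 */
static void write_field(uint8_t *buf, unsigned lsb, unsigned width,
    uint16_t value)
{
	unsigned shift = lsb % 8;
	uint16_t mask = (uint16_t)((((uint16_t)1 << width) - 1) << shift);
	uint16_t word;

	memcpy(&word, buf + lsb / 8, sizeof(word));	/* read */
	word &= ~mask;					/* clear the field */
	word |= (uint16_t)(value << shift) & mask;	/* insert new bits */
	memcpy(buf + lsb / 8, &word, sizeof(word));	/* write back */
}

int main(void)
{
	uint8_t ctx[4] = { 0 };

	write_field(ctx, 5, 7, 0x5A);	/* 7-bit field at bit 5 */
	/* prints "40 0b 00 00": 0x5A << 5 == 0xb40 in little-endian bytes */
	printf("%02x %02x %02x %02x\n", ctx[0], ctx[1], ctx[2], ctx[3]);
	return (0);
}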
/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = ((u32)1 << ce_info->width) - 1;
	else
		mask = 0xFFFFFFFF;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
}

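The width-of-exactly-32 special case above guards real undefined behavior: C leaves a shift by the full width of the type undefined, and on x86 the SHL count is masked to 5 bits, so ((u32)1 << 32) typically evaluates to 1 rather than 0 and the mask would collapse. A standalone sketch of the guard:

#include <stdio.h>
#include <stdint.h>

/* Mask of 'width' low bits, safe for width == 32 (where a plain
 * ((uint32_t)1 << 32) - 1 would be undefined behavior).
 */
static uint32_t field_mask32(unsigned width)
{
	return (width < 32) ? (((uint32_t)1 << width) - 1) : 0xFFFFFFFFu;
}

int main(void)
{
	/* prints 0x0000001f and 0xffffffff */
	printf("0x%08x\n", field_mask32(5));
	printf("0x%08x\n", field_mask32(32));
	return (0);
}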
/**
|
||||
* i40e_write_qword - replace HMC context qword
|
||||
* @hmc_bits: pointer to the HMC memory
|
||||
* @ce_info: a description of the struct to be read from
|
||||
* @src: the struct to be read from
|
||||
**/
|
||||
static void i40e_write_qword(u8 *hmc_bits,
|
||||
struct i40e_context_ele *ce_info,
|
||||
u8 *src)
|
||||
{
|
||||
u64 src_qword, mask;
|
||||
u8 *from, *dest;
|
||||
u16 shift_width;
|
||||
__le64 dest_qword;
|
||||
|
||||
/* copy from the next struct field */
|
||||
from = src + ce_info->offset;
|
||||
|
||||
/* prepare the bits and mask */
|
||||
shift_width = ce_info->lsb % 8;
|
||||
|
||||
/* if the field width is exactly 64 on an x86 machine, then the shift
|
||||
* operation will not work because the SHL instructions count is masked
|
||||
* to 6 bits so the shift will do nothing
|
||||
*/
|
||||
if (ce_info->width < 64)
|
||||
mask = ((u64)1 << ce_info->width) - 1;
|
||||
else
|
||||
mask = 0xFFFFFFFFFFFFFFFFUL;
|
||||
|
||||
/* don't swizzle the bits until after the mask because the mask bits
|
||||
* will be in a different bit position on big endian machines
|
||||
*/
|
||||
src_qword = *(u64 *)from;
|
||||
src_qword &= mask;
|
||||
|
||||
/* shift to correct alignment */
|
||||
mask <<= shift_width;
|
||||
src_qword <<= shift_width;
|
||||
|
||||
/* get the current bits from the target bit string */
|
||||
dest = hmc_bits + (ce_info->lsb / 8);
|
||||
|
||||
i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
|
||||
|
||||
dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
|
||||
dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
|
||||
|
||||
/* put it all back */
|
||||
i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_read_byte - read HMC context byte into struct
|
||||
* @hmc_bits: pointer to the HMC memory
|
||||
* @ce_info: a description of the struct to be filled
|
||||
* @dest: the struct to be filled
|
||||
**/
|
||||
static void i40e_read_byte(u8 *hmc_bits,
|
||||
struct i40e_context_ele *ce_info,
|
||||
u8 *dest)
|
||||
{
|
||||
u8 dest_byte, mask;
|
||||
u8 *src, *target;
|
||||
u16 shift_width;
|
||||
|
||||
/* prepare the bits and mask */
|
||||
shift_width = ce_info->lsb % 8;
|
||||
mask = ((u8)1 << ce_info->width) - 1;
|
||||
|
||||
/* shift to correct alignment */
|
||||
mask <<= shift_width;
|
||||
|
||||
/* get the current bits from the src bit string */
|
||||
src = hmc_bits + (ce_info->lsb / 8);
|
||||
|
||||
i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
|
||||
|
||||
dest_byte &= ~(mask);
|
||||
|
||||
dest_byte >>= shift_width;
|
||||
|
||||
/* get the address from the struct field */
|
||||
target = dest + ce_info->offset;
|
||||
|
||||
/* put it back in the struct */
|
||||
i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_read_word - read HMC context word into struct
|
||||
* @hmc_bits: pointer to the HMC memory
|
||||
* @ce_info: a description of the struct to be filled
|
||||
* @dest: the struct to be filled
|
||||
**/
|
||||
static void i40e_read_word(u8 *hmc_bits,
|
||||
struct i40e_context_ele *ce_info,
|
||||
u8 *dest)
|
||||
{
|
||||
u16 dest_word, mask;
|
||||
u8 *src, *target;
|
||||
u16 shift_width;
|
||||
__le16 src_word;
|
||||
|
||||
/* prepare the bits and mask */
|
||||
shift_width = ce_info->lsb % 8;
|
||||
mask = ((u16)1 << ce_info->width) - 1;
|
||||
|
||||
/* shift to correct alignment */
|
||||
mask <<= shift_width;
|
||||
|
||||
/* get the current bits from the src bit string */
|
||||
src = hmc_bits + (ce_info->lsb / 8);
|
||||
|
||||
i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
|
||||
|
||||
/* the data in the memory is stored as little endian so mask it
|
||||
* correctly
|
||||
*/
|
||||
src_word &= ~(CPU_TO_LE16(mask));
|
||||
|
||||
/* get the data back into host order before shifting */
|
||||
dest_word = LE16_TO_CPU(src_word);
|
||||
|
||||
dest_word >>= shift_width;
|
||||
|
||||
/* get the address from the struct field */
|
||||
target = dest + ce_info->offset;
|
||||
|
||||
/* put it back in the struct */
|
||||
i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_read_dword - read HMC context dword into struct
|
||||
* @hmc_bits: pointer to the HMC memory
|
||||
* @ce_info: a description of the struct to be filled
|
||||
* @dest: the struct to be filled
|
||||
**/
|
||||
static void i40e_read_dword(u8 *hmc_bits,
|
||||
struct i40e_context_ele *ce_info,
|
||||
u8 *dest)
|
||||
{
|
||||
u32 dest_dword, mask;
|
||||
u8 *src, *target;
|
||||
u16 shift_width;
|
||||
__le32 src_dword;
|
||||
|
||||
/* prepare the bits and mask */
|
||||
shift_width = ce_info->lsb % 8;
|
||||
|
||||
/* if the field width is exactly 32 on an x86 machine, then the shift
|
||||
* operation will not work because the SHL instructions count is masked
|
||||
* to 5 bits so the shift will do nothing
|
||||
*/
|
||||
if (ce_info->width < 32)
|
||||
mask = ((u32)1 << ce_info->width) - 1;
|
||||
else
|
||||
mask = 0xFFFFFFFF;
|
||||
|
||||
/* shift to correct alignment */
|
||||
mask <<= shift_width;
|
||||
|
||||
/* get the current bits from the src bit string */
|
||||
src = hmc_bits + (ce_info->lsb / 8);
|
||||
|
||||
i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
|
||||
|
||||
/* the data in the memory is stored as little endian so mask it
|
||||
* correctly
|
||||
*/
|
||||
src_dword &= ~(CPU_TO_LE32(mask));
|
||||
|
||||
/* get the data back into host order before shifting */
|
||||
dest_dword = LE32_TO_CPU(src_dword);
|
||||
|
||||
dest_dword >>= shift_width;
|
||||
|
||||
/* get the address from the struct field */
|
||||
target = dest + ce_info->offset;
|
||||
|
||||
/* put it back in the struct */
|
||||
i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
|
||||
I40E_NONDMA_TO_DMA);
|
||||
}

/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = ((u64)1 << ce_info->width) - 1;
	else
		mask = 0xFFFFFFFFFFFFFFFFUL;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_get_hmc_context - extract HMC context bits
 * @context_bytes: pointer to the context bit array
@ -764,55 +1137,21 @@ static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	u16 shift_width;
	u8 bitfield[8];
	int i, f;
	u64 mask;
	u8 *p;
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		*(u64 *)bitfield = 0;

		/* copy the bytes that contain the desired bits */
		p = context_bytes + (ce_info[f].lsb / 8);
		for (i = 0; i < ce_info[f].size_of; i++)
			bitfield[i] = p[i];

		/* shift the bits to the right */
		shift_width = ce_info[f].lsb % 8;
		*(u64 *)bitfield >>= shift_width;

		/* some fields might overlap into one more byte, so grab
		 * the one more byte if needed and stick the extra bits
		 * onto the top of the value
		 * example: 62 bit field that starts in bit 5 of first byte
		 * will overlap 3 bits into byte 9
		 */
		if ((shift_width + ce_info[f].width) >
		    (ce_info[f].size_of * 8)) {
			u8 byte = p[ce_info[f].size_of];
			byte <<= (8 - shift_width);
			bitfield[ce_info[f].size_of - 1] |= byte;
		}

		/* mask for the target bits */
		mask = ((u64)1 << ce_info[f].width) - 1;
		*(u64 *)bitfield &= mask;

		/* copy into the appropriate struct field */
		p = dest + ce_info[f].offset;
		switch (ce_info[f].size_of) {
		case 1:
			*p = *(u8 *)&bitfield;
			i40e_read_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			*(u16 *)p = LE16_TO_CPU(*(u16 *)&bitfield);
			i40e_read_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			*(u32 *)p = LE32_TO_CPU(*(u32 *)&bitfield);
			i40e_read_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			*(u64 *)p = LE64_TO_CPU(*(u64 *)&bitfield);
			i40e_read_qword(context_bytes, &ce_info[f], dest);
			break;
		default:
			/* nothing to do, just keep going */
@ -850,71 +1189,28 @@ static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	u16 shift_width;
	u64 bitfield;
	u8 hi_byte;
	u8 hi_mask;
	u64 t_bits;
	u64 mask;
	u8 *p;
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		/* clear out the field */
		bitfield = 0;

		/* copy from the next struct field */
		p = dest + ce_info[f].offset;
		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			bitfield = *p;
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			bitfield = CPU_TO_LE16(*(u16 *)p);
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			bitfield = CPU_TO_LE32(*(u32 *)p);
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			bitfield = CPU_TO_LE64(*(u64 *)p);
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}

		/* prepare the bits and mask */
		shift_width = ce_info[f].lsb % 8;
		mask = ((u64)1 << ce_info[f].width) - 1;

		/* save upper bytes for special case */
		hi_mask = (u8)((mask >> 56) & 0xff);
		hi_byte = (u8)((bitfield >> 56) & 0xff);

		/* shift to correct alignment */
		mask <<= shift_width;
		bitfield <<= shift_width;

		/* get the current bits from the target bit string */
		p = context_bytes + (ce_info[f].lsb / 8);
		i40e_memcpy(&t_bits, p, sizeof(u64), I40E_DMA_TO_NONDMA);

		t_bits &= ~mask;	/* get the bits not changing */
		t_bits |= bitfield;	/* add in the new bits */

		/* put it all back */
		i40e_memcpy(p, &t_bits, sizeof(u64), I40E_NONDMA_TO_DMA);

		/* deal with the special case if needed
		 * example: 62 bit field that starts in bit 5 of first byte
		 * will overlap 3 bits into byte 9
		 */
		if ((shift_width + ce_info[f].width) > 64) {
			u8 byte;

			hi_mask >>= (8 - shift_width);
			hi_byte >>= (8 - shift_width);
			byte = p[8] & ~hi_mask;	/* get the bits not changing */
			byte |= hi_byte;	/* add in the new bits */
			p[8] = byte;		/* put it back */
		}
	}

	return I40E_SUCCESS;
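To make the overlap comment concrete: with lsb = 5 and width = 62 (the example in the comment), shift_width + width = 67 bits, three more than the 64-bit window the helper copies, so three bits spill into p[8], the ninth byte. A tiny check of that arithmetic, using only the example's numbers:

#include <stdio.h>

int
main(void)
{
	unsigned int lsb = 5, width = 62;	/* values from the comment */
	unsigned int shift_width = lsb % 8;	/* 5 */

	/* 5 + 62 = 67 bits > 64, so the shifted field no longer fits in
	 * the 8-byte window starting at byte lsb / 8 and the top bits
	 * land in the ninth byte, p[8]. */
	if (shift_width + width > 64)
		printf("%u bits overlap into byte 9\n",
		    shift_width + width - 64);	/* prints 3 */
	return (0);
}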

@ -37,20 +37,25 @@

/* forward-declare the HW struct for the compiler */
struct i40e_hw;
enum i40e_status_code;

/* HMC element context information */

/* Rx queue context data */
/* Rx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct i40e_hmc_obj_rxq {
	u16 head;
	u8 cpuid;
	u16 cpuid; /* bigger than needed, see above for reason */
	u64 base;
	u16 qlen;
#define I40E_RXQ_CTX_DBUFF_SHIFT 7
	u8 dbuff;
	u16 dbuff; /* bigger than needed, see above for reason */
#define I40E_RXQ_CTX_HBUFF_SHIFT 6
	u8 hbuff;
	u16 hbuff; /* bigger than needed, see above for reason */
	u8 dtype;
	u8 dsize;
	u8 crcstrip;
@ -59,16 +64,22 @@ struct i40e_hmc_obj_rxq {
	u8 hsplit_0;
	u8 hsplit_1;
	u8 showiv;
	u16 rxmax;
	u32 rxmax; /* bigger than needed, see above for reason */
	u8 tphrdesc_ena;
	u8 tphwdesc_ena;
	u8 tphdata_ena;
	u8 tphhead_ena;
	u8 lrxqthresh;
	u16 lrxqthresh; /* bigger than needed, see above for reason */
	u8 prefena; /* NOTE: normally must be set to 1 at init */
};
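A quick illustration of why the widened holders matter, with made-up numbers rather than the real context layout: a 7-bit field whose lsb falls at bit 6 of its byte spans 13 bits once shifted, so a byte-sized holder loses its top bits inside the byte-sized write helper while a u16 keeps them:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Hypothetical layout: width 7, lsb at bit 6 => shift by 6 and
	 * the field crosses into the next byte (6 + 7 = 13 bits). */
	uint8_t narrow = 0x7f;	/* all 7 field bits set */
	uint16_t wide = 0x7f;

	printf("u8 : 0x%02x\n", (uint8_t)(narrow << 6));	/* 0xc0: top bits lost */
	printf("u16: 0x%04x\n", (uint16_t)(wide << 6));	/* 0x1fc0: intact */
	return (0);
}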

/* Tx queue context data */
/* Tx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct i40e_hmc_obj_txq {
	u16 head;
	u8 new_context;
@ -78,7 +89,7 @@ struct i40e_hmc_obj_txq {
	u8 fd_ena;
	u8 alt_vlan_ena;
	u16 thead_wb;
	u16 cpuid;
	u8 cpuid;
	u8 head_wb_ena;
	u16 qlen;
	u8 tphrdesc_ena;
@ -122,11 +133,7 @@ enum i40e_hmc_lan_object_size {
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
#ifdef FORTVILLE_A0_SUPPORT
#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
#else
#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
#endif
#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64

enum i40e_hmc_lan_rsrc_type {

@ -457,13 +457,9 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,

	DEBUGFUNC("i40e_validate_nvm_checksum");

	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code != I40E_SUCCESS)
		goto i40e_validate_nvm_checksum_exit;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (ret_code != I40E_SUCCESS)
		goto i40e_validate_nvm_checksum_free;
		goto i40e_validate_nvm_checksum_exit;

	/* Do not use i40e_read_nvm_word() because we do not want to take
	 * the synchronization semaphores twice here.
@ -480,9 +476,6 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_free:
	i40e_release_nvm(hw);

i40e_validate_nvm_checksum_exit:
	return ret_code;
}

@ -89,7 +89,7 @@ i40e_allocate_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma,
		goto fail_0;
	}
	err = bus_dmamem_alloc(dma->tag, (void **)&dma->va,
	    BUS_DMA_NOWAIT | M_ZERO, &dma->map);
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dma->map);
	if (err != 0) {
		device_printf(dev,
		    "i40e_allocate_dma: bus_dmamem_alloc failed, "
@ -116,6 +116,7 @@ fail_2:
fail_1:
	bus_dma_tag_destroy(dma->tag);
fail_0:
	dma->map = NULL;
	dma->tag = NULL;
	return (err);
}

@ -70,10 +70,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
bool i40e_asq_done(struct i40e_hw *hw);

/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw,
		   enum i40e_debug_mask mask,
		   void *desc,
		   void *buffer);
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
		   void *desc, void *buffer, u16 buf_len);

void i40e_idle_aq(struct i40e_hw *hw);
void i40e_resume_aq(struct i40e_hw *hw);
@ -90,6 +88,9 @@ enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
				u16 *fw_major_version, u16 *fw_minor_version,
				u16 *api_major_version, u16 *api_minor_version,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
				u32 reg_addr, u64 reg_val,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@ -116,12 +117,10 @@ enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
				struct i40e_asq_cmd_details *cmd_details);
#ifndef FORTVILLE_A0_SUPPORT
enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);
#endif
enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);
				bool enable_link, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
				bool enable_lse, struct i40e_link_status *link,
				struct i40e_asq_cmd_details *cmd_details);
@ -226,17 +225,10 @@ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);
#ifdef FORTVILLE_A0_SUPPORT
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
				u16 udp_port, u8 header_len,
				u8 protocol_index, u8 *filter_index,
				struct i40e_asq_cmd_details *cmd_details);
#else
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
				u16 udp_port, u8 protocol_index,
				u8 *filter_index,
				struct i40e_asq_cmd_details *cmd_details);
#endif
enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
@ -361,6 +353,7 @@ enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
/* i40e_common */
enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
@ -371,9 +364,6 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
				struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
#ifdef FORTVILLE_A0_SUPPORT
void i40e_set_tag_alloc_method(struct i40e_hw *hw, bool debug);
#endif
enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
/* prototype for functions used for NVM access */
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -38,12 +38,15 @@
** both the BASE and the VF drivers.
*/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "i40e.h"

/* Local Prototypes */
static void i40e_rx_checksum(struct mbuf *, u32, u32, u32);
static void i40e_rx_checksum(struct mbuf *, u32, u32, u8);
static void i40e_refresh_mbufs(struct i40e_queue *, int);
static int i40e_xmit(struct i40e_queue *, struct mbuf **);
static int i40e_tx_setup_offload(struct i40e_queue *,
@ -52,7 +55,7 @@ static bool i40e_tso_setup(struct i40e_queue *, struct mbuf *);

static __inline void i40e_rx_discard(struct rx_ring *, int);
static __inline void i40e_rx_input(struct rx_ring *, struct ifnet *,
		struct mbuf *, u32);
		struct mbuf *, u8);

/*
** Multiqueue Transmit driver
@ -72,6 +75,10 @@ i40e_mq_start(struct ifnet *ifp, struct mbuf *m)
	else
		i = curcpu % vsi->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & vsi->active_queues) == 0)
		i = ffsl(vsi->active_queues);

	que = &vsi->queues[i];
	txr = &que->txr;

@ -79,12 +86,12 @@ i40e_mq_start(struct ifnet *ifp, struct mbuf *m)
	if (err)
		return (err);
	if (I40E_TX_TRYLOCK(txr)) {
		err = i40e_mq_start_locked(ifp, txr);
		i40e_mq_start_locked(ifp, txr);
		I40E_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &que->tx_task);

	return (err);
	return (0);
}

int
@ -159,6 +166,34 @@ i40e_qflush(struct ifnet *ifp)
	if_qflush(ifp);
}

/*
** Find mbuf chains passed to the driver
** that are 'sparse', using more than
** I40E_SPARSE_CHAIN mbufs to deliver an
** mss-size chunk of data
*/
static inline bool
i40e_tso_detect_sparse(struct mbuf *mp)
{
	struct mbuf *m;
	int num = 0, mss;
	bool ret = FALSE;

	mss = mp->m_pkthdr.tso_segsz;
	for (m = mp->m_next; m != NULL; m = m->m_next) {
		num++;
		mss -= m->m_len;
		if (mss < 1)
			break;
		if (m->m_next == NULL)
			break;
	}
	if (num > I40E_SPARSE_CHAIN)
		ret = TRUE;

	return (ret);
}
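A worked example of the sparseness test, with made-up sizes: an MSS of 1448 bytes carried in 200-byte mbufs needs 8 mbufs, which is more than I40E_SPARSE_CHAIN (6), so the chain would be flagged and defragmented before the DMA load:

#include <stdio.h>

/* Hypothetical sizes: mss from tso_segsz, mbuf_len per chain link. */
int
main(void)
{
	int mss = 1448, mbuf_len = 200, num = 0;

	while (mss > 0) {
		num++;
		mss -= mbuf_len;
	}
	/* 8 mbufs per MSS here, more than I40E_SPARSE_CHAIN (6), so
	 * i40e_tso_detect_sparse() would return TRUE and i40e_xmit()
	 * would m_defrag() the chain. */
	printf("mbufs per MSS: %d\n", num);	/* prints 8 */
	return (0);
}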


/*********************************************************************
 *
 * This routine maps the mbufs to tx descriptors, allowing the
@ -176,22 +211,19 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
	struct tx_ring *txr = &que->txr;
	struct i40e_tx_buf *buf;
	struct i40e_tx_desc *txd = NULL;
	struct mbuf *m_head;
	int i, j, error, nsegs;
	struct mbuf *m_head, *m;
	int i, j, error, nsegs, maxsegs;
	int first, last = 0;
	u16 vtag = 0;
	u32 cmd, off;
	bus_dmamap_t map;
	bus_dma_segment_t segs[I40E_MAX_SEGS];
	bus_dma_tag_t tag;
	bus_dma_segment_t segs[I40E_MAX_TSO_SEGS];


	cmd = off = 0;
	m_head = *m_headp;

	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG)
		vtag = htole16(m_head->m_pkthdr.ether_vtag);

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
@ -200,17 +232,29 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
	first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = I40E_MAX_TX_SEGS;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = I40E_MAX_TSO_SEGS;
		if (i40e_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			*m_headp = m;
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->tag, map,
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_NOWAIT);
		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
@ -220,7 +264,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->tag, map,
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
@ -251,20 +295,25 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
	m_head = *m_headp;

	/* Set up the TSO/CSUM offload */
	error = i40e_tx_setup_offload(que, m_head, &cmd, &off);
	if (error)
		goto xmit_fail;
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = i40e_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Add vlan tag to each descriptor */
	if (m_head->m_flags & M_VLANTAG)
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}

	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag; /* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

@ -294,7 +343,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(txr->tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	buf = &txr->buffers[first];
@ -307,8 +356,8 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	wr32(hw, I40E_QTX_TAIL(que->me), i);
	i40e_flush(hw);
	/* Mark outstanding work */
	if (que->busy == 0)
@ -316,7 +365,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
	return (0);

xmit_fail:
	bus_dmamap_unload(txr->tag, buf->map);
	bus_dmamap_unload(tag, buf->map);
	return (error);
}

@ -341,21 +390,38 @@ i40e_allocate_tx_data(struct i40e_queue *que)
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, bounds */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    I40E_TSO_SIZE,		/* maxsize */
	    32,
	    I40E_MAX_TX_SEGS,		/* nsegments */
	    PAGE_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->tag))) {
	    &txr->tx_tag))) {
		device_printf(dev,"Unable to allocate TX DMA tag\n");
		goto fail;
	}

	/* Make a special tag for TSO */
	if ((error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    I40E_TSO_SIZE,		/* maxsize */
	    I40E_MAX_TSO_SEGS,		/* nsegments */
	    PAGE_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->tso_tag))) {
		device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
		goto fail;
	}

	if (!(txr->buffers =
	    (struct i40e_tx_buf *) malloc(sizeof(struct i40e_tx_buf) *
	    que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
@ -364,10 +430,11 @@ i40e_allocate_tx_data(struct i40e_queue *que)
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	/* Create the descriptor buffer default dma maps */
	buf = txr->buffers;
	for (int i = 0; i < que->num_desc; i++, buf++) {
		error = bus_dmamap_create(txr->tag, 0, &buf->map);
		buf->tag = txr->tx_tag;
		error = bus_dmamap_create(buf->tag, 0, &buf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
@ -410,9 +477,9 @@ i40e_init_tx_ring(struct i40e_queue *que)
	buf = txr->buffers;
	for (int i = 0; i < que->num_desc; i++, buf++) {
		if (buf->m_head != NULL) {
			bus_dmamap_sync(txr->tag, buf->map,
			bus_dmamap_sync(buf->tag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->tag, buf->map);
			bus_dmamap_unload(buf->tag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
@ -445,21 +512,21 @@ i40e_free_que_tx(struct i40e_queue *que)
	for (int i = 0; i < que->num_desc; i++) {
		buf = &txr->buffers[i];
		if (buf->m_head != NULL) {
			bus_dmamap_sync(txr->tag, buf->map,
			bus_dmamap_sync(buf->tag, buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->tag,
			bus_dmamap_unload(buf->tag,
			    buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
			if (buf->map != NULL) {
				bus_dmamap_destroy(txr->tag,
				bus_dmamap_destroy(buf->tag,
				    buf->map);
				buf->map = NULL;
			}
		} else if (buf->map != NULL) {
			bus_dmamap_unload(txr->tag,
			bus_dmamap_unload(buf->tag,
			    buf->map);
			bus_dmamap_destroy(txr->tag,
			bus_dmamap_destroy(buf->tag,
			    buf->map);
			buf->map = NULL;
		}
@ -470,9 +537,13 @@ i40e_free_que_tx(struct i40e_queue *que)
		free(txr->buffers, M_DEVBUF);
		txr->buffers = NULL;
	}
	if (txr->tag != NULL) {
		bus_dma_tag_destroy(txr->tag);
		txr->tag = NULL;
	if (txr->tx_tag != NULL) {
		bus_dma_tag_destroy(txr->tx_tag);
		txr->tx_tag = NULL;
	}
	if (txr->tso_tag != NULL) {
		bus_dma_tag_destroy(txr->tso_tag);
		txr->tso_tag = NULL;
	}
	return;
}
@ -549,8 +620,7 @@ i40e_tx_setup_offload(struct i40e_queue *que,
	switch (ipproto) {
	case IPPROTO_TCP:
		tcp_hlen = th->th_off << 2;
		if (mp->m_pkthdr.csum_flags & CSUM_TCP ||
		    mp->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
		if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
			*off |= (tcp_hlen >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -560,8 +630,7 @@ i40e_tx_setup_offload(struct i40e_queue *que,
#endif
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP ||
		    mp->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
		if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
			*off |= (sizeof(struct udphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -569,7 +638,7 @@ i40e_tx_setup_offload(struct i40e_queue *que,
		break;

	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & CSUM_SCTP) {
		if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
			*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
			*off |= (sizeof(struct sctphdr) >> 2) <<
			    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -679,6 +748,18 @@ i40e_tso_setup(struct i40e_queue *que, struct mbuf *mp)
	return TRUE;
}

/*
** i40e_get_tx_head - Retrieve the value from the
** location the HW records its HEAD index
*/
static inline u32
i40e_get_tx_head(struct i40e_queue *que)
{
	struct tx_ring *txr = &que->txr;
	void *head = &txr->base[que->num_desc];
	return LE32_TO_CPU(*(volatile __le32 *)head);
}
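The writeback slot read here sits one entry past the ring proper: the descriptor area is assumed to be allocated with room for num_desc descriptors plus a trailing 32-bit word that the hardware DMA-writes with the consumed head index. A stripped-down sketch of that layout assumption (fake types, little-endian host assumed; fake_get_tx_head is not a driver function):

#include <stdint.h>

/* Stand-in for struct i40e_tx_desc; only its size matters here. */
struct fake_tx_desc {
	uint64_t buffer_addr;
	uint64_t cmd_type_offset_bsz;
};

/* The ring is assumed to have one extra slot so the HW can write its
 * HEAD index just past the last real descriptor. */
static inline uint32_t
fake_get_tx_head(struct fake_tx_desc *base, int num_desc)
{
	volatile uint32_t *head = (volatile uint32_t *)&base[num_desc];

	return (*head);	/* the real driver byte-swaps with LE32_TO_CPU */
}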

/**********************************************************************
 *
 * Examine each tx_buffer in the used queue. If the hardware is done
@ -692,7 +773,7 @@ i40e_txeof(struct i40e_queue *que)
	struct i40e_vsi *vsi = que->vsi;
	struct ifnet *ifp = vsi->ifp;
	struct tx_ring *txr = &que->txr;
	u32 first, last, done, processed;
	u32 first, last, head, done, processed;
	struct i40e_tx_buf *buf;
	struct i40e_tx_desc *tx_desc, *eop_desc;

@ -715,6 +796,9 @@ i40e_txeof(struct i40e_queue *que)
		return FALSE;
	eop_desc = (struct i40e_tx_desc *)&txr->base[last];

	/* Get the Head WB value */
	head = i40e_get_tx_head(que);

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
@ -727,25 +811,24 @@ i40e_txeof(struct i40e_queue *que)
	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	** The HEAD index of the ring is written in a
	** defined location, this rather than a done bit
	** is what is used to keep track of what must be
	** 'cleaned'.
	*/
	while (eop_desc->cmd_type_offset_bsz &
	    htole32(I40E_TX_DESC_DTYPE_DESC_DONE)) {
	while (first != head) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->cmd_type_offset_bsz &=
			    ~I40E_TXD_QW1_DTYPE_MASK;
			++txr->avail;
			++processed;

			if (buf->m_head) {
				txr->bytes +=
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->tag,
				bus_dmamap_sync(buf->tag,
				    buf->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->tag,
				bus_dmamap_unload(buf->tag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
@ -786,10 +869,11 @@ i40e_txeof(struct i40e_queue *que)
	** be considered hung. If anything has been
	** cleaned then reset the state.
	*/
	if (!processed)
	if ((processed == 0) && (que->busy != I40E_QUEUE_HUNG))
		++que->busy;
	else
		que->busy = 1;

	if (processed)
		que->busy = 1; /* Note this turns off HUNG */

	/*
	 * If there are no pending descriptors, clear the timeout.
@ -884,6 +968,7 @@ no_split:
			    BUS_DMASYNC_PREREAD);
			rxr->base[i].read.pkt_addr =
			    htole64(pseg[0].ds_addr);
			/* Used only when doing header split */
			rxr->base[i].read.hdr_addr = 0;

			refreshed = TRUE;
@ -895,7 +980,7 @@ no_split:
	}
update:
	if (refreshed) /* Update hardware tail index */
		wr32(vsi->hw, I40E_QRX_TAIL(que->me), rxr->next_refresh);
		wr32(vsi->hw, rxr->tail, rxr->next_refresh);
	return;
}

@ -1020,6 +1105,7 @@ i40e_init_rx_ring(struct i40e_queue *que)
		buf->m_pack = NULL;
	}

	/* header split is off */
	rxr->hdr_split = FALSE;

	/* Now replenish the mbufs */
@ -1170,7 +1256,7 @@ i40e_free_que_rx(struct i40e_queue *que)
}

static __inline void
i40e_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
i40e_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{
	/*
	 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
@ -1260,9 +1346,10 @@ i40e_rxeof(struct i40e_queue *que, int count)

	for (i = rxr->next_check; count != 0;) {
		struct mbuf *sendmp, *mh, *mp;
		u32 rsc, ptype, status, error;
		u32 rsc, status, error;
		u16 hlen, plen, vtag;
		u64 qword;
		u8 ptype;
		bool eop;

		/* Sync the ring. */
@ -1478,8 +1565,11 @@ next_desc:
 *
 *********************************************************************/
static void
i40e_rx_checksum(struct mbuf * mp, u32 status, u32 error, u32 ptype)
i40e_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded;

	decoded = decode_rx_desc_ptype(ptype);

	/* Errors? */
	if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
@ -1488,6 +1578,16 @@ i40e_rx_checksum(struct mbuf * mp, u32 status, u32 error, u32 ptype)
		return;
	}

	/* IPv6 with extension headers likely have bad csum */
	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		if (status &
		    (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
			mp->m_pkthdr.csum_flags = 0;
			return;
		}


	/* IP Checksum Good */
	mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

@ -56,9 +56,6 @@
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
#ifdef FORTVILLE_A0_SUPPORT
#define I40E_DEV_ID_10G_BASE_T 0x1586
#endif
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571

@ -66,8 +63,10 @@
	 (d) == I40E_DEV_ID_QSFP_B || \
	 (d) == I40E_DEV_ID_QSFP_C)

#ifndef I40E_MASK
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
#endif

#define I40E_MAX_PF 16
#define I40E_MAX_PF_VSI 64
@ -214,10 +213,10 @@ enum i40e_fc_mode {

enum i40e_set_fc_aq_failures {
	I40E_SET_FC_AQ_FAIL_NONE = 0,
	I40E_SET_FC_AQ_FAIL_GET1 = 1,
	I40E_SET_FC_AQ_FAIL_GET = 1,
	I40E_SET_FC_AQ_FAIL_SET = 2,
	I40E_SET_FC_AQ_FAIL_GET2 = 4,
	I40E_SET_FC_AQ_FAIL_SET_GET = 6
	I40E_SET_FC_AQ_FAIL_UPDATE = 4,
	I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
};

enum i40e_vsi_type {
@ -533,6 +532,10 @@ struct i40e_hw {

	/* Admin Queue info */
	struct i40e_adminq_info aq;
#ifdef I40E_QV
	bool aq_dbg_ena; /* use Tools AQ instead of PF AQ */
	bool qv_force_init;
#endif

	/* state of nvm update process */
	enum i40e_nvmupd_state nvmupd_state;

File diff suppressed because it is too large
@ -11,12 +11,10 @@ SRCS += if_i40e.c i40e_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c

CFLAGS += -DSMP -DFORTVILLE_HW
CFLAGS += -DSMP

# Add Flow Director support
# CFLAGS += -DI40E_FDIR
# A0 hardware support
# CFLAGS += -DFORTVILLE_A0_SUPPORT
# Debug messages / sysctls
# CFLAGS += -DI40E_DEBUG