Update to the Intel Base driver for the Intel XL710 Ethernet Controller Family

- It was decided to change the driver name to if_ixl for FreeBSD
	- This release adds the VF driver to the tree; it can be built into
	  the kernel or as the if_ixlv module (see the config sketch below)
	- The VF driver is independent for the first time; this will be
	  desirable when full SR-IOV capability is added to the OS.
	- Thanks to my new coworker Eric Joyner for his superb work in
	  both the core and VF driver code.

Enjoy everyone!

Submitted by:	jack.vogel@intel.com and eric.joyner@intel.com
MFC after:	3 days (hoping to make 10.1)
Commit 61ae650d55 by Jack F Vogel, 2014-08-22 18:59:19 +00:00
parent fbb6eca60f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=270346
30 changed files with 7527 additions and 3387 deletions


@ -1424,22 +1424,26 @@ dev/hptiop/hptiop.c optional hptiop scbus
dev/hwpmc/hwpmc_logging.c optional hwpmc
dev/hwpmc/hwpmc_mod.c optional hwpmc
dev/hwpmc/hwpmc_soft.c optional hwpmc
dev/i40e/if_i40e.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/i40e/i40e_txrx.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/i40e/i40e_osdep.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/i40e/i40e_nvm.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/i40e/i40e_lan_hmc.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/i40e/i40e_hmc.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/i40e/i40e_common.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/i40e/i40e_adminq.c optional i40e inet \
compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP"
dev/ixl/if_ixl.c optional ixl inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/if_ixlv.c optional ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixlvc.c optional ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_txrx.c optional ixl ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_osdep.c optional ixl ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_lan_hmc.c optional ixl ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_hmc.c optional ixl ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_common.c optional ixl ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_nvm.c optional ixl ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_adminq.c optional ixl ixlv inet \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ichsmb/ichsmb.c optional ichsmb
dev/ichsmb/ichsmb_pci.c optional ichsmb pci
dev/ida/ida.c optional ida

File diff suppressed because it is too large


@ -1,7 +1,7 @@
FreeBSD Base Driver for the Intel® XL710 Ethernet Controller Family
================================================================
ixl FreeBSD* Base Driver for the Intel® XL710 Ethernet Controller Family
/*$FreeBSD$*/
================================================================
July 21, 2014
@ -19,7 +19,7 @@ Contents
Overview
========
This file describes the i40e FreeBSD* Base driver for the XL710 Ethernet Family of Adapters. The Driver has been developed for use with FreeBSD 10.0 or later, but should be compatible with any supported release.
This file describes the IXL FreeBSD* Base driver for the XL710 Ethernet Family of Adapters. The Driver has been developed for use with FreeBSD 10.0 or later, but should be compatible with any supported release.
For questions related to hardware requirements, refer to the documentation supplied with your Intel XL710 adapter. All hardware requirements listed apply for use with FreeBSD.
@ -60,17 +60,17 @@ NOTE: You must have kernel sources installed to compile the driver module.
In the instructions below, x.x.x is the driver version
as indicated in the name of the driver tar.
1. Move the base driver tar file to the directory of your choice. For example, use /home/username/i40e or /usr/local/src/i40e.
1. Move the base driver tar file to the directory of your choice. For example, use /home/username/ixl or /usr/local/src/ixl.
2. Untar/unzip the archive:
tar xfz i40e-x.x.x.tar.gz
tar xfz ixl-x.x.x.tar.gz
3. To install man page:
cd i40e-x.x.x
gzip -c i40e.4 > /usr/share/man/man4/i40e.4.gz
cd ixl-x.x.x
gzip -c ixl.4 > /usr/share/man/man4/ixl.4.gz
4. To load the driver onto a running system:
cd i40e-x.x.x/src
cd ixl-x.x.x/src
make load
5. To assign an IP address to the interface, enter the following:
@ -82,12 +82,12 @@ as indicated in thename of the driver tar.
7. If you want the driver to load automatically when the system is booted:
cd i40e-x.x.x/src
cd ixl-x.x.x/src
make
make install
Edit /boot/loader.conf, and add the following line:
if_i40e_load="YES"
if_ixl_load="YES"
Edit /etc/rc.conf, and create the appropriate
ifconfig_ixl<interface_num> entry:
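For example (interface number and address are illustrative):

	ifconfig_ixl0="inet 192.168.10.1 netmask 255.255.255.0"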
@ -304,7 +304,7 @@ Also, increasing the following in /etc/sysctl.conf could help increase network
UDP Stress Test Dropped Packet Issue
------------------------------------
Under small packet UDP stress test with the i40e driver, the FreeBSD system will drop UDP packets due to the fullness of socket buffers. You may want to change the driver's Flow Control variables to the minimum value for controlling packet reception.
Under small packet UDP stress test with the ixl driver, the FreeBSD system will drop UDP packets due to the fullness of socket buffers. You may want to change the driver's Flow Control variables to the minimum value for controlling packet reception.
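A minimal sketch, assuming the flow-control setting is exposed as a
per-device sysctl (the exact OID name is illustrative, not confirmed by
this commit):

	sysctl dev.ixl.0.fc=0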
Disable LRO when routing/bridging


@ -57,7 +57,7 @@ static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
if (hw->mac.type == I40E_MAC_VF) {
if (i40e_is_vf(hw)) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
hw->aq.asq.len = I40E_VF_ATQLEN1;
@ -68,19 +68,6 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
hw->aq.arq.len = I40E_VF_ARQLEN1;
hw->aq.arq.bal = I40E_VF_ARQBAL1;
hw->aq.arq.bah = I40E_VF_ARQBAH1;
#ifdef I40E_QV
} else if (hw->aq_dbg_ena) {
hw->aq.asq.tail = I40E_GL_ATQT;
hw->aq.asq.head = I40E_GL_ATQH;
hw->aq.asq.len = I40E_GL_ATQLEN;
hw->aq.asq.bal = I40E_GL_ATQBAL;
hw->aq.asq.bah = I40E_GL_ATQBAH;
hw->aq.arq.tail = I40E_GL_ARQT;
hw->aq.arq.head = I40E_GL_ARQH;
hw->aq.arq.len = I40E_GL_ARQLEN;
hw->aq.arq.bal = I40E_GL_ARQBAL;
hw->aq.arq.bah = I40E_GL_ARQBAH;
#endif
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
@ -169,10 +156,6 @@ void i40e_free_adminq_arq(struct i40e_hw *hw)
**/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
#ifdef I40E_QV
struct i40e_aq_desc qv_desc;
struct i40e_aq_desc *qv_desc_on_ring;
#endif
enum i40e_status_code ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
@ -201,13 +184,6 @@ static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
/* now configure the descriptors for use */
desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
#ifdef I40E_QV
/* swap the descriptor with userspace version */
i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
I40E_DMA_TO_NONDMA);
qv_desc_on_ring = desc;
desc = &qv_desc;
#endif
desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
@ -226,11 +202,6 @@ static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
desc->params.external.param0 = 0;
desc->params.external.param1 = 0;
#ifdef I40E_QV
/* put the initialized descriptor back to the ring */
i40e_memcpy(qv_desc_on_ring, desc, sizeof(struct i40e_aq_desc),
I40E_NONDMA_TO_DMA);
#endif
}
alloc_arq_bufs:
@ -521,22 +492,11 @@ enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
#ifdef I40E_QV
/* Do not reset registers, as Tools AQ is shared resource for QV */
if (!hw->aq_dbg_ena) {
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
wr32(hw, hw->aq.asq.len, 0);
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
}
#else
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
wr32(hw, hw->aq.asq.len, 0);
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
#endif
/* make sure spinlock is available */
i40e_acquire_spinlock(&hw->aq.asq_spinlock);
@ -565,22 +525,11 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
#ifdef I40E_QV
/* Do not reset registers, as Tools AQ is shared resource for QV */
if (!hw->aq_dbg_ena) {
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
wr32(hw, hw->aq.arq.len, 0);
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
}
#else
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
wr32(hw, hw->aq.arq.len, 0);
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
#endif
/* make sure spinlock is available */
i40e_acquire_spinlock(&hw->aq.arq_spinlock);
@ -611,7 +560,6 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
enum i40e_status_code ret_code;
u16 eetrack_lo, eetrack_hi;
int retry = 0;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
@ -641,7 +589,10 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
if (ret_code != I40E_SUCCESS)
goto init_adminq_free_asq;
/* There are some cases where the firmware may not be quite ready
if (i40e_is_vf(hw)) /* VF has no need of firmware */
goto init_adminq_exit;
/* There are some cases where the firmware may not be quite ready
* for AdminQ operations, so we retry the AdminQ setup a few times
* if we see timeouts in this first AQ call.
*/
@ -667,19 +618,10 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
#ifdef I40E_QV
if (!hw->qv_force_init) {
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
}
}
#else
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
}
#endif
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
@ -714,16 +656,8 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
#ifdef I40E_QV
/* This command is not supported for Tools AQ */
if (!hw->aq_dbg_ena) {
if (i40e_check_asq_alive(hw))
i40e_aq_queue_shutdown(hw, TRUE);
}
#else
if (i40e_check_asq_alive(hw))
i40e_aq_queue_shutdown(hw, TRUE);
#endif
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
@ -743,10 +677,6 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
**/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
#ifdef I40E_QV
struct i40e_aq_desc qv_desc = {0};
struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
struct i40e_adminq_ring *asq = &(hw->aq.asq);
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
@ -755,13 +685,6 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
#ifdef I40E_QV
/* copy the descriptor from ring to userspace buffer */
i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
I40E_DMA_TO_NONDMA);
qv_desc_on_ring = desc;
desc = &qv_desc;
#endif /* I40E_QV */
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"%s: ntc %d head %d.\n", __FUNCTION__, ntc,
@ -776,23 +699,11 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
}
i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
#ifdef I40E_QV
/* copy the descriptor from userspace buffer to ring */
i40e_memcpy(qv_desc_on_ring, desc,
sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */
ntc++;
if (ntc == asq->count)
ntc = 0;
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
#ifdef I40E_QV
/* copy the descriptor from ring to userspace buffer */
i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
I40E_DMA_TO_NONDMA);
qv_desc_on_ring = desc;
desc = &qv_desc;
#endif /* I40E_QV */
}
asq->next_to_clean = ntc;
@ -833,10 +744,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
#ifdef I40E_QV
struct i40e_aq_desc qv_desc = {0};
struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
@ -933,13 +840,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
/* if the desc is available copy the temp desc to the right place */
i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
I40E_NONDMA_TO_DMA);
#ifdef I40E_QV
/* copy the descriptor from ring to userspace buffer */
i40e_memcpy(&qv_desc, desc_on_ring, sizeof(struct i40e_aq_desc),
I40E_DMA_TO_NONDMA);
qv_desc_on_ring = desc_on_ring;
desc_on_ring = &qv_desc;
#endif /* I40E_QV */
/* if buff is not NULL assume indirect command */
if (buff != NULL) {
@ -956,11 +856,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
desc_on_ring->params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
#ifdef I40E_QV
/* copy the descriptor from userspace buffer to ring */
i40e_memcpy(qv_desc_on_ring, desc_on_ring,
sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */
}
/* bump the tail */
@ -978,31 +873,21 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (!details->async && !details->postpone) {
u32 total_delay = 0;
u32 delay_len = 10;
do {
#ifdef I40E_QV
/* copy the descriptor from ring to user buffer */
i40e_memcpy(desc_on_ring, qv_desc_on_ring,
sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
#endif /* I40E_QV */
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
if (i40e_asq_done(hw))
break;
/* ugh! delay while spin_lock */
i40e_usec_delay(delay_len);
total_delay += delay_len;
i40e_msec_delay(1);
total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
/* if ready, copy the desc back to temp */
if (i40e_asq_done(hw)) {
#ifdef I40E_QV
/* Swap pointer back */
desc_on_ring = qv_desc_on_ring;
#endif /* I40E_QV */
i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
I40E_DMA_TO_NONDMA);
if (buff != NULL)
@ -1079,10 +964,6 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *pending)
{
#ifdef I40E_QV
struct i40e_aq_desc qv_desc = {0};
struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
@ -1099,22 +980,12 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Queue is empty.\n");
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
goto clean_arq_element_out;
}
/* now clean the next descriptor */
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
#ifdef I40E_QV
/* copy the descriptor from ring to userspace buffer */
i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
I40E_DMA_TO_NONDMA);
qv_desc_on_ring = desc;
desc = &qv_desc;
#endif /* I40E_QV */
desc_idx = ntc;
flags = LE16_TO_CPU(desc->flags);
@ -1131,11 +1002,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
I40E_DMA_TO_NONDMA);
datalen = LE16_TO_CPU(desc->datalen);
e->msg_size = min(datalen, e->msg_size);
if (e->msg_buf != NULL && (e->msg_size != 0))
e->msg_len = min(datalen, e->buf_len);
if (e->msg_buf != NULL && (e->msg_len != 0))
i40e_memcpy(e->msg_buf,
hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_size, I40E_DMA_TO_NONDMA);
e->msg_len, I40E_DMA_TO_NONDMA);
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
@ -1154,11 +1025,6 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
desc->datalen = CPU_TO_LE16((u16)bi->size);
desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
#ifdef I40E_QV
/* copy the descriptor from userspace buffer to ring */
i40e_memcpy(qv_desc_on_ring, desc,
sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */
/* set tail = the last cleaned desc index. */
wr32(hw, hw->aq.arq.tail, ntc);


@ -84,7 +84,8 @@ struct i40e_asq_cmd_details {
/* ARQ event information */
struct i40e_arq_event_info {
struct i40e_aq_desc desc;
u16 msg_size;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
};
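The old single msg_size field is split into buf_len (capacity the caller
allocated) and msg_len (bytes the firmware actually returned). A minimal
caller-side sketch of the new contract -- the buffer size choice and the
process_event() handler are illustrative, not part of this commit:

	struct i40e_arq_event_info event;
	u16 pending = 0;

	event.buf_len = IXL_AQ_BUF_SZ;		/* capacity of msg_buf */
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT | M_ZERO);

	do {
		if (i40e_clean_arq_element(hw, &event, &pending))
			break;			/* e.g. queue empty */
		/* event.msg_len = min(datalen, buf_len) bytes are valid */
		process_event(hw, &event);
	} while (pending);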
@ -114,7 +115,7 @@ struct i40e_adminq_info {
/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
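The unit change is behavior-preserving: 100000 usecs and 100 msecs are the
same wall-clock budget. Only the polling loop in i40e_asq_send_command()
(shown earlier in this diff) changed to tick once per millisecond, roughly:

	u32 total_delay = 0;
	do {
		if (i40e_asq_done(hw))
			break;		/* head caught up; command completed */
		i40e_msec_delay(1);
		total_delay++;		/* now counts milliseconds */
	} while (total_delay < hw->aq.asq_cmd_timeout);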
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);

sys/dev/ixl/i40e_adminq_cmd.h (new executable file, 2180 lines)

File diff suppressed because it is too large


@ -44,7 +44,7 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
{
enum i40e_status_code status = I40E_SUCCESS;
@ -60,6 +60,7 @@ static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
@ -4686,3 +4687,101 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cmd_details: pointer to command details
*
* Send message to PF driver using admin queue. By default, this message
* is sent asynchronously, i.e. i40e_asq_send_command() does not wait for
* completion before returning.
**/
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_asq_cmd_details details;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
desc.cookie_high = CPU_TO_LE32(v_opcode);
desc.cookie_low = CPU_TO_LE32(v_retval);
if (msglen) {
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF
| I40E_AQ_FLAG_RD));
if (msglen > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
desc.datalen = CPU_TO_LE16(msglen);
}
if (!cmd_details) {
i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM);
details.async = TRUE;
cmd_details = &details;
}
status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
msglen, cmd_details);
return status;
}
/**
* i40e_vf_parse_hw_config
* @hw: pointer to the hardware structure
* @msg: pointer to the virtual channel VF resource structure
*
* Given a VF resource message from the PF, populate the hw struct
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg)
{
struct i40e_virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
hw->dev_caps.num_vsis = msg->num_vsis;
hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.fcoe = (msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
hw->dev_caps.iwarp = (msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
i40e_memcpy(hw->mac.perm_addr,
vsi_res->default_mac_addr,
I40E_ETH_LENGTH_OF_ADDRESS,
I40E_NONDMA_TO_NONDMA);
i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
I40E_ETH_LENGTH_OF_ADDRESS,
I40E_NONDMA_TO_NONDMA);
}
vsi_res++;
}
}
/**
* i40e_vf_reset
* @hw: pointer to the hardware structure
*
* Send a VF_RESET message to the PF. Does not wait for response from PF
* as none will be forthcoming. Immediately after calling this function,
* the admin queue should be shut down and (optionally) reinitialized.
**/
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
{
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
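Following the comment above, a caller-side sketch of the reset flow might
look like this (sequencing per the comment; nothing here waits on the PF):

	i40e_vf_reset(hw);		/* fire-and-forget VF_RESET message */
	i40e_shutdown_adminq(hw);	/* AQ must come down immediately */
	/* ... wait for the PF to act, in some device-specific way ... */
	i40e_init_adminq(hw);		/* optional: bring the AQ back up */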


@ -34,7 +34,7 @@
#include <machine/stdarg.h>
#include "i40e.h"
#include "ixl.h"
/********************************************************************
* Manage DMA'able memory.


@ -54,9 +54,8 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#define ASSERT(x) if(!(x)) panic("I40E: x")
#define ASSERT(x) if(!(x)) panic("IXL: x")
/* The happy-fun DELAY macro is defined in /usr/src/sys/i386/include/clock.h */
#define i40e_usec_delay(x) DELAY(x)
#define i40e_msec_delay(x) DELAY(1000*(x))
@ -146,9 +145,10 @@ void prefetch(void *x)
struct i40e_osdep
{
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
struct device *dev;
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
struct device *dev;
};
struct i40e_dma_mem {
@ -166,8 +166,6 @@ struct i40e_hw; /* forward decl */
u16 i40e_read_pci_cfg(struct i40e_hw *, u32);
void i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
#define I40E_READ_PCIE_WORD i40e_read_pci_cfg
#define i40e_allocate_dma_mem(h, m, unused, s, a) i40e_allocate_dma(h, m, s, a)
#define i40e_free_dma_mem(h, m) i40e_free_dma(h, m)
@ -181,17 +179,38 @@ struct i40e_virt_mem {
#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt(h, m, s)
#define i40e_free_virt_mem(h, m) i40e_free_virt(h, m)
/*
** This hardware supports either 16 or 32 byte rx descriptors
** we default here to the larger size.
*/
#define i40e_rx_desc i40e_32byte_rx_desc
#define rd32(a, reg) (\
bus_space_read_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
reg))
static __inline uint32_t
rd32_osdep(struct i40e_osdep *osdep, uint32_t reg)
{
#define wr32(a, reg, value) (\
bus_space_write_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
reg, value))
KASSERT(reg < osdep->mem_bus_space_size,
("ixl: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
return (bus_space_read_4(osdep->mem_bus_space_tag,
osdep->mem_bus_space_handle, reg));
}
static __inline void
wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value)
{
KASSERT(reg < osdep->mem_bus_space_size,
("ixl: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
bus_space_write_4(osdep->mem_bus_space_tag,
osdep->mem_bus_space_handle, reg, value);
}
#define rd32(a, reg) rd32_osdep((a)->back, (reg))
#define wr32(a, reg, value) wr32_osdep((a)->back, (reg), (value))
#define rd64(a, reg) (\
bus_space_read_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
@ -203,7 +222,7 @@ struct i40e_virt_mem {
((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
reg, value))
#define i40e_flush(a) (\
#define ixl_flush(a) (\
bus_space_read_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
I40E_GLGEN_STAT))


@ -104,11 +104,11 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
bool atomic_reset);
enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
u16 max_frame_size, bool crc_en, u16 pacing,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
u64 *advt_reg,
struct i40e_asq_cmd_details *cmd_details);
@ -393,10 +393,8 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
u8 *bytes, int *);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
#if defined(I40E_QV) || defined(VF_DRIVER)
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
#endif
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)


@ -56,6 +56,7 @@
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@ -532,10 +533,6 @@ struct i40e_hw {
/* Admin Queue info */
struct i40e_adminq_info aq;
#ifdef I40E_QV
bool aq_dbg_ena; /* use Tools AQ instead of PF AQ */
bool qv_force_init;
#endif
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
@ -553,6 +550,7 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
};
#define i40e_is_vf(_hw) ((_hw)->mac.type == I40E_MAC_VF)
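With this macro, shared code can branch on function type without open-coding
the mac.type comparison; the adminq register setup earlier in this diff, for
instance, reduces to the pattern:

	if (i40e_is_vf(hw))
		hw->aq.asq.tail = I40E_VF_ATQT1;	/* VF register offsets */
	else
		hw->aq.asq.tail = I40E_PF_ATQT;		/* PF register offsets */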
struct i40e_driver_version {
u8 major_version;


@ -87,6 +87,7 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
I40E_VIRTCHNL_OP_GET_STATS,
I40E_VIRTCHNL_OP_FCOE,
I40E_VIRTCHNL_OP_CONFIG_RSS,
/* PF sends status change events to vfs using
* the following op.
*/

File diff suppressed because it is too large

sys/dev/ixl/if_ixlv.c (new file, 2742 lines)

File diff suppressed because it is too large

sys/dev/i40e/i40e.h → sys/dev/ixl/ixl.h (mode: executable → normal, 280 lines)

@ -33,8 +33,8 @@
/*$FreeBSD$*/
#ifndef _I40E_H_
#define _I40E_H_
#ifndef _IXL_H_
#define _IXL_H_
#include <sys/param.h>
@ -91,7 +91,7 @@
#include "i40e_type.h"
#include "i40e_prototype.h"
#ifdef I40E_DEBUG
#ifdef IXL_DEBUG
#include <sys/sbuf.h>
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
@ -100,15 +100,48 @@
(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
#define DPRINTF(...) printf(__VA_ARGS__)
#define DDPRINTF(dev, ...) device_printf(dev, __VA_ARGS__)
#define IDPRINTF(ifp, ...) if_printf(ifp, __VA_ARGS__)
// static void i40e_dump_desc(void *, u8, u16);
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
/* Defines for printing generic debug information */
#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
/* Defines for printing specific debug information */
#define DEBUG_INIT 1
#define DEBUG_IOCTL 1
#define DEBUG_HW 1
#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
if_printf(ifp, S "\n", ##__VA_ARGS__)
#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
#else
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define DPRINTF(...)
#define DDPRINTF(...)
#define IDPRINTF(...)
#define INIT_DEBUGOUT(...)
#define INIT_DBG_DEV(...)
#define INIT_DBG_IF(...)
#define IOCTL_DEBUGOUT(...)
#define IOCTL_DBG_IF2(...)
#define IOCTL_DBG_IF(...)
#define HW_DEBUGOUT(...)
#endif
/* Tunables */
@ -124,6 +157,11 @@
#define MAX_RING 4096
#define MIN_RING 32
/*
** Default number of entries in Tx queue buf_ring.
*/
#define DEFAULT_TXBRSZ (4096 * 4096)
/* Alignment for rings */
#define DBA_ALIGN 128
@ -138,106 +176,91 @@
* pass between any two TX clean operations, such only happening
* when the TX hardware is functioning.
*/
#define I40E_WATCHDOG (10 * hz)
#define IXL_WATCHDOG (10 * hz)
/*
* This parameters control when the driver calls the routine to reclaim
* transmit descriptors.
*/
#define I40E_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
#define I40E_TX_OP_THRESHOLD (que->num_desc / 32)
#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
#define IXL_TX_OP_THRESHOLD (que->num_desc / 32)
/* Flow control constants */
#define I40E_FC_PAUSE 0xFFFF
#define I40E_FC_HI 0x20000
#define I40E_FC_LO 0x10000
/* Defines for printing debug information */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
#define IXL_FC_PAUSE 0xFFFF
#define IXL_FC_HI 0x20000
#define IXL_FC_LO 0x10000
#define MAX_MULTICAST_ADDR 128
#define I40E_BAR 3
#define I40E_ADM_LIMIT 2
#define I40E_TSO_SIZE 65535
#define I40E_TX_BUF_SZ ((u32) 1514)
#define I40E_AQ_BUF_SZ ((u32) 4096)
#define I40E_RX_HDR 128
#define I40E_AQ_LEN 32
#define I40E_AQ_BUFSZ 4096
#define I40E_RX_LIMIT 512
#define I40E_RX_ITR 0
#define I40E_TX_ITR 1
#define I40E_ITR_NONE 3
#define I40E_QUEUE_EOL 0x7FF
#define I40E_MAX_FRAME 0x2600
#define I40E_MAX_TX_SEGS 8
#define I40E_MAX_TSO_SEGS 66
#define I40E_SPARSE_CHAIN 6
#define I40E_QUEUE_HUNG 0x80000000
#define IXL_BAR 3
#define IXL_ADM_LIMIT 2
#define IXL_TSO_SIZE 65535
#define IXL_TX_BUF_SZ ((u32) 1514)
#define IXL_AQ_BUF_SZ ((u32) 4096)
#define IXL_RX_HDR 128
#define IXL_AQ_LEN 256
#define IXL_AQ_BUFSZ 4096
#define IXL_RX_LIMIT 512
#define IXL_RX_ITR 0
#define IXL_TX_ITR 1
#define IXL_ITR_NONE 3
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MAX_FRAME 0x2600
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TSO_SEGS 66
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
/* ERJ: hardware can support ~1.5k filters between all functions */
#define I40E_MAX_FILTERS 256
#define I40E_MAX_TX_BUSY 10
#define IXL_MAX_FILTERS 256
#define IXL_MAX_TX_BUSY 10
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
#define IXL_NVM_VERSION_LO_SHIFT 0
#define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT)
#define IXL_NVM_VERSION_HI_SHIFT 12
#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
/*
* Interrupt Moderation parameters
*/
#define I40E_MAX_ITR 0x07FF
#define I40E_ITR_100K 0x0005
#define I40E_ITR_20K 0x0019
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_ITR_DYNAMIC 0x8000
#define I40E_LOW_LATENCY 0
#define I40E_AVE_LATENCY 1
#define I40E_BULK_LATENCY 2
#define IXL_MAX_ITR 0x07FF
#define IXL_ITR_100K 0x0005
#define IXL_ITR_20K 0x0019
#define IXL_ITR_8K 0x003E
#define IXL_ITR_4K 0x007A
#define IXL_ITR_DYNAMIC 0x8000
#define IXL_LOW_LATENCY 0
#define IXL_AVE_LATENCY 1
#define IXL_BULK_LATENCY 2
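Assuming the hardware ITR granularity is 2 usecs per count (an assumption
about the XL710, not stated in this header), the encodings work out as:
0x0005 -> 10 usecs -> ~100K interrupts/s, 0x0019 (25) -> 50 usecs -> 20K,
0x003E (62) -> 124 usecs -> ~8K, and 0x007A (122) -> 244 usecs -> ~4K.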
/* MacVlan Flags */
#define I40E_FILTER_USED (u16)(1 << 0)
#define I40E_FILTER_VLAN (u16)(1 << 1)
#define I40E_FILTER_ADD (u16)(1 << 2)
#define I40E_FILTER_DEL (u16)(1 << 3)
#define I40E_FILTER_MC (u16)(1 << 4)
#define IXL_FILTER_USED (u16)(1 << 0)
#define IXL_FILTER_VLAN (u16)(1 << 1)
#define IXL_FILTER_ADD (u16)(1 << 2)
#define IXL_FILTER_DEL (u16)(1 << 3)
#define IXL_FILTER_MC (u16)(1 << 4)
/* used in the vlan field of the filter when not a vlan */
#define I40E_VLAN_ANY -1
#define IXL_VLAN_ANY -1
#define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)
/* Misc flags for i40e_vsi.flags */
#define I40E_FLAGS_KEEP_TSO4 (1 << 0)
#define I40E_FLAGS_KEEP_TSO6 (1 << 1)
/* Misc flags for ixl_vsi.flags */
#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
#define I40E_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define I40E_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define I40E_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
#define I40E_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx)
#define I40E_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED)
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
#define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx)
#define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED)
#define I40E_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define I40E_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define I40E_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
#define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
/*
*****************************************************************************
@ -248,36 +271,39 @@
*
*****************************************************************************
*/
typedef struct _i40e_vendor_info_t {
typedef struct _ixl_vendor_info_t {
unsigned int vendor_id;
unsigned int device_id;
unsigned int subvendor_id;
unsigned int subdevice_id;
unsigned int index;
} i40e_vendor_info_t;
} ixl_vendor_info_t;
struct i40e_tx_buf {
struct ixl_tx_buf {
u32 eop_index;
struct mbuf *m_head;
bus_dmamap_t map;
bus_dma_tag_t tag;
};
struct i40e_rx_buf {
struct ixl_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
struct mbuf *fmp;
bus_dmamap_t hmap;
bus_dmamap_t pmap;
#ifdef DEV_NETMAP
u64 addr;
#endif
};
/*
** This struct has multiple uses, multicast
** addresses, vlans, and mac filters all use it.
*/
struct i40e_mac_filter {
SLIST_ENTRY(i40e_mac_filter) next;
struct ixl_mac_filter {
SLIST_ENTRY(ixl_mac_filter) next;
u8 macaddr[ETHER_ADDR_LEN];
s16 vlan;
u16 flags;
@ -288,7 +314,7 @@ struct i40e_mac_filter {
* The Transmit ring control struct
*/
struct tx_ring {
struct i40e_queue *que;
struct ixl_queue *que;
struct mtx mtx;
u32 tail;
struct i40e_tx_desc *base;
@ -299,7 +325,7 @@ struct tx_ring {
u16 atr_count;
u16 itr;
u16 latency;
struct i40e_tx_buf *buffers;
struct ixl_tx_buf *buffers;
volatile u16 avail;
u32 cmd;
bus_dma_tag_t tx_tag;
@ -307,9 +333,12 @@ struct tx_ring {
char mtx_name[16];
struct buf_ring *br;
/* Soft Stats */
/* Used for Dynamic ITR calculation */
u32 packets;
u32 bytes;
/* Soft Stats */
u64 tx_bytes;
u64 no_desc;
u64 total_packets;
};
@ -319,7 +348,7 @@ struct tx_ring {
* The Receive ring control struct
*/
struct rx_ring {
struct i40e_queue *que;
struct ixl_queue *que;
struct mtx mtx;
union i40e_rx_desc *base;
struct i40e_dma_mem dma;
@ -332,16 +361,17 @@ struct rx_ring {
u16 itr;
u16 latency;
char mtx_name[16];
struct i40e_rx_buf *buffers;
struct ixl_rx_buf *buffers;
u32 mbuf_sz;
u32 tail;
bus_dma_tag_t htag;
bus_dma_tag_t ptag;
/* Soft stats */
/* Used for Dynamic ITR calculation */
u32 packets;
u32 bytes;
/* Soft stats */
u64 split;
u64 rx_packets;
u64 rx_bytes;
@ -353,8 +383,8 @@ struct rx_ring {
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring pair.
*/
struct i40e_queue {
struct i40e_vsi *vsi;
struct ixl_queue {
struct ixl_vsi *vsi;
u32 me;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
@ -384,8 +414,8 @@ struct i40e_queue {
** there would be one of these per traffic class/type
** for now just one, and its embedded in the pf
*/
SLIST_HEAD(i40e_ftl_head, i40e_mac_filter);
struct i40e_vsi {
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
struct ixl_vsi {
void *back;
struct ifnet *ifp;
struct device *dev;
@ -397,7 +427,7 @@ struct i40e_vsi {
u16 num_queues;
u16 rx_itr_setting;
u16 tx_itr_setting;
struct i40e_queue *queues; /* head of queues */
struct ixl_queue *queues; /* head of queues */
bool link_active;
u16 seid;
u16 max_frame_size;
@ -406,7 +436,7 @@ struct i40e_vsi {
u32 fc; /* local flow ctrl setting */
/* MAC/VLAN Filter list */
struct i40e_ftl_head ftl;
struct ixl_ftl_head ftl;
struct i40e_aqc_vsi_properties_data info;
@ -432,7 +462,7 @@ struct i40e_vsi {
** Find the number of unrefreshed RX descriptors
*/
static inline u16
i40e_rx_unrefreshed(struct i40e_queue *que)
ixl_rx_unrefreshed(struct ixl_queue *que)
{
struct rx_ring *rxr = &que->rxr;
@ -446,13 +476,13 @@ i40e_rx_unrefreshed(struct i40e_queue *que)
/*
** Find the next available unused filter
*/
static inline struct i40e_mac_filter *
i40e_get_filter(struct i40e_vsi *vsi)
static inline struct ixl_mac_filter *
ixl_get_filter(struct ixl_vsi *vsi)
{
struct i40e_mac_filter *f;
struct ixl_mac_filter *f;
/* create a new empty filter */
f = malloc(sizeof(struct i40e_mac_filter),
f = malloc(sizeof(struct ixl_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
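	/* note: with M_NOWAIT, f may be NULL here; the insert below does not
	   check, so callers effectively assume the allocation succeeded */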
SLIST_INSERT_HEAD(&vsi->ftl, f, next);
@ -478,19 +508,19 @@ cmp_etheraddr(u8 *ea1, u8 *ea2)
/*
* Info for stats sysctls
*/
struct i40e_sysctl_info {
struct ixl_sysctl_info {
u64 *stat;
char *name;
char *description;
};
extern int i40e_atr_rate;
extern int ixl_atr_rate;
/*
** i40e_fw_version_str - format the FW and NVM version strings
** ixl_fw_version_str - format the FW and NVM version strings
*/
static inline char *
i40e_fw_version_str(struct i40e_hw *hw)
ixl_fw_version_str(struct i40e_hw *hw)
{
static char buf[32];
@ -498,10 +528,10 @@ i40e_fw_version_str(struct i40e_hw *hw)
"f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
(hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
IXL_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
IXL_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
return buf;
}
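As a worked example, hw->nvm.version == 0x1234 would print as n01.34:
hi = (0x1234 & IXL_NVM_VERSION_HI_MASK) >> 12 = 1, and
lo = (0x1234 & IXL_NVM_VERSION_LO_MASK) = 0x34.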
@ -509,21 +539,21 @@ i40e_fw_version_str(struct i40e_hw *hw)
/*********************************************************************
* TXRX Function prototypes
*********************************************************************/
int i40e_allocate_tx_data(struct i40e_queue *);
int i40e_allocate_rx_data(struct i40e_queue *);
void i40e_init_tx_ring(struct i40e_queue *);
int i40e_init_rx_ring(struct i40e_queue *);
bool i40e_rxeof(struct i40e_queue *, int);
bool i40e_txeof(struct i40e_queue *);
int i40e_mq_start(struct ifnet *, struct mbuf *);
int i40e_mq_start_locked(struct ifnet *, struct tx_ring *);
void i40e_deferred_mq_start(void *, int);
void i40e_qflush(struct ifnet *);
void i40e_free_vsi(struct i40e_vsi *);
void i40e_free_que_tx(struct i40e_queue *);
void i40e_free_que_rx(struct i40e_queue *);
#ifdef I40E_FDIR
void i40e_atr(struct i40e_queue *, struct tcphdr *, int);
int ixl_allocate_tx_data(struct ixl_queue *);
int ixl_allocate_rx_data(struct ixl_queue *);
void ixl_init_tx_ring(struct ixl_queue *);
int ixl_init_rx_ring(struct ixl_queue *);
bool ixl_rxeof(struct ixl_queue *, int);
bool ixl_txeof(struct ixl_queue *);
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
void ixl_qflush(struct ifnet *);
void ixl_free_vsi(struct ixl_vsi *);
void ixl_free_que_tx(struct ixl_queue *);
void ixl_free_que_rx(struct ixl_queue *);
#ifdef IXL_FDIR
void ixl_atr(struct ixl_queue *, struct tcphdr *, int);
#endif
#endif /* _I40E_H_ */
#endif /* _IXL_H_ */

sys/dev/i40e/i40e_pf.h → sys/dev/ixl/ixl_pf.h (mode: executable → normal, 24 lines)

@ -33,11 +33,11 @@
/*$FreeBSD$*/
#ifndef _I40E_PF_H_
#define _I40E_PF_H_
#ifndef _IXL_PF_H_
#define _IXL_PF_H_
/* Physical controller structure */
struct i40e_pf {
struct ixl_pf {
struct i40e_hw hw;
struct i40e_osdep osdep;
struct device *dev;
@ -64,6 +64,8 @@ struct i40e_pf {
struct task adminq;
struct taskqueue *tq;
int advertised_speed;
/*
** VSI - Stations:
** These are the traffic class holders, and
@ -71,7 +73,7 @@ struct i40e_pf {
** associated with them.
** NOTE: for now using just one, so embed it.
*/
struct i40e_vsi vsi;
struct ixl_vsi vsi;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@ -84,11 +86,11 @@ struct i40e_pf {
};
#define I40E_PF_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->pf_mtx, _name, "I40E PF Lock", MTX_DEF)
#define I40E_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx)
#define I40E_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx)
#define I40E_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define I40E_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
#define IXL_PF_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)
#define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx)
#define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
#endif /* _I40E_PF_H_ */
#endif /* _IXL_PF_H_ */


@ -33,28 +33,25 @@
/*$FreeBSD$*/
/*
** I40E driver TX/RX Routines:
** IXL driver TX/RX Routines:
** This was separated to allow usage by
** both the BASE and the VF drivers.
*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif
#include "i40e.h"
#include "ixl.h"
/* Local Prototypes */
static void i40e_rx_checksum(struct mbuf *, u32, u32, u8);
static void i40e_refresh_mbufs(struct i40e_queue *, int);
static int i40e_xmit(struct i40e_queue *, struct mbuf **);
static int i40e_tx_setup_offload(struct i40e_queue *,
static void ixl_rx_checksum(struct mbuf *, u32, u32, u8);
static void ixl_refresh_mbufs(struct ixl_queue *, int);
static int ixl_xmit(struct ixl_queue *, struct mbuf **);
static int ixl_tx_setup_offload(struct ixl_queue *,
struct mbuf *, u32 *, u32 *);
static bool i40e_tso_setup(struct i40e_queue *, struct mbuf *);
static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
static __inline void i40e_rx_discard(struct rx_ring *, int);
static __inline void i40e_rx_input(struct rx_ring *, struct ifnet *,
static __inline void ixl_rx_discard(struct rx_ring *, int);
static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
struct mbuf *, u8);
/*
@ -62,10 +59,10 @@ static __inline void i40e_rx_input(struct rx_ring *, struct ifnet *,
**
*/
int
i40e_mq_start(struct ifnet *ifp, struct mbuf *m)
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
{
struct i40e_vsi *vsi = ifp->if_softc;
struct i40e_queue *que;
struct ixl_vsi *vsi = ifp->if_softc;
struct ixl_queue *que;
struct tx_ring *txr;
int err, i;
@ -85,9 +82,9 @@ i40e_mq_start(struct ifnet *ifp, struct mbuf *m)
err = drbr_enqueue(ifp, txr->br, m);
if (err)
return(err);
if (I40E_TX_TRYLOCK(txr)) {
i40e_mq_start_locked(ifp, txr);
I40E_TX_UNLOCK(txr);
if (IXL_TX_TRYLOCK(txr)) {
ixl_mq_start_locked(ifp, txr);
IXL_TX_UNLOCK(txr);
} else
taskqueue_enqueue(que->tq, &que->tx_task);
@ -95,10 +92,10 @@ i40e_mq_start(struct ifnet *ifp, struct mbuf *m)
}
int
i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
struct i40e_queue *que = txr->que;
struct i40e_vsi *vsi = que->vsi;
struct ixl_queue *que = txr->que;
struct ixl_vsi *vsi = que->vsi;
struct mbuf *next;
int err = 0;
@ -109,7 +106,7 @@ i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
/* Process the transmit queue */
while ((next = drbr_peek(ifp, txr->br)) != NULL) {
if ((err = i40e_xmit(que, &next)) != 0) {
if ((err = ixl_xmit(que, &next)) != 0) {
if (next == NULL)
drbr_advance(ifp, txr->br);
else
@ -123,8 +120,8 @@ i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
break;
}
if (txr->avail < I40E_TX_CLEANUP_THRESHOLD)
i40e_txeof(que);
if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
ixl_txeof(que);
return (err);
}
@ -133,35 +130,35 @@ i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
* Called from a taskqueue to drain queued transmit packets.
*/
void
i40e_deferred_mq_start(void *arg, int pending)
ixl_deferred_mq_start(void *arg, int pending)
{
struct i40e_queue *que = arg;
struct ixl_queue *que = arg;
struct tx_ring *txr = &que->txr;
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
struct ifnet *ifp = vsi->ifp;
I40E_TX_LOCK(txr);
IXL_TX_LOCK(txr);
if (!drbr_empty(ifp, txr->br))
i40e_mq_start_locked(ifp, txr);
I40E_TX_UNLOCK(txr);
ixl_mq_start_locked(ifp, txr);
IXL_TX_UNLOCK(txr);
}
/*
** Flush all queue ring buffers
*/
void
i40e_qflush(struct ifnet *ifp)
ixl_qflush(struct ifnet *ifp)
{
struct i40e_vsi *vsi = ifp->if_softc;
struct ixl_vsi *vsi = ifp->if_softc;
for (int i = 0; i < vsi->num_queues; i++) {
struct i40e_queue *que = &vsi->queues[i];
struct ixl_queue *que = &vsi->queues[i];
struct tx_ring *txr = &que->txr;
struct mbuf *m;
I40E_TX_LOCK(txr);
IXL_TX_LOCK(txr);
while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
m_freem(m);
I40E_TX_UNLOCK(txr);
IXL_TX_UNLOCK(txr);
}
if_qflush(ifp);
}
@ -172,7 +169,7 @@ i40e_qflush(struct ifnet *ifp)
** mbufs to deliver an mss-size chunk of data
*/
static inline bool
i40e_tso_detect_sparse(struct mbuf *mp)
ixl_tso_detect_sparse(struct mbuf *mp)
{
struct mbuf *m;
int num = 0, mss;
@ -187,7 +184,7 @@ i40e_tso_detect_sparse(struct mbuf *mp)
if (m->m_next == NULL)
break;
}
if (num > I40E_SPARSE_CHAIN)
if (num > IXL_SPARSE_CHAIN)
ret = TRUE;
return (ret);
@ -201,15 +198,15 @@ i40e_tso_detect_sparse(struct mbuf *mp)
* - return 0 on success, positive on failure
*
**********************************************************************/
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
static int
i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
{
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
struct i40e_hw *hw = vsi->hw;
struct tx_ring *txr = &que->txr;
struct i40e_tx_buf *buf;
struct ixl_tx_buf *buf;
struct i40e_tx_desc *txd = NULL;
struct mbuf *m_head, *m;
int i, j, error, nsegs, maxsegs;
@ -218,7 +215,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
u32 cmd, off;
bus_dmamap_t map;
bus_dma_tag_t tag;
bus_dma_segment_t segs[I40E_MAX_TSO_SEGS];
bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];
cmd = off = 0;
@ -233,13 +230,13 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
buf = &txr->buffers[first];
map = buf->map;
tag = txr->tx_tag;
maxsegs = I40E_MAX_TX_SEGS;
maxsegs = IXL_MAX_TX_SEGS;
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
/* Use larger mapping for TSO */
tag = txr->tso_tag;
maxsegs = I40E_MAX_TSO_SEGS;
if (i40e_tso_detect_sparse(m_head)) {
maxsegs = IXL_MAX_TSO_SEGS;
if (ixl_tso_detect_sparse(m_head)) {
m = m_defrag(m_head, M_NOWAIT);
*m_headp = m;
}
@ -296,7 +293,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
/* Set up the TSO/CSUM offload */
if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
error = i40e_tx_setup_offload(que, m_head, &cmd, &off);
error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
if (error)
goto xmit_fail;
}
@ -335,7 +332,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
}
/* Set the last descriptor for report */
txd->cmd_type_offset_bsz |=
htole64(((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
txr->avail -= nsegs;
txr->next_avail = i;
@ -358,7 +355,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
++txr->total_packets;
wr32(hw, txr->tail, i);
i40e_flush(hw);
ixl_flush(hw);
/* Mark outstanding work */
if (que->busy == 0)
que->busy = 1;
@ -378,12 +375,12 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp)
*
**********************************************************************/
int
i40e_allocate_tx_data(struct i40e_queue *que)
ixl_allocate_tx_data(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
device_t dev = vsi->dev;
struct i40e_tx_buf *buf;
struct ixl_tx_buf *buf;
int error = 0;
/*
@ -394,8 +391,8 @@ i40e_allocate_tx_data(struct i40e_queue *que)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
I40E_TSO_SIZE, /* maxsize */
I40E_MAX_TX_SEGS, /* nsegments */
IXL_TSO_SIZE, /* maxsize */
IXL_MAX_TX_SEGS, /* nsegments */
PAGE_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
@ -411,8 +408,8 @@ i40e_allocate_tx_data(struct i40e_queue *que)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
I40E_TSO_SIZE, /* maxsize */
I40E_MAX_TSO_SEGS, /* nsegments */
IXL_TSO_SIZE, /* maxsize */
IXL_MAX_TSO_SEGS, /* nsegments */
PAGE_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
@ -423,7 +420,7 @@ i40e_allocate_tx_data(struct i40e_queue *que)
}
if (!(txr->buffers =
(struct i40e_tx_buf *) malloc(sizeof(struct i40e_tx_buf) *
(struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
error = ENOMEM;
@ -453,13 +450,21 @@ i40e_allocate_tx_data(struct i40e_queue *que)
*
**********************************************************************/
void
i40e_init_tx_ring(struct i40e_queue *que)
ixl_init_tx_ring(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
struct i40e_tx_buf *buf;
struct ixl_tx_buf *buf;
#ifdef DEV_NETMAP
struct ixl_vsi *vsi = que->vsi;
struct netmap_adapter *na = NA(vsi->ifp);
struct netmap_slot *slot;
#endif /* DEV_NETMAP */
/* Clear the old ring contents */
I40E_TX_LOCK(txr);
IXL_TX_LOCK(txr);
#ifdef DEV_NETMAP
slot = netmap_reset(na, NR_TX, que->me, 0);
#endif
bzero((void *)txr->base,
(sizeof(struct i40e_tx_desc)) * que->num_desc);
@ -467,9 +472,9 @@ i40e_init_tx_ring(struct i40e_queue *que)
txr->next_avail = 0;
txr->next_to_clean = 0;
#ifdef I40E_FDIR
#ifdef IXL_FDIR
/* Initialize flow director */
txr->atr_rate = i40e_atr_rate;
txr->atr_rate = ixl_atr_rate;
txr->atr_count = 0;
#endif
@ -483,6 +488,13 @@ i40e_init_tx_ring(struct i40e_queue *que)
m_freem(buf->m_head);
buf->m_head = NULL;
}
#ifdef DEV_NETMAP
if (slot)
{
int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
netmap_load_map(txr->tag, buf->map, NMB(slot + si));
}
#endif
/* Clear the EOP index */
buf->eop_index = -1;
}
@ -492,7 +504,7 @@ i40e_init_tx_ring(struct i40e_queue *que)
bus_dmamap_sync(txr->dma.tag, txr->dma.map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
I40E_TX_UNLOCK(txr);
IXL_TX_UNLOCK(txr);
}
@ -502,12 +514,12 @@ i40e_init_tx_ring(struct i40e_queue *que)
*
**********************************************************************/
void
i40e_free_que_tx(struct i40e_queue *que)
ixl_free_que_tx(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
struct i40e_tx_buf *buf;
struct ixl_tx_buf *buf;
INIT_DEBUGOUT("i40e_free_que_tx: begin");
INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
for (int i = 0; i < que->num_desc; i++) {
buf = &txr->buffers[i];
@ -545,6 +557,8 @@ i40e_free_que_tx(struct i40e_queue *que)
bus_dma_tag_destroy(txr->tso_tag);
txr->tso_tag = NULL;
}
INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
return;
}
@ -555,7 +569,7 @@ i40e_free_que_tx(struct i40e_queue *que)
**********************************************************************/
static int
i40e_tx_setup_offload(struct i40e_queue *que,
ixl_tx_setup_offload(struct ixl_queue *que,
struct mbuf *mp, u32 *cmd, u32 *off)
{
struct ether_vlan_header *eh;
@ -570,7 +584,7 @@ i40e_tx_setup_offload(struct i40e_queue *que,
/* Set up the TSO context descriptor if required */
if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
tso = i40e_tso_setup(que, mp);
tso = ixl_tso_setup(que, mp);
if (tso)
++que->tso;
else
@ -625,8 +639,8 @@ i40e_tx_setup_offload(struct i40e_queue *que,
*off |= (tcp_hlen >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
#ifdef I40E_FDIR
i40e_atr(que, th, etype);
#ifdef IXL_FDIR
ixl_atr(que, th, etype);
#endif
break;
case IPPROTO_UDP:
@ -658,11 +672,11 @@ i40e_tx_setup_offload(struct i40e_queue *que,
*
**********************************************************************/
static bool
i40e_tso_setup(struct i40e_queue *que, struct mbuf *mp)
ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
{
struct tx_ring *txr = &que->txr;
struct i40e_tx_context_desc *TXD;
struct i40e_tx_buf *buf;
struct ixl_tx_buf *buf;
u32 cmd, mss, type, tsolen;
u16 etype;
int idx, elen, ip_hlen, tcp_hlen;
@ -749,11 +763,11 @@ i40e_tso_setup(struct i40e_queue *que, struct mbuf *mp)
}
/*
** i40e_get_tx_head - Retrieve the value from the
** ixl_get_tx_head - Retrieve the value from the
** location the HW records its HEAD index
*/
static inline u32
i40e_get_tx_head(struct i40e_queue *que)
ixl_get_tx_head(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
void *head = &txr->base[que->num_desc];
@ -768,18 +782,47 @@ i40e_get_tx_head(struct i40e_queue *que)
*
**********************************************************************/
bool
i40e_txeof(struct i40e_queue *que)
ixl_txeof(struct ixl_queue *que)
{
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
struct ifnet *ifp = vsi->ifp;
struct tx_ring *txr = &que->txr;
u32 first, last, head, done, processed;
struct i40e_tx_buf *buf;
struct ixl_tx_buf *buf;
struct i40e_tx_desc *tx_desc, *eop_desc;
mtx_assert(&txr->mtx, MA_OWNED);
#ifdef DEV_NETMAP
if (ifp->if_capenable & IFCAP_NETMAP) {
struct netmap_adapter *na = NA(ifp);
struct netmap_kring *kring = &na->tx_rings[que->me];
tx_desc = txr->base;
bus_dmamap_sync(txr->dma.tag, txr->dma.map,
BUS_DMASYNC_POSTREAD);
if (!netmap_mitigate ||
(kring->nr_kflags < kring->nkr_num_slots &&
tx_desc[kring->nr_kflags].cmd_type_offset_bsz &
htole32(I40E_TX_DESC_DTYPE_DESC_DONE)))
{
#if NETMAP_API < 4
struct ixl_pf *pf = vsi->pf;
kring->nr_kflags = kring->nkr_num_slots;
selwakeuppri(&na->tx_rings[que->me].si, PI_NET);
IXL_TX_UNLOCK(txr);
IXL_PF_LOCK(pf);
selwakeuppri(&na->tx_si, PI_NET);
IXL_PF_UNLOCK(pf);
IXL_TX_LOCK(txr);
#else /* NETMAP_API >= 4 */
netmap_tx_irq(ifp, txr->que->me);
#endif /* NETMAP_API */
}
// XXX guessing there is no more work to be done
return FALSE;
}
#endif /* DEV_NETMAP */
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
@ -797,7 +840,7 @@ i40e_txeof(struct i40e_queue *que)
eop_desc = (struct i40e_tx_desc *)&txr->base[last];
/* Get the Head WB value */
head = i40e_get_tx_head(que);
head = ixl_get_tx_head(que);
/*
** Get the index of the first descriptor
@ -823,7 +866,9 @@ i40e_txeof(struct i40e_queue *que)
++processed;
if (buf->m_head) {
txr->bytes +=
txr->bytes += /* for ITR adjustment */
buf->m_head->m_pkthdr.len;
txr->tx_bytes += /* for TX stats */
buf->m_head->m_pkthdr.len;
bus_dmamap_sync(buf->tag,
buf->map,
@ -869,7 +914,7 @@ i40e_txeof(struct i40e_queue *que)
** be considered hung. If anything has been
** cleaned then reset the state.
*/
if ((processed == 0) && (que->busy != I40E_QUEUE_HUNG))
if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
++que->busy;
if (processed)
@ -896,13 +941,13 @@ i40e_txeof(struct i40e_queue *que)
*
**********************************************************************/
static void
i40e_refresh_mbufs(struct i40e_queue *que, int limit)
ixl_refresh_mbufs(struct ixl_queue *que, int limit)
{
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
struct rx_ring *rxr = &que->rxr;
bus_dma_segment_t hseg[1];
bus_dma_segment_t pseg[1];
struct i40e_rx_buf *buf;
struct ixl_rx_buf *buf;
struct mbuf *mh, *mp;
int i, j, nsegs, error;
bool refreshed = FALSE;
@ -966,8 +1011,12 @@ i40e_refresh_mbufs(struct i40e_queue *que, int limit)
buf->m_pack = mp;
bus_dmamap_sync(rxr->ptag, buf->pmap,
BUS_DMASYNC_PREREAD);
#ifdef DEV_NETMAP
rxr->base[i].read.pkt_addr = buf->addr;
#else /* !DEV_NETMAP */
rxr->base[i].read.pkt_addr =
htole64(pseg[0].ds_addr);
#endif /* DEV_NETMAP */
/* Used only when doing header split */
rxr->base[i].read.hdr_addr = 0;
@ -994,17 +1043,17 @@ i40e_refresh_mbufs(struct i40e_queue *que, int limit)
*
**********************************************************************/
int
i40e_allocate_rx_data(struct i40e_queue *que)
ixl_allocate_rx_data(struct ixl_queue *que)
{
struct rx_ring *rxr = &que->rxr;
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
device_t dev = vsi->dev;
struct i40e_rx_buf *buf;
struct ixl_rx_buf *buf;
int i, bsize, error;
bsize = sizeof(struct i40e_rx_buf) * que->num_desc;
bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
if (!(rxr->buffers =
(struct i40e_rx_buf *) malloc(bsize,
(struct ixl_rx_buf *) malloc(bsize,
M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
error = ENOMEM;
@ -1069,17 +1118,24 @@ i40e_allocate_rx_data(struct i40e_queue *que)
*
**********************************************************************/
int
i40e_init_rx_ring(struct i40e_queue *que)
ixl_init_rx_ring(struct ixl_queue *que)
{
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
struct ifnet *ifp = vsi->ifp;
struct rx_ring *rxr = &que->rxr;
struct lro_ctrl *lro = &rxr->lro;
struct i40e_rx_buf *buf;
struct ixl_rx_buf *buf;
bus_dma_segment_t pseg[1], hseg[1];
int rsize, nsegs, error = 0;
#ifdef DEV_NETMAP
struct netmap_adapter *na = NA(ifp);
struct netmap_slot *slot;
#endif /* DEV_NETMAP */
I40E_RX_LOCK(rxr);
IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
slot = netmap_reset(na, NR_RX, que->me, 0);
#endif
/* Clear the ring contents */
rsize = roundup2(que->num_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
@ -1113,6 +1169,21 @@ i40e_init_rx_ring(struct i40e_queue *que)
struct mbuf *mh, *mp;
buf = &rxr->buffers[j];
#ifdef DEV_NETMAP
if (slot)
{
int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
u64 paddr;
void *addr;
addr = PNMB(slot + sj, &paddr);
netmap_load_map(rxr->ptag, buf->pmap, addr);
/* Update descriptor and cached value */
rxr->base[j].read.pkt_addr = htole64(paddr);
buf->addr = htole64(paddr);
continue;
}
#endif /* DEV_NETMAP */
/*
** Don't allocate mbufs if not
** doing header split, its wasteful
@ -1179,10 +1250,10 @@ i40e_init_rx_ring(struct i40e_queue *que)
if (ifp->if_capenable & IFCAP_LRO) {
int err = tcp_lro_init(lro);
if (err) {
printf("LRO Initialization failed!\n");
if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
goto fail;
}
INIT_DEBUGOUT("RX Soft LRO Initialized\n");
INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
rxr->lro_enabled = TRUE;
lro->ifp = vsi->ifp;
}
@ -1191,7 +1262,7 @@ i40e_init_rx_ring(struct i40e_queue *que)
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
fail:
I40E_RX_UNLOCK(rxr);
IXL_RX_UNLOCK(rxr);
return (error);
}
@ -1202,12 +1273,12 @@ i40e_init_rx_ring(struct i40e_queue *que)
*
**********************************************************************/
void
i40e_free_que_rx(struct i40e_queue *que)
ixl_free_que_rx(struct ixl_queue *que)
{
struct rx_ring *rxr = &que->rxr;
struct i40e_rx_buf *buf;
struct ixl_rx_buf *buf;
INIT_DEBUGOUT("free_que_rx: begin");
INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
/* Cleanup any existing buffers */
if (rxr->buffers != NULL) {
@ -1252,11 +1323,13 @@ i40e_free_que_rx(struct i40e_queue *que)
bus_dma_tag_destroy(rxr->ptag);
rxr->ptag = NULL;
}
INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
return;
}
static __inline void
i40e_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{
/*
* ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
@ -1277,16 +1350,16 @@ i40e_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
return;
}
I40E_RX_UNLOCK(rxr);
IXL_RX_UNLOCK(rxr);
(*ifp->if_input)(ifp, m);
I40E_RX_LOCK(rxr);
IXL_RX_LOCK(rxr);
}
static __inline void
i40e_rx_discard(struct rx_ring *rxr, int i)
ixl_rx_discard(struct rx_ring *rxr, int i)
{
struct i40e_rx_buf *rbuf;
struct ixl_rx_buf *rbuf;
rbuf = &rxr->buffers[i];
@ -1329,20 +1402,42 @@ i40e_rx_discard(struct rx_ring *rxr, int i)
* Return TRUE for more work, FALSE for all clean.
*********************************************************************/
bool
i40e_rxeof(struct i40e_queue *que, int count)
ixl_rxeof(struct ixl_queue *que, int count)
{
struct i40e_vsi *vsi = que->vsi;
struct ixl_vsi *vsi = que->vsi;
struct rx_ring *rxr = &que->rxr;
struct ifnet *ifp = vsi->ifp;
struct lro_ctrl *lro = &rxr->lro;
struct lro_entry *queued;
int i, nextp, processed = 0;
union i40e_rx_desc *cur;
struct i40e_rx_buf *rbuf, *nbuf;
struct ixl_rx_buf *rbuf, *nbuf;
I40E_RX_LOCK(rxr);
IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
#if NETMAP_API < 4
if (ifp->if_capenable & IFCAP_NETMAP)
{
struct netmap_adapter *na = NA(ifp);
na->rx_rings[que->me].nr_kflags |= NKR_PENDINTR;
selwakeuppri(&na->rx_rings[que->me].si, PI_NET);
IXL_RX_UNLOCK(rxr);
IXL_PF_LOCK(vsi->pf);
selwakeuppri(&na->rx_si, PI_NET);
IXL_PF_UNLOCK(vsi->pf);
return (FALSE);
}
#else /* NETMAP_API >= 4 */
if (netmap_rx_irq(ifp, que->me, &processed))
{
IXL_RX_UNLOCK(rxr);
return (FALSE);
}
#endif /* NETMAP_API */
#endif /* DEV_NETMAP */
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
@ -1398,7 +1493,7 @@ i40e_rxeof(struct i40e_queue *que, int count)
if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
ifp->if_ierrors++;
rxr->discarded++;
i40e_rx_discard(rxr, i);
ixl_rx_discard(rxr, i);
goto next_desc;
}
@ -1423,8 +1518,8 @@ i40e_rxeof(struct i40e_queue *que, int count)
** descriptor to the next, until we get EOP.
*/
if (rxr->hdr_split && (rbuf->fmp == NULL)) {
if (hlen > I40E_RX_HDR)
hlen = I40E_RX_HDR;
if (hlen > IXL_RX_HDR)
hlen = IXL_RX_HDR;
mh->m_len = hlen;
mh->m_flags |= M_PKTHDR;
mh->m_next = NULL;
@ -1512,7 +1607,7 @@ i40e_rxeof(struct i40e_queue *que, int count)
rxr->packets++;
rxr->bytes += sendmp->m_pkthdr.len;
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
i40e_rx_checksum(sendmp, status, error, ptype);
ixl_rx_checksum(sendmp, status, error, ptype);
sendmp->m_pkthdr.flowid = que->msix;
sendmp->m_flags |= M_FLOWID;
}
@ -1527,20 +1622,20 @@ i40e_rxeof(struct i40e_queue *que, int count)
/* Now send to the stack or do LRO */
if (sendmp != NULL) {
rxr->next_check = i;
i40e_rx_input(rxr, ifp, sendmp, ptype);
ixl_rx_input(rxr, ifp, sendmp, ptype);
i = rxr->next_check;
}
/* Every 8 descriptors we go to refresh mbufs */
if (processed == 8) {
i40e_refresh_mbufs(que, i);
ixl_refresh_mbufs(que, i);
processed = 0;
}
}
/* Refresh any remaining buf structs */
if (i40e_rx_unrefreshed(que))
i40e_refresh_mbufs(que, i);
if (ixl_rx_unrefreshed(que))
ixl_refresh_mbufs(que, i);
rxr->next_check = i;
@ -1552,7 +1647,7 @@ i40e_rxeof(struct i40e_queue *que, int count)
tcp_lro_flush(lro, queued);
}
I40E_RX_UNLOCK(rxr);
IXL_RX_UNLOCK(rxr);
return (FALSE);
}
@ -1565,7 +1660,7 @@ i40e_rxeof(struct i40e_queue *que, int count)
*
*********************************************************************/
static void
i40e_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
{
struct i40e_rx_ptype_decoded decoded;

sys/dev/ixl/ixlv.h Normal file

@ -0,0 +1,205 @@
/******************************************************************************
Copyright (c) 2013-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXLV_H_
#define _IXLV_H_
#define IXLV_AQ_MAX_ERR 100
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
#define IXLV_CALLOUT_TIMO (hz / 50) // 20 msec
#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1)
#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
#define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
#define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
#define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
/* printf %b arg */
#define IXLV_FLAGS \
"\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \
"\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS"
/* Driver state */
enum ixlv_state_t {
IXLV_START,
IXLV_FAILED,
IXLV_RESET_REQUIRED,
IXLV_RESET_PENDING,
IXLV_VERSION_CHECK,
IXLV_GET_RESOURCES,
IXLV_INIT_READY,
IXLV_INIT_START,
IXLV_INIT_CONFIG,
IXLV_INIT_MAPPING,
IXLV_INIT_ENABLE,
IXLV_INIT_COMPLETE,
IXLV_RUNNING,
};
struct ixlv_mac_filter {
SLIST_ENTRY(ixlv_mac_filter) next;
u8 macaddr[ETHER_ADDR_LEN];
u16 flags;
};
SLIST_HEAD(mac_list, ixlv_mac_filter);
struct ixlv_vlan_filter {
SLIST_ENTRY(ixlv_vlan_filter) next;
u16 vlan;
u16 flags;
};
SLIST_HEAD(vlan_list, ixlv_vlan_filter);
/* Software controller structure */
struct ixlv_sc {
struct i40e_hw hw;
struct i40e_osdep osdep;
struct device *dev;
struct resource *pci_mem;
struct resource *msix_mem;
enum ixlv_state_t init_state;
/*
* Interrupt resources
*/
void *tag;
struct resource *res; /* For the AQ */
struct ifmedia media;
struct callout timer;
struct callout aq_task;
int msix;
int if_flags;
struct mtx mtx;
struct mtx aq_task_mtx;
u32 qbase;
u32 admvec;
struct timeout_task timeout;
struct task aq_irq;
struct task aq_sched;
struct taskqueue *tq;
struct ixl_vsi vsi;
/* Mac Filter List */
struct mac_list *mac_filters;
/* Vlan Filter List */
struct vlan_list *vlan_filters;
/* Promiscuous mode */
u32 promiscuous_flags;
/* Admin queue task flags */
u32 aq_wait_count;
u32 aq_required;
u32 aq_pending;
/* Virtual comm channel */
enum i40e_virtchnl_ops current_op;
struct i40e_virtchnl_vf_resource *vf_res;
struct i40e_virtchnl_vsi_resource *vsi_res;
/* Misc stats maintained by the driver */
u64 watchdog_events;
u64 admin_irq;
/* Signaling channels */
u8 init_done;
u8 config_queues_done;
u8 map_vectors_done;
u8 enable_queues_done;
u8 disable_queues_done;
u8 add_ether_done;
u8 del_ether_done;
};
/*
** This checks for a zero MAC address, which is likely
** unless the Admin on the Host has assigned one.
*/
static inline bool
ixlv_check_ether_addr(u8 *addr)
{
bool status = TRUE;
if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
status = FALSE;
return (status);
}
/*
** VF Common function prototypes
*/
int ixlv_send_api_ver(struct ixlv_sc *);
int ixlv_verify_api_ver(struct ixlv_sc *);
int ixlv_send_vf_config_msg(struct ixlv_sc *);
int ixlv_get_vf_config(struct ixlv_sc *);
void ixlv_init(void *);
int ixlv_reinit_locked(struct ixlv_sc *);
void ixlv_configure_queues(struct ixlv_sc *);
void ixlv_enable_queues(struct ixlv_sc *);
void ixlv_disable_queues(struct ixlv_sc *);
void ixlv_map_queues(struct ixlv_sc *);
void ixlv_enable_intr(struct ixl_vsi *);
void ixlv_disable_intr(struct ixl_vsi *);
void ixlv_add_ether_filters(struct ixlv_sc *);
void ixlv_del_ether_filters(struct ixlv_sc *);
void ixlv_request_stats(struct ixlv_sc *);
void ixlv_request_reset(struct ixlv_sc *);
void ixlv_vc_completion(struct ixlv_sc *,
enum i40e_virtchnl_ops, i40e_status, u8 *, u16);
void ixlv_add_ether_filter(struct ixlv_sc *);
void ixlv_add_vlans(struct ixlv_sc *);
void ixlv_del_vlans(struct ixlv_sc *);
void ixlv_update_stats_counters(struct ixlv_sc *,
struct i40e_eth_stats *);
#endif /* _IXLV_H_ */

sys/dev/ixl/ixlvc.c Normal file

@ -0,0 +1,976 @@
/******************************************************************************
Copyright (c) 2013-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
/*
** Virtual Channel support
** These are support functions for communication
** between the VF and PF drivers.
*/
#include "ixl.h"
#include "ixlv.h"
#include "i40e_prototype.h"
/* busy wait delay in msec */
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50
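/* i.e. a busy-wait loop bounded by these spins for at most 10 x 50 = ~500 msec */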
/*
** Validate VF messages
*/
static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
int valid_len;
/* Validate message length. */
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
break;
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_rxq_info);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_vsi_queue_config_info *vqc =
(struct i40e_virtchnl_vsi_queue_config_info *)msg;
valid_len += (vqc->num_queue_pairs *
sizeof(struct
i40e_virtchnl_queue_pair_info));
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_irq_map_info *vimi =
(struct i40e_virtchnl_irq_map_info *)msg;
valid_len += (vimi->num_vectors *
sizeof(struct i40e_virtchnl_vector_map));
if (vimi->num_vectors == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct i40e_virtchnl_ether_addr_list *veal =
(struct i40e_virtchnl_ether_addr_list *)msg;
valid_len += veal->num_elements *
sizeof(struct i40e_virtchnl_ether_addr);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
case I40E_VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct i40e_virtchnl_vlan_filter_list *vfl =
(struct i40e_virtchnl_vlan_filter_list *)msg;
valid_len += vfl->num_elements * sizeof(u16);
if (vfl->num_elements == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct i40e_virtchnl_promisc_info);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
return EPERM;
}
/* few more checks */
if ((valid_len != msglen) || (err_msg_format))
return EINVAL;
else
return 0;
}
/*
** ixlv_send_pf_msg
**
** Send message to PF and print status if failure.
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
i40e_status err;
int val_err;
/*
** Pre-validate messages going to the PF; this might be
** removed later for performance.
*/
val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
if (val_err)
device_printf(dev, "Error validating msg to PF for op %d,"
" msglen %d: error %d\n", op, len, val_err);
err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
if (err)
device_printf(dev, "Unable to send opcode %d to PF, "
"error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
return err;
}
/*
** ixlv_send_api_ver
**
** Send API version admin queue message to the PF. The reply is not checked
** in this function. Returns 0 if the message was successfully
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
struct i40e_virtchnl_version_info vvi;
vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
(u8 *)&vvi, sizeof(vvi));
}
/*
** ixlv_verify_api_ver
**
** Compare API versions with the PF. Must be called after admin queue is
** initialized. Returns 0 if API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int ixlv_verify_api_ver(struct ixlv_sc *sc)
{
struct i40e_virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
i40e_status err;
int retries = 0;
event.buf_len = IXL_AQ_BUFSZ;
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
if (!event.msg_buf) {
err = ENOMEM;
goto out;
}
do {
if (++retries > IXLV_AQ_MAX_ERR)
goto out_alloc;
/* NOTE: initial delay is necessary */
i40e_msec_delay(100);
err = i40e_clean_arq_element(hw, &event, NULL);
} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
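/* worst case above: IXLV_AQ_MAX_ERR retries x 100 msec = ~10 seconds */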
if (err)
goto out_alloc;
err = (i40e_status)le32toh(event.desc.cookie_low);
if (err) {
err = EIO;
goto out_alloc;
}
if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_VERSION) {
err = EIO;
goto out_alloc;
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
(pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
err = EIO;
out_alloc:
free(event.msg_buf, M_DEVBUF);
out:
return err;
}
/*
** ixlv_send_vf_config_msg
**
** Send VF configuration request admin queue message to the PF. The reply
** is not checked in this function. Returns 0 if the message was
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0);
}
/*
** ixlv_get_vf_config
**
** Get VF configuration from PF and populate hw structure. Must be called after
** admin queue is initialized. Busy waits until response is received from PF,
** with maximum timeout. Response from PF is returned in the buffer for further
** processing by the caller.
*/
int
ixlv_get_vf_config(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct i40e_arq_event_info event;
u16 len;
i40e_status err = 0;
u32 retries = 0;
/* Note this assumes a single VSI */
len = sizeof(struct i40e_virtchnl_vf_resource) +
sizeof(struct i40e_virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
if (!event.msg_buf) {
err = ENOMEM;
goto out;
}
do {
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
i40e_msec_delay(100);
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
device_printf(dev, "%s: Received a response from PF,"
" opcode %d, error %d\n", __func__,
le32toh(event.desc.cookie_high),
le32toh(event.desc.cookie_low));
retries++;
continue;
} else {
err = (i40e_status)le32toh(event.desc.cookie_low);
if (err) {
device_printf(dev, "%s: Error returned from PF,"
" opcode %d, error %d\n", __func__,
le32toh(event.desc.cookie_high),
le32toh(event.desc.cookie_low));
err = EIO;
goto out_alloc;
}
break;
}
if (retries > IXLV_AQ_MAX_ERR) {
INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
retries);
goto out_alloc;
}
} while (err);
memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
i40e_vf_parse_hw_config(hw, sc->vf_res);
out_alloc:
free(event.msg_buf, M_DEVBUF);
out:
return err;
}
/*
** ixlv_configure_queues
**
** Request that the PF set up our queues.
*/
void
ixlv_configure_queues(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
struct tx_ring *txr;
struct rx_ring *rxr;
int len, pairs;
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
#ifdef IXL_DEBUG
device_printf(dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
pairs = vsi->num_queues;
sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!vqci) {
device_printf(dev, "%s: unable to allocate memory\n", __func__);
return;
}
vqci->vsi_id = sc->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
/* Size check is not needed here - HW max is 16 queue pairs, and we
* can fit info for 31 of them into the AQ buffer before it overflows.
*/
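/*
** (Rough arithmetic behind that claim, assuming a 4KB AQ buffer:
** (4096 - sizeof(*vqci)) / sizeof(*vqpi) comes to roughly 31.)
*/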
for (int i = 0; i < pairs; i++, que++) {
txr = &que->txr;
rxr = &que->rxr;
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = que->num_desc;
vqpi->txq.dma_ring_addr = txr->dma.pa;
/* Enable Head writeback */
vqpi->txq.headwb_enabled = 1;
vqpi->txq.dma_headwb_addr = txr->dma.pa +
(que->num_desc * sizeof(struct i40e_tx_desc));
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = que->num_desc;
vqpi->rxq.dma_ring_addr = rxr->dma.pa;
vqpi->rxq.max_pkt_size = vsi->max_frame_size;
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
vqpi++;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
free(vqci, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES;
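/*
** The op is now in flight; ixlv_vc_completion() clears the
** aq_pending bit when the PF's reply for this op arrives.
*/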
}
/*
** ixlv_enable_queues
**
** Request that the PF enable all of our queues.
*/
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* we already have a command pending */
#ifdef IXL_DEBUG
device_printf(sc->dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
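/* e.g. with 4 queue pairs: (1 << 4) - 1 == 0xf selects queues 0-3 */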
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES;
sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES;
}
/*
** ixlv_disable_queues
**
** Request that the PF disable all of our queues.
*/
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* we already have a command pending */
#ifdef IXL_DEBUG
device_printf(sc->dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES;
sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES;
}
/*
** ixlv_map_queues
**
** Request that the PF map queues to interrupt vectors. Misc causes, including
** admin queue, are always mapped to vector 0.
*/
void
ixlv_map_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_irq_map_info *vm;
int i, q, len;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* we already have a command pending */
#ifdef IXL_DEBUG
device_printf(sc->dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
/* How many queue vectors; the adminq uses one */
q = sc->msix - 1;
len = sizeof(struct i40e_virtchnl_irq_map_info) +
(sc->msix * sizeof(struct i40e_virtchnl_vector_map));
vm = malloc(len, M_DEVBUF, M_NOWAIT);
if (!vm) {
printf("%s: unable to allocate memory\n", __func__);
return;
}
vm->num_vectors = sc->msix;
/* Queue vectors first */
for (i = 0; i < q; i++, que++) {
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
vm->vecmap[i].vector_id = i + 1; /* first is adminq */
vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << que->me);
}
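/* e.g. sc->msix == 5: vectors 1-4 now service queues 0-3,
** leaving vector 0 for the adminq entry filled in below */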
/* Misc vector last - this is only for AdminQ messages */
vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
vm->vecmap[i].vector_id = 0;
vm->vecmap[i].txq_map = 0;
vm->vecmap[i].rxq_map = 0;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vm, len);
free(vm, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS;
sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS;
}
/*
** Scan the Filter List looking for vlans that need
** to be added, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
struct i40e_virtchnl_vlan_filter_list *v;
struct ixlv_vlan_filter *f, *ftmp;
device_t dev = sc->dev;
int len, i = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
/* Get count of VLAN filters to add */
SLIST_FOREACH(f, sc->vlan_filters, next) {
if (f->flags & IXL_FILTER_ADD)
cnt++;
}
if (!cnt) { /* no work... */
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(cnt * sizeof(u16));
if (len > IXL_AQ_BUF_SZ) {
device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
__func__);
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
v = malloc(len, M_DEVBUF, M_NOWAIT);
if (!v) {
device_printf(dev, "%s: unable to allocate memory\n",
__func__);
return;
}
v->vsi_id = sc->vsi_res->vsi_id;
v->num_elements = cnt;
/* Scan the filter array */
SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
if (f->flags & IXL_FILTER_ADD) {
bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
f->flags = IXL_FILTER_USED;
i++;
}
if (i == cnt)
break;
}
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
free(v, M_DEVBUF); /* don't leak the AQ buffer */
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
}
/*
** Scan the Filter Table looking for vlans that need
** to be removed, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_del_vlans(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct i40e_virtchnl_vlan_filter_list *v;
struct ixlv_vlan_filter *f, *ftmp;
int len, i = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
/* Get count of VLAN filters to delete */
SLIST_FOREACH(f, sc->vlan_filters, next) {
if (f->flags & IXL_FILTER_DEL)
cnt++;
}
if (!cnt) { /* no work... */
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(cnt * sizeof(u16));
if (len > IXL_AQ_BUF_SZ) {
device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
__func__);
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!v) {
device_printf(dev, "%s: unable to allocate memory\n",
__func__);
return;
}
v->vsi_id = sc->vsi_res->vsi_id;
v->num_elements = cnt;
/* Scan the filter array */
SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
if (f->flags & IXL_FILTER_DEL) {
bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
i++;
SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
free(f, M_DEVBUF);
}
if (i == cnt)
break;
}
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
free(v, M_DEVBUF); /* don't leak the AQ buffer */
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
}
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
struct i40e_virtchnl_ether_addr_list *a;
struct ixlv_mac_filter *f;
device_t dev = sc->dev;
int len, j = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
/* Get count of MAC addresses to add */
SLIST_FOREACH(f, sc->mac_filters, next) {
if (f->flags & IXL_FILTER_ADD)
cnt++;
}
if (cnt == 0) { /* Should not happen... */
DDPRINTF(dev, "cnt == 0, exiting...");
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
wakeup(&sc->add_ether_done);
return;
}
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(cnt * sizeof(struct i40e_virtchnl_ether_addr));
a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (a == NULL) {
device_printf(dev, "%s: Failed to get memory for "
"virtchnl_ether_addr_list\n", __func__);
return;
}
a->vsi_id = sc->vsi.id;
a->num_elements = cnt;
/* Scan the filter array */
SLIST_FOREACH(f, sc->mac_filters, next) {
if (f->flags & IXL_FILTER_ADD) {
bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
f->flags &= ~IXL_FILTER_ADD;
j++;
DDPRINTF(dev, "ADD: " MAC_FORMAT,
MAC_FORMAT_ARGS(f->macaddr));
}
if (j == cnt)
break;
}
DDPRINTF(dev, "len %d, j %d, cnt %d",
len, j, cnt);
ixlv_send_pf_msg(sc,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
/* add stats? */
free(a, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
return;
}
/*
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
struct i40e_virtchnl_ether_addr_list *d;
device_t dev = sc->dev;
struct ixlv_mac_filter *f, *f_temp;
int len, j = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
/* Get count of MAC addresses to delete */
SLIST_FOREACH(f, sc->mac_filters, next) {
if (f->flags & IXL_FILTER_DEL)
cnt++;
}
if (cnt == 0) {
DDPRINTF(dev, "cnt == 0, exiting...");
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
wakeup(&sc->del_ether_done);
return;
}
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(cnt * sizeof(struct i40e_virtchnl_ether_addr));
d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (d == NULL) {
device_printf(dev, "%s: Failed to get memory for "
"virtchnl_ether_addr_list\n", __func__);
return;
}
d->vsi_id = sc->vsi.id;
d->num_elements = cnt;
/* Scan the filter array */
SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
if (f->flags & IXL_FILTER_DEL) {
bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
DDPRINTF(dev, "DEL: " MAC_FORMAT,
MAC_FORMAT_ARGS(f->macaddr));
j++;
SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
free(f, M_DEVBUF);
}
if (j == cnt)
break;
}
ixlv_send_pf_msg(sc,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
/* add stats? */
free(d, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
return;
}
/*
** ixlv_request_reset
** Request that the PF reset this VF. No response is expected.
*/
void
ixlv_request_reset(struct ixlv_sc *sc)
{
/*
** Set the reset status to "in progress" before
** the request; this avoids any possibility of
** a mistaken early detection of completion.
*/
wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
/*
** ixlv_request_stats
** Request the statistics for this VF's VSI from PF.
*/
void
ixlv_request_stats(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
int error = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = sc->vsi_res->vsi_id;
error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs));
/* Low priority, ok if it fails */
if (error)
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
/*
** Updates driver's stats counters with VSI stats returned from PF.
*/
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
struct ifnet *ifp = sc->vsi.ifp;
ifp->if_ipackets = es->rx_unicast +
es->rx_multicast +
es->rx_broadcast;
ifp->if_opackets = es->tx_unicast +
es->tx_multicast +
es->tx_broadcast;
ifp->if_ibytes = es->rx_bytes;
ifp->if_obytes = es->tx_bytes;
ifp->if_imcasts = es->rx_multicast;
ifp->if_omcasts = es->tx_multicast;
ifp->if_oerrors = es->tx_errors;
ifp->if_iqdrops = es->rx_discards;
ifp->if_noproto = es->rx_unknown_protocol;
sc->vsi.eth_stats = *es;
}
/*
** ixlv_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** wait, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
struct i40e_virtchnl_pf_event *vpe =
(struct i40e_virtchnl_pf_event *)msg;
switch (vpe->event) {
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
vsi->link_up =
vpe->event_data.link_event.link_status;
vsi->link_speed =
vpe->event_data.link_event.link_speed;
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
sc->init_state = IXLV_RESET_PENDING;
ixlv_init(sc);
break;
default:
device_printf(dev, "%s: Unknown event %d from AQ\n",
__func__, vpe->event);
break;
}
return;
}
if (v_opcode != sc->current_op
&& sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) {
device_printf(dev, "%s: Pending op is %d, received %d.\n",
__func__, sc->current_op, v_opcode);
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
/* Catch-all error response */
if (v_retval) {
device_printf(dev,
"%s: AQ returned error %d to our request %d!\n",
__func__, v_retval, v_opcode);
}
#ifdef IXL_DEBUG
if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
DDPRINTF(dev, "opcode %d", v_opcode);
#endif
switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS:
ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER);
if (v_retval) {
device_printf(dev, "WARNING: Error adding VF mac filter!\n");
device_printf(dev, "WARNING: Device may not receive traffic!\n");
}
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_VLAN_FILTER);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES);
if (v_retval == 0) {
/* Turn on all interrupts */
ixlv_enable_intr(vsi);
/* And inform the stack we're ready */
vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES);
if (v_retval == 0) {
/* Turn off all interrupts */
ixlv_disable_intr(vsi);
/* Tell the stack that the interface is no longer active */
vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS);
break;
default:
device_printf(dev,
"%s: Received unexpected message %d from PF.\n",
__func__, v_opcode);
break;
}
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}

sys/modules/ixl/Makefile

@ -1,10 +1,11 @@
#$FreeBSD$
.PATH: ${.CURDIR}/../../dev/i40e
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_i40e
KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h opt_bdg.h
SRCS += if_i40e.c i40e_txrx.c i40e_osdep.c
SRCS += opt_inet.h opt_inet6.h
SRCS += if_ixl.c ixl_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
@ -12,8 +13,8 @@ SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
CFLAGS += -DSMP
# Add Flow Director support
# CFLAGS += -DI40E_FDIR
# CFLAGS += -DIXL_FDIR
# Debug messages / sysctls
# CFLAGS += -DI40E_DEBUG
# CFLAGS += -DIXL_DEBUG
.include <bsd.kmod.mk>

sys/modules/ixlv/Makefile Executable file

@ -0,0 +1,20 @@
#$FreeBSD$
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_ixlv
SRCS = device_if.h bus_if.h pci_if.h opt_bdg.h
SRCS += opt_inet.h opt_inet6.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
CFLAGS += -DSMP
# Add Flow Director support
# CFLAGS += -DIXL_FDIR
# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG
.include <bsd.kmod.mk>