net/avf/base: add base code for avf PMD

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Jingjing Wu authored on 2018-01-10 21:01:53 +08:00, committed by Ferruh Yigit
parent 597d2ce5b4
commit e5b2a9e957
17 changed files with 10121 additions and 0 deletions

MAINTAINERS

@@ -430,6 +430,11 @@ T: git://dpdk.org/next/dpdk-next-net-intel
F: drivers/net/fm10k/
F: doc/guides/nics/features/fm10k*.ini
Intel avf
M: Jingjing Wu <jingjing.wu@intel.com>
M: Wenzhuo Lu <wenzhuo.lu@intel.com>
F: drivers/net/avf/
Mellanox mlx4
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
T: git://dpdk.org/next/dpdk-next-net-mlx

drivers/net/avf/avf_log.h (new file)

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/
#ifndef _AVF_LOG_H_
#define _AVF_LOG_H_
extern int avf_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, avf_logtype_init, "%s(): " fmt "\n", \
__func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
extern int avf_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, avf_logtype_driver, "%s(): " fmt, \
__func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, " >>")
#endif /* _AVF_LOG_H_ */
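
For context, a minimal sketch of how these two log types could be registered at PMD load time and how the macros expand. The registration lives in the ethdev glue, which is not part of this commit, and the log-name strings here are assumptions:

#include <rte_log.h>

int avf_logtype_init;
int avf_logtype_driver;

/* Hypothetical one-time registration, normally run from a constructor */
static void
avf_register_log_types(void)
{
	avf_logtype_init = rte_log_register("pmd.net.avf.init");
	if (avf_logtype_init >= 0)
		rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);

	avf_logtype_driver = rte_log_register("pmd.net.avf.driver");
	if (avf_logtype_driver >= 0)
		rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}

/* With this in place, PMD_INIT_LOG(ERR, "queue %u setup failed", q)
 * expands to:
 *   rte_log(RTE_LOG_ERR, avf_logtype_init,
 *           "%s(): queue %u setup failed\n", __func__, q);
 */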

drivers/net/avf/base/README (new file)

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/
Intel® AVF driver
=================
This directory contains the source code of the FreeBSD AVF driver,
version cid-avf.2018.01.02.tar.gz, released by the team that develops
the base drivers for AVF NICs. The base/ directory contains the
original source package.
Updating the driver
===================
NOTE: The source code in this directory should not be modified apart from
the following file(s):
avf_osdep.h

drivers/net/avf/base/avf_adminq.c: diff suppressed (file too large)

drivers/net/avf/base/avf_adminq.h (new file)

@@ -0,0 +1,166 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_ADMINQ_H_
#define _AVF_ADMINQ_H_
#include "avf_osdep.h"
#include "avf_status.h"
#include "avf_adminq_cmd.h"
#define AVF_ADMINQ_DESC(R, i) \
(&(((struct avf_aq_desc *)((R).desc_buf.va))[i]))
#define AVF_ADMINQ_DESC_ALIGNMENT 4096
struct avf_adminq_ring {
struct avf_virt_mem dma_head; /* space for dma structures */
struct avf_dma_mem desc_buf; /* descriptor ring memory */
struct avf_virt_mem cmd_buf; /* command buffer memory */
union {
struct avf_dma_mem *asq_bi;
struct avf_dma_mem *arq_bi;
} r;
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
u32 bah;
u32 bal;
};
/* ASQ transaction details */
struct avf_asq_cmd_details {
void *callback; /* cast from type AVF_ADMINQ_CALLBACK */
u64 cookie;
u16 flags_ena;
u16 flags_dis;
bool async;
bool postpone;
struct avf_aq_desc *wb_desc;
};
#define AVF_ADMINQ_DETAILS(R, i) \
(&(((struct avf_asq_cmd_details *)((R).cmd_buf.va))[i]))
/* ARQ event information */
struct avf_arq_event_info {
struct avf_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
};
/* Admin Queue information */
struct avf_adminq_info {
struct avf_adminq_ring arq; /* receive queue */
struct avf_adminq_ring asq; /* send queue */
u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
u16 num_arq_entries; /* receive queue depth */
u16 num_asq_entries; /* send queue depth */
u16 arq_buf_size; /* receive queue buffer size */
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
struct avf_spinlock asq_spinlock; /* Send queue spinlock */
struct avf_spinlock arq_spinlock; /* Receive queue spinlock */
/* last status values on send and receive queues */
enum avf_admin_queue_err asq_last_status;
enum avf_admin_queue_err arq_last_status;
};
/**
* avf_aq_rc_to_posix - convert errors to user-land codes
* aq_ret: AdminQ handler error code can override aq_rc
* aq_rc: AdminQ firmware error code to convert
**/
STATIC INLINE int avf_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
0, /* AVF_AQ_RC_OK */
-EPERM, /* AVF_AQ_RC_EPERM */
-ENOENT, /* AVF_AQ_RC_ENOENT */
-ESRCH, /* AVF_AQ_RC_ESRCH */
-EINTR, /* AVF_AQ_RC_EINTR */
-EIO, /* AVF_AQ_RC_EIO */
-ENXIO, /* AVF_AQ_RC_ENXIO */
-E2BIG, /* AVF_AQ_RC_E2BIG */
-EAGAIN, /* AVF_AQ_RC_EAGAIN */
-ENOMEM, /* AVF_AQ_RC_ENOMEM */
-EACCES, /* AVF_AQ_RC_EACCES */
-EFAULT, /* AVF_AQ_RC_EFAULT */
-EBUSY, /* AVF_AQ_RC_EBUSY */
-EEXIST, /* AVF_AQ_RC_EEXIST */
-EINVAL, /* AVF_AQ_RC_EINVAL */
-ENOTTY, /* AVF_AQ_RC_ENOTTY */
-ENOSPC, /* AVF_AQ_RC_ENOSPC */
-ENOSYS, /* AVF_AQ_RC_ENOSYS */
-ERANGE, /* AVF_AQ_RC_ERANGE */
-EPIPE, /* AVF_AQ_RC_EFLUSHED */
-ESPIPE, /* AVF_AQ_RC_BAD_ADDR */
-EROFS, /* AVF_AQ_RC_EMODE */
-EFBIG, /* AVF_AQ_RC_EFBIG */
};
/* aq_rc is invalid if AQ timed out */
if (aq_ret == AVF_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
return aq_to_posix[aq_rc];
}
/* general information */
#define AVF_AQ_LARGE_BUF 512
#define AVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
void avf_fill_default_direct_cmd_desc(struct avf_aq_desc *desc,
u16 opcode);
#endif /* _AVF_ADMINQ_H_ */
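
As a usage illustration, a hedged sketch of how a caller might pair avf_asq_send_command() (declared in avf_prototype.h) with avf_aq_rc_to_posix(). It assumes struct avf_hw embeds this avf_adminq_info as hw->aq, following the i40e layout:

static int
avf_send_cmd_sketch(struct avf_hw *hw, struct avf_aq_desc *desc,
		    void *buf, u16 len)
{
	enum avf_status_code ret;

	ret = avf_asq_send_command(hw, desc, buf, len, NULL);
	/* asq_last_status caches the firmware's aq_rc for the last command */
	return avf_aq_rc_to_posix(ret, hw->aq.asq_last_status);
}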

drivers/net/avf/base/avf_adminq_cmd.h: diff suppressed (file too large)

drivers/net/avf/base/avf_alloc.h (new file)

@@ -0,0 +1,65 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_ALLOC_H_
#define _AVF_ALLOC_H_
struct avf_hw;
/* Memory allocation types */
enum avf_memory_type {
avf_mem_arq_buf = 0, /* ARQ indirect command buffer */
avf_mem_asq_buf = 1,
avf_mem_atq_buf = 2, /* ATQ indirect command buffer */
avf_mem_arq_ring = 3, /* ARQ descriptor ring */
avf_mem_atq_ring = 4, /* ATQ descriptor ring */
avf_mem_pd = 5, /* Page Descriptor */
avf_mem_bp = 6, /* Backing Page - 4KB */
avf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
avf_mem_reserved
};
/* prototype for functions used for dynamic memory allocation */
enum avf_status_code avf_allocate_dma_mem(struct avf_hw *hw,
struct avf_dma_mem *mem,
enum avf_memory_type type,
u64 size, u32 alignment);
enum avf_status_code avf_free_dma_mem(struct avf_hw *hw,
struct avf_dma_mem *mem);
enum avf_status_code avf_allocate_virt_mem(struct avf_hw *hw,
struct avf_virt_mem *mem,
u32 size);
enum avf_status_code avf_free_virt_mem(struct avf_hw *hw,
struct avf_virt_mem *mem);
#endif /* _AVF_ALLOC_H_ */
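
A sketch of how the DPDK osdep layer could back avf_allocate_dma_mem() with a memzone; the real helper lives in the PMD proper and may differ in naming and flags, and the AVF_* status codes are assumed from avf_status.h (whose diff is suppressed here):

#include <inttypes.h>
#include <rte_memzone.h>

enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
		       struct avf_dma_mem *mem, u64 size, u32 alignment)
{
	static uint64_t id;
	const struct rte_memzone *mz;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return AVF_ERR_PARAM;

	snprintf(z_name, sizeof(z_name), "avf_dma_%" PRIu64, id++);
	mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
					 0, alignment);
	if (!mz)
		return AVF_ERR_NO_MEMORY;

	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->iova;	/* bus/IOVA address for the hardware */
	mem->zone = (const void *)mz;
	return AVF_SUCCESS;
}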

drivers/net/avf/base/avf_common.c: diff suppressed (file too large)

drivers/net/avf/base/avf_devids.h (new file)

@@ -0,0 +1,43 @@
/*******************************************************************************
Copyright (c) 2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_DEVIDS_H_
#define _AVF_DEVIDS_H_
/* Vendor ID */
#define AVF_INTEL_VENDOR_ID 0x8086
/* Device IDs */
#define AVF_DEV_ID_ADAPTIVE_VF 0x1889
#endif /* _AVF_DEVIDS_H_ */
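
For illustration, this is how the device ID would feed a PCI probe table; the table name is hypothetical, since the PMD's ethdev glue is not part of this base-code commit:

#include <rte_bus_pci.h>

static const struct rte_pci_id pci_id_avf_map[] = {
	{ RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};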

drivers/net/avf/base/avf_hmc.h (new file)

@@ -0,0 +1,245 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_HMC_H_
#define _AVF_HMC_H_
#define AVF_HMC_MAX_BP_COUNT 512
/* forward-declare the HW struct for the compiler */
struct avf_hw;
#define AVF_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
#define AVF_HMC_PD_CNT_IN_SD 512
#define AVF_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
#define AVF_HMC_PAGED_BP_SIZE 4096
#define AVF_HMC_PD_BP_BUF_ALIGNMENT 4096
#define AVF_FIRST_VF_FPM_ID 16
struct avf_hmc_obj_info {
u64 base; /* base addr in FPM */
u32 max_cnt; /* max count available for this hmc func */
u32 cnt; /* count of objects driver actually wants to create */
u64 size; /* size in bytes of one object */
};
enum avf_sd_entry_type {
AVF_SD_TYPE_INVALID = 0,
AVF_SD_TYPE_PAGED = 1,
AVF_SD_TYPE_DIRECT = 2
};
struct avf_hmc_bp {
enum avf_sd_entry_type entry_type;
struct avf_dma_mem addr; /* populate to be used by hw */
u32 sd_pd_index;
u32 ref_cnt;
};
struct avf_hmc_pd_entry {
struct avf_hmc_bp bp;
u32 sd_index;
bool rsrc_pg;
bool valid;
};
struct avf_hmc_pd_table {
struct avf_dma_mem pd_page_addr; /* populate to be used by hw */
struct avf_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
struct avf_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
u32 ref_cnt;
u32 sd_index;
};
struct avf_hmc_sd_entry {
enum avf_sd_entry_type entry_type;
bool valid;
union {
struct avf_hmc_pd_table pd_table;
struct avf_hmc_bp bp;
} u;
};
struct avf_hmc_sd_table {
struct avf_virt_mem addr; /* used to track sd_entry allocations */
u32 sd_cnt;
u32 ref_cnt;
struct avf_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
};
struct avf_hmc_info {
u32 signature;
/* equals to pci func num for PF and dynamically allocated for VFs */
u8 hmc_fn_id;
u16 first_sd_index; /* index of the first available SD */
/* hmc objects */
struct avf_hmc_obj_info *hmc_obj;
struct avf_virt_mem hmc_obj_virt_mem;
struct avf_hmc_sd_table sd_table;
};
#define AVF_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
#define AVF_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
#define AVF_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
#define AVF_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
#define AVF_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
#define AVF_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/**
* AVF_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define AVF_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
{ \
u32 val1, val2, val3; \
val1 = (u32)(AVF_HI_DWORD(pa)); \
val2 = (u32)(pa) | (AVF_HMC_MAX_BP_COUNT << \
AVF_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == AVF_SD_TYPE_PAGED) ? 0 : 1) << \
AVF_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
BIT(AVF_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
val3 = (sd_index) | BIT_ULL(AVF_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), AVF_PFHMC_SDDATAHIGH, val1); \
wr32((hw), AVF_PFHMC_SDDATALOW, val2); \
wr32((hw), AVF_PFHMC_SDCMD, val3); \
}
/**
* AVF_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
* @type: if sd entry is direct or paged
**/
#define AVF_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
{ \
u32 val2, val3; \
val2 = (AVF_HMC_MAX_BP_COUNT << \
AVF_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == AVF_SD_TYPE_PAGED) ? 0 : 1) << \
AVF_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
val3 = (sd_index) | BIT_ULL(AVF_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), AVF_PFHMC_SDDATAHIGH, 0); \
wr32((hw), AVF_PFHMC_SDDATALOW, val2); \
wr32((hw), AVF_PFHMC_SDCMD, val3); \
}
/**
* AVF_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
**/
#define AVF_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), AVF_PFHMC_PDINV, \
(((sd_idx) << AVF_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << AVF_PFHMC_PDINV_PMPDIDX_SHIFT)))
/**
* AVF_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @index: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by avf_hmc_rsrc_type.
**/
#define AVF_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
{ \
u64 fpm_addr, fpm_limit; \
fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (index); \
fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
*(sd_idx) = (u32)(fpm_addr / AVF_HMC_DIRECT_BP_SIZE); \
*(sd_limit) = (u32)((fpm_limit - 1) / AVF_HMC_DIRECT_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(sd_limit) += 1; \
}
/**
* AVF_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_index: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by avf_hmc_rsrc_type.
**/
#define AVF_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
{ \
u64 fpm_adr, fpm_limit; \
fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
(hmc_info)->hmc_obj[(type)].size * (idx); \
fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
*(pd_index) = (u32)(fpm_adr / AVF_HMC_PAGED_BP_SIZE); \
*(pd_limit) = (u32)((fpm_limit - 1) / AVF_HMC_PAGED_BP_SIZE); \
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
enum avf_status_code avf_add_sd_table_entry(struct avf_hw *hw,
struct avf_hmc_info *hmc_info,
u32 sd_index,
enum avf_sd_entry_type type,
u64 direct_mode_sz);
enum avf_status_code avf_add_pd_table_entry(struct avf_hw *hw,
struct avf_hmc_info *hmc_info,
u32 pd_index,
struct avf_dma_mem *rsrc_pg);
enum avf_status_code avf_remove_pd_bp(struct avf_hw *hw,
struct avf_hmc_info *hmc_info,
u32 idx);
enum avf_status_code avf_prep_remove_sd_bp(struct avf_hmc_info *hmc_info,
u32 idx);
enum avf_status_code avf_remove_sd_bp_new(struct avf_hw *hw,
struct avf_hmc_info *hmc_info,
u32 idx, bool is_pf);
enum avf_status_code avf_prep_remove_pd_page(struct avf_hmc_info *hmc_info,
u32 idx);
enum avf_status_code avf_remove_pd_page_new(struct avf_hw *hw,
struct avf_hmc_info *hmc_info,
u32 idx, bool is_pf);
#endif /* _AVF_HMC_H_ */
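
To make the SD-span arithmetic concrete, a small worked example, assuming an initialized hmc_info with hmc_obj[type].base == 0 and hmc_obj[type].size == 128:

static void
avf_sd_span_sketch(struct avf_hmc_info *hmc_info, u32 type)
{
	u32 sd_idx, sd_limit;

	/* 1024 objects of 128 bytes starting at FPM address 0 span
	 * 0x20000 bytes, well inside one 2 MB direct backing page:
	 *   sd_idx   = 0x0 / 0x200000           = 0
	 *   sd_limit = (0x20000 - 1) / 0x200000 = 0, then +1 -> 1
	 * i.e. exactly one segment descriptor (index 0) is needed. */
	AVF_FIND_SD_INDEX_LIMIT(hmc_info, type, 0, 1024, &sd_idx, &sd_limit);
	(void)sd_idx;
	(void)sd_limit;
}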

drivers/net/avf/base/avf_lan_hmc.h (new file)

@@ -0,0 +1,200 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_LAN_HMC_H_
#define _AVF_LAN_HMC_H_
/* forward-declare the HW struct for the compiler */
struct avf_hw;
/* HMC element context information */
/* Rx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct avf_hmc_obj_rxq {
u16 head;
u16 cpuid; /* bigger than needed, see above for reason */
u64 base;
u16 qlen;
#define AVF_RXQ_CTX_DBUFF_SHIFT 7
u16 dbuff; /* bigger than needed, see above for reason */
#define AVF_RXQ_CTX_HBUFF_SHIFT 6
u16 hbuff; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 fc_ena;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data
*
* The sizes of the variables may be larger than needed due to crossing byte
* boundaries. If we do not have the width of the variable set to the correct
* size then we could end up shifting bits off the top of the variable when the
* variable is at the top of a byte and crosses over into the next byte.
*/
struct avf_hmc_obj_txq {
u16 head;
u8 new_context;
u64 base;
u8 fc_ena;
u8 timesync_ena;
u8 fd_ena;
u8 alt_vlan_ena;
u16 thead_wb;
u8 cpuid;
u8 head_wb_ena;
u16 qlen;
u8 tphrdesc_ena;
u8 tphrpacket_ena;
u8 tphwdesc_ena;
u64 head_wb_addr;
u32 crc;
u16 rdylist;
u8 rdylist_act;
};
/* for hsplit_0 field of Rx HMC context */
enum avf_hmc_obj_rx_hsplit_0 {
AVF_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
};
/* fcoe_cntx and fcoe_filt are for debugging purpose only */
struct avf_hmc_obj_fcoe_cntx {
u32 rsv[32];
};
struct avf_hmc_obj_fcoe_filt {
u32 rsv[8];
};
/* Context sizes for LAN objects */
enum avf_hmc_lan_object_size {
AVF_HMC_LAN_OBJ_SZ_8 = 0x3,
AVF_HMC_LAN_OBJ_SZ_16 = 0x4,
AVF_HMC_LAN_OBJ_SZ_32 = 0x5,
AVF_HMC_LAN_OBJ_SZ_64 = 0x6,
AVF_HMC_LAN_OBJ_SZ_128 = 0x7,
AVF_HMC_LAN_OBJ_SZ_256 = 0x8,
AVF_HMC_LAN_OBJ_SZ_512 = 0x9,
};
#define AVF_HMC_L2OBJ_BASE_ALIGNMENT 512
#define AVF_HMC_OBJ_SIZE_TXQ 128
#define AVF_HMC_OBJ_SIZE_RXQ 32
#define AVF_HMC_OBJ_SIZE_FCOE_CNTX 64
#define AVF_HMC_OBJ_SIZE_FCOE_FILT 64
enum avf_hmc_lan_rsrc_type {
AVF_HMC_LAN_FULL = 0,
AVF_HMC_LAN_TX = 1,
AVF_HMC_LAN_RX = 2,
AVF_HMC_FCOE_CTX = 3,
AVF_HMC_FCOE_FILT = 4,
AVF_HMC_LAN_MAX = 5
};
enum avf_hmc_model {
AVF_HMC_MODEL_DIRECT_PREFERRED = 0,
AVF_HMC_MODEL_DIRECT_ONLY = 1,
AVF_HMC_MODEL_PAGED_ONLY = 2,
AVF_HMC_MODEL_UNKNOWN,
};
struct avf_hmc_lan_create_obj_info {
struct avf_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
enum avf_sd_entry_type entry_type;
u64 direct_mode_sz;
};
struct avf_hmc_lan_delete_obj_info {
struct avf_hmc_info *hmc_info;
u32 rsrc_type;
u32 start_idx;
u32 count;
};
enum avf_status_code avf_init_lan_hmc(struct avf_hw *hw, u32 txq_num,
u32 rxq_num, u32 fcoe_cntx_num,
u32 fcoe_filt_num);
enum avf_status_code avf_configure_lan_hmc(struct avf_hw *hw,
enum avf_hmc_model model);
enum avf_status_code avf_shutdown_lan_hmc(struct avf_hw *hw);
u64 avf_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
u32 fcoe_cntx_num, u32 fcoe_filt_num);
enum avf_status_code avf_get_lan_tx_queue_context(struct avf_hw *hw,
u16 queue,
struct avf_hmc_obj_txq *s);
enum avf_status_code avf_clear_lan_tx_queue_context(struct avf_hw *hw,
u16 queue);
enum avf_status_code avf_set_lan_tx_queue_context(struct avf_hw *hw,
u16 queue,
struct avf_hmc_obj_txq *s);
enum avf_status_code avf_get_lan_rx_queue_context(struct avf_hw *hw,
u16 queue,
struct avf_hmc_obj_rxq *s);
enum avf_status_code avf_clear_lan_rx_queue_context(struct avf_hw *hw,
u16 queue);
enum avf_status_code avf_set_lan_rx_queue_context(struct avf_hw *hw,
u16 queue,
struct avf_hmc_obj_rxq *s);
enum avf_status_code avf_create_lan_hmc_object(struct avf_hw *hw,
struct avf_hmc_lan_create_obj_info *info);
enum avf_status_code avf_delete_lan_hmc_object(struct avf_hw *hw,
struct avf_hmc_lan_delete_obj_info *info);
#endif /* _AVF_LAN_HMC_H_ */
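
A hypothetical sketch of programming an Rx queue context through this API (PF-side usage). The field units follow the i40e convention this code derives from, where base is in 128-byte units and dbuff uses the shift defined above; treat the unit choices as assumptions:

static enum avf_status_code
avf_setup_rxq_ctx_sketch(struct avf_hw *hw, u16 queue, u64 ring_dma,
			 u16 nb_desc, u16 buf_size)
{
	struct avf_hmc_obj_rxq rx_ctx;
	enum avf_status_code err;

	memset(&rx_ctx, 0, sizeof(rx_ctx));
	rx_ctx.base = ring_dma / 128;	/* assumed 128-byte units */
	rx_ctx.qlen = nb_desc;
	rx_ctx.dbuff = buf_size >> AVF_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.dsize = 1;		/* 32-byte descriptors */
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;		/* must be 1 at init, per the note */

	err = avf_clear_lan_rx_queue_context(hw, queue);
	if (err)
		return err;
	return avf_set_lan_rx_queue_context(hw, queue, &rx_ctx);
}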

drivers/net/avf/base/avf_osdep.h (new file)

@@ -0,0 +1,187 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/
#ifndef _AVF_OSDEP_H_
#define _AVF_OSDEP_H_
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdarg.h>
#include <rte_common.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_io.h>
#include "../avf_log.h"
#define INLINE inline
#define STATIC static
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
#define __iomem
#define hw_dbg(hw, S, A...) do {} while (0)
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)(n))
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6
#endif
#ifndef __le16
#define __le16 uint16_t
#endif
#ifndef __le32
#define __le32 uint32_t
#endif
#ifndef __le64
#define __le64 uint64_t
#endif
#ifndef __be16
#define __be16 uint16_t
#endif
#ifndef __be32
#define __be32 uint32_t
#endif
#ifndef __be64
#define __be64 uint64_t
#endif
#define FALSE 0
#define TRUE 1
#define false 0
#define true 1
#define min(a,b) RTE_MIN(a,b)
#define max(a,b) RTE_MAX(a,b)
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define ASSERT(x) do { if (!(x)) rte_panic("AVF: %s", #x); } while (0)
#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
#define DEBUGOUT2(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
#define DEBUGFUNC(F) DEBUGOUT(F "\n")
#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
#define cpu_to_le16(o) rte_cpu_to_le_16(o)
#define cpu_to_le32(s) rte_cpu_to_le_32(s)
#define cpu_to_le64(h) rte_cpu_to_le_64(h)
#define le16_to_cpu(a) rte_le_to_cpu_16(a)
#define le32_to_cpu(c) rte_le_to_cpu_32(c)
#define le64_to_cpu(k) rte_le_to_cpu_64(k)
#define avf_memset(a, b, c, d) memset((a), (b), (c))
#define avf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
#define avf_usec_delay(x) rte_delay_us(x)
#define avf_msec_delay(x) rte_delay_us(1000*(x))
#define AVF_PCI_REG(reg) rte_read32(reg)
#define AVF_PCI_REG_ADDR(a, reg) \
((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
#define AVF_PCI_REG_WRITE(reg, value) \
rte_write32((rte_cpu_to_le_32(value)), reg)
#define AVF_PCI_REG_WRITE_RELAXED(reg, value) \
rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
static inline
uint32_t avf_read_addr(volatile void *addr)
{
return rte_le_to_cpu_32(AVF_PCI_REG(addr));
}
#define AVF_READ_REG(hw, reg) \
avf_read_addr(AVF_PCI_REG_ADDR((hw), (reg)))
#define AVF_WRITE_REG(hw, reg, value) \
AVF_PCI_REG_WRITE(AVF_PCI_REG_ADDR((hw), (reg)), (value))
#define AVF_WRITE_FLUSH(a) \
AVF_READ_REG(a, AVFGEN_RSTAT)
#define rd32(a, reg) avf_read_addr(AVF_PCI_REG_ADDR((a), (reg)))
#define wr32(a, reg, value) \
AVF_PCI_REG_WRITE(AVF_PCI_REG_ADDR((a), (reg)), (value))
#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
#define avf_debug(h, m, s, ...) \
do { \
if (((m) & (h)->debug_mask)) \
PMD_DRV_LOG_RAW(DEBUG, "avf %02x.%x " s, \
(h)->bus.device, (h)->bus.func, \
##__VA_ARGS__); \
} while (0)
/* memory allocation tracking */
struct avf_dma_mem {
void *va;
u64 pa;
u32 size;
const void *zone;
} __attribute__((packed));
struct avf_virt_mem {
void *va;
u32 size;
} __attribute__((packed));
/* SW spinlock */
struct avf_spinlock {
rte_spinlock_t spinlock;
};
#define avf_allocate_dma_mem(h, m, unused, s, a) \
avf_allocate_dma_mem_d(h, m, s, a)
#define avf_free_dma_mem(h, m) avf_free_dma_mem_d(h, m)
#define avf_allocate_virt_mem(h, m, s) avf_allocate_virt_mem_d(h, m, s)
#define avf_free_virt_mem(h, m) avf_free_virt_mem_d(h, m)
static inline void
avf_init_spinlock_d(struct avf_spinlock *sp)
{
rte_spinlock_init(&sp->spinlock);
}
static inline void
avf_acquire_spinlock_d(struct avf_spinlock *sp)
{
rte_spinlock_lock(&sp->spinlock);
}
static inline void
avf_release_spinlock_d(struct avf_spinlock *sp)
{
rte_spinlock_unlock(&sp->spinlock);
}
static inline void
avf_destroy_spinlock_d(__rte_unused struct avf_spinlock *sp)
{
}
#define avf_init_spinlock(_sp) avf_init_spinlock_d(_sp)
#define avf_acquire_spinlock(_sp) avf_acquire_spinlock_d(_sp)
#define avf_release_spinlock(_sp) avf_release_spinlock_d(_sp)
#define avf_destroy_spinlock(_sp) avf_destroy_spinlock_d(_sp)
#endif /* _AVF_OSDEP_H_ */
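
A sketch of the _d helpers the allocation macros above forward to, assumed to be rte_zmalloc-backed (rte_malloc.h is already included by this header); the real definitions live outside it, and the AVF_* status codes are assumed from avf_status.h:

static inline enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
			struct avf_virt_mem *mem, u32 size)
{
	if (!mem)
		return AVF_ERR_PARAM;
	mem->size = size;
	mem->va = rte_zmalloc("avf", size, 0);	/* zeroed, default align */
	return mem->va ? AVF_SUCCESS : AVF_ERR_NO_MEMORY;
}

static inline enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw, struct avf_virt_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;
	rte_free(mem->va);
	mem->va = NULL;
	return AVF_SUCCESS;
}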

drivers/net/avf/base/avf_prototype.h (new file)

@@ -0,0 +1,206 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_PROTOTYPE_H_
#define _AVF_PROTOTYPE_H_
#include "avf_type.h"
#include "avf_alloc.h"
#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
* mostly because they are needed even before the init
* has happened and will assist in the early SW and FW
* setup.
*/
/* adminq functions */
enum avf_status_code avf_init_adminq(struct avf_hw *hw);
enum avf_status_code avf_shutdown_adminq(struct avf_hw *hw);
enum avf_status_code avf_init_asq(struct avf_hw *hw);
enum avf_status_code avf_init_arq(struct avf_hw *hw);
enum avf_status_code avf_alloc_adminq_asq_ring(struct avf_hw *hw);
enum avf_status_code avf_alloc_adminq_arq_ring(struct avf_hw *hw);
enum avf_status_code avf_shutdown_asq(struct avf_hw *hw);
enum avf_status_code avf_shutdown_arq(struct avf_hw *hw);
u16 avf_clean_asq(struct avf_hw *hw);
void avf_free_adminq_asq(struct avf_hw *hw);
void avf_free_adminq_arq(struct avf_hw *hw);
enum avf_status_code avf_validate_mac_addr(u8 *mac_addr);
void avf_adminq_init_ring_data(struct avf_hw *hw);
enum avf_status_code avf_clean_arq_element(struct avf_hw *hw,
struct avf_arq_event_info *e,
u16 *events_pending);
enum avf_status_code avf_asq_send_command(struct avf_hw *hw,
struct avf_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct avf_asq_cmd_details *cmd_details);
bool avf_asq_done(struct avf_hw *hw);
/* debug function for adminq */
void avf_debug_aq(struct avf_hw *hw, enum avf_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void avf_idle_aq(struct avf_hw *hw);
bool avf_check_asq_alive(struct avf_hw *hw);
enum avf_status_code avf_aq_queue_shutdown(struct avf_hw *hw, bool unloading);
enum avf_status_code avf_aq_get_rss_lut(struct avf_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum avf_status_code avf_aq_set_rss_lut(struct avf_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum avf_status_code avf_aq_get_rss_key(struct avf_hw *hw,
u16 seid,
struct avf_aqc_get_set_rss_key_data *key);
enum avf_status_code avf_aq_set_rss_key(struct avf_hw *hw,
u16 seid,
struct avf_aqc_get_set_rss_key_data *key);
const char *avf_aq_str(struct avf_hw *hw, enum avf_admin_queue_err aq_err);
const char *avf_stat_str(struct avf_hw *hw, enum avf_status_code stat_err);
enum avf_status_code avf_set_mac_type(struct avf_hw *hw);
extern struct avf_rx_ptype_decoded avf_ptype_lookup[];
STATIC INLINE struct avf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return avf_ptype_lookup[ptype];
}
/* prototype for functions used for SW spinlocks */
void avf_init_spinlock(struct avf_spinlock *sp);
void avf_acquire_spinlock(struct avf_spinlock *sp);
void avf_release_spinlock(struct avf_spinlock *sp);
void avf_destroy_spinlock(struct avf_spinlock *sp);
/* avf_common for VF drivers*/
void avf_parse_hw_config(struct avf_hw *hw,
struct virtchnl_vf_resource *msg);
enum avf_status_code avf_reset(struct avf_hw *hw);
enum avf_status_code avf_aq_send_msg_to_pf(struct avf_hw *hw,
enum virtchnl_ops v_opcode,
enum avf_status_code v_retval,
u8 *msg, u16 msglen,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_set_filter_control(struct avf_hw *hw,
struct avf_filter_control_settings *settings);
enum avf_status_code avf_aq_add_rem_control_packet_filter(struct avf_hw *hw,
u8 *mac_addr, u16 ethtype, u16 flags,
u16 vsi_seid, u16 queue, bool is_add,
struct avf_control_filter_stats *stats,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_aq_debug_dump(struct avf_hw *hw, u8 cluster_id,
u8 table_id, u32 start_index, u16 buff_size,
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct avf_asq_cmd_details *cmd_details);
void avf_add_filter_to_drop_tx_flow_control_frames(struct avf_hw *hw,
u16 vsi_seid);
enum avf_status_code avf_aq_rx_ctl_read_register(struct avf_hw *hw,
u32 reg_addr, u32 *reg_val,
struct avf_asq_cmd_details *cmd_details);
u32 avf_read_rx_ctl(struct avf_hw *hw, u32 reg_addr);
enum avf_status_code avf_aq_rx_ctl_write_register(struct avf_hw *hw,
u32 reg_addr, u32 reg_val,
struct avf_asq_cmd_details *cmd_details);
void avf_write_rx_ctl(struct avf_hw *hw, u32 reg_addr, u32 reg_val);
enum avf_status_code avf_aq_set_phy_register(struct avf_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 reg_val,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_aq_get_phy_register(struct avf_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 *reg_val,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_aq_set_arp_proxy_config(struct avf_hw *hw,
struct avf_aqc_arp_proxy_data *proxy_config,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_aq_set_ns_proxy_table_entry(struct avf_hw *hw,
struct avf_aqc_ns_proxy_data *ns_proxy_table_entry,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_aq_set_clear_wol_filter(struct avf_hw *hw,
u8 filter_index,
struct avf_aqc_set_wol_filter_data *filter,
bool set_filter, bool no_wol_tco,
bool filter_valid, bool no_wol_tco_valid,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_aq_get_wake_event_reason(struct avf_hw *hw,
u16 *wake_reason,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_aq_clear_all_wol_filters(struct avf_hw *hw,
struct avf_asq_cmd_details *cmd_details);
enum avf_status_code avf_read_phy_register_clause22(struct avf_hw *hw,
u16 reg, u8 phy_addr, u16 *value);
enum avf_status_code avf_write_phy_register_clause22(struct avf_hw *hw,
u16 reg, u8 phy_addr, u16 value);
enum avf_status_code avf_read_phy_register_clause45(struct avf_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
enum avf_status_code avf_write_phy_register_clause45(struct avf_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
enum avf_status_code avf_read_phy_register(struct avf_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
enum avf_status_code avf_write_phy_register(struct avf_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
u8 avf_get_phy_address(struct avf_hw *hw, u8 dev_num);
enum avf_status_code avf_blink_phy_link_led(struct avf_hw *hw,
u32 time, u32 interval);
enum avf_status_code avf_aq_write_ddp(struct avf_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct avf_asq_cmd_details *
cmd_details);
enum avf_status_code avf_aq_get_ddp_list(struct avf_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct avf_asq_cmd_details *
cmd_details);
struct avf_generic_seg_header *
avf_find_segment_in_package(u32 segment_type,
struct avf_package_header *pkg_header);
struct avf_profile_section_header *
avf_find_section_in_profile(u32 section_type,
struct avf_profile_segment *profile);
enum avf_status_code
avf_write_profile(struct avf_hw *hw, struct avf_profile_segment *avf_seg,
u32 track_id);
enum avf_status_code
avf_rollback_profile(struct avf_hw *hw, struct avf_profile_segment *avf_seg,
u32 track_id);
enum avf_status_code
avf_add_pinfo_to_list(struct avf_hw *hw,
struct avf_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
#endif /* _AVF_PROTOTYPE_H_ */
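
A hedged sketch of the central VF-to-PF mailbox call, avf_aq_send_msg_to_pf(), negotiating the virtchnl API version; the opcode and struct come from virtchnl.h, and error handling is trimmed:

static enum avf_status_code
avf_check_api_version_sketch(struct avf_hw *hw)
{
	struct virtchnl_version_info ver = {
		.major = VIRTCHNL_VERSION_MAJOR,
		.minor = VIRTCHNL_VERSION_MINOR,
	};

	/* the PF's reply arrives later as an ARQ event on the AdminQ */
	return avf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, AVF_SUCCESS,
				     (u8 *)&ver, sizeof(ver), NULL);
}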

drivers/net/avf/base/avf_register.h (new file)

@@ -0,0 +1,346 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_REGISTER_H_
#define _AVF_REGISTER_H_
#define AVFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
#define AVFMSIX_PBA1_MAX_INDEX 19
#define AVFMSIX_PBA1_PENBIT_SHIFT 0
#define AVFMSIX_PBA1_PENBIT_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_PBA1_PENBIT_SHIFT)
#define AVFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define AVFMSIX_TADD1_MAX_INDEX 639
#define AVFMSIX_TADD1_MSIXTADD10_SHIFT 0
#define AVFMSIX_TADD1_MSIXTADD10_MASK AVF_MASK(0x3, AVFMSIX_TADD1_MSIXTADD10_SHIFT)
#define AVFMSIX_TADD1_MSIXTADD_SHIFT 2
#define AVFMSIX_TADD1_MSIXTADD_MASK AVF_MASK(0x3FFFFFFF, AVFMSIX_TADD1_MSIXTADD_SHIFT)
#define AVFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define AVFMSIX_TMSG1_MAX_INDEX 639
#define AVFMSIX_TMSG1_MSIXTMSG_SHIFT 0
#define AVFMSIX_TMSG1_MSIXTMSG_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TMSG1_MSIXTMSG_SHIFT)
#define AVFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define AVFMSIX_TUADD1_MAX_INDEX 639
#define AVFMSIX_TUADD1_MSIXTUADD_SHIFT 0
#define AVFMSIX_TUADD1_MSIXTUADD_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TUADD1_MSIXTUADD_SHIFT)
#define AVFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
#define AVFMSIX_TVCTRL1_MAX_INDEX 639
#define AVFMSIX_TVCTRL1_MASK_SHIFT 0
#define AVFMSIX_TVCTRL1_MASK_MASK AVF_MASK(0x1, AVFMSIX_TVCTRL1_MASK_SHIFT)
#define AVF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define AVF_ARQBAH1_ARQBAH_SHIFT 0
#define AVF_ARQBAH1_ARQBAH_MASK AVF_MASK(0xFFFFFFFF, AVF_ARQBAH1_ARQBAH_SHIFT)
#define AVF_ARQBAL1 0x00006C00 /* Reset: EMPR */
#define AVF_ARQBAL1_ARQBAL_SHIFT 0
#define AVF_ARQBAL1_ARQBAL_MASK AVF_MASK(0xFFFFFFFF, AVF_ARQBAL1_ARQBAL_SHIFT)
#define AVF_ARQH1 0x00007400 /* Reset: EMPR */
#define AVF_ARQH1_ARQH_SHIFT 0
#define AVF_ARQH1_ARQH_MASK AVF_MASK(0x3FF, AVF_ARQH1_ARQH_SHIFT)
#define AVF_ARQLEN1 0x00008000 /* Reset: EMPR */
#define AVF_ARQLEN1_ARQLEN_SHIFT 0
#define AVF_ARQLEN1_ARQLEN_MASK AVF_MASK(0x3FF, AVF_ARQLEN1_ARQLEN_SHIFT)
#define AVF_ARQLEN1_ARQVFE_SHIFT 28
#define AVF_ARQLEN1_ARQVFE_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQVFE_SHIFT)
#define AVF_ARQLEN1_ARQOVFL_SHIFT 29
#define AVF_ARQLEN1_ARQOVFL_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQOVFL_SHIFT)
#define AVF_ARQLEN1_ARQCRIT_SHIFT 30
#define AVF_ARQLEN1_ARQCRIT_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQCRIT_SHIFT)
#define AVF_ARQLEN1_ARQENABLE_SHIFT 31
#define AVF_ARQLEN1_ARQENABLE_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQENABLE_SHIFT)
#define AVF_ARQT1 0x00007000 /* Reset: EMPR */
#define AVF_ARQT1_ARQT_SHIFT 0
#define AVF_ARQT1_ARQT_MASK AVF_MASK(0x3FF, AVF_ARQT1_ARQT_SHIFT)
#define AVF_ATQBAH1 0x00007800 /* Reset: EMPR */
#define AVF_ATQBAH1_ATQBAH_SHIFT 0
#define AVF_ATQBAH1_ATQBAH_MASK AVF_MASK(0xFFFFFFFF, AVF_ATQBAH1_ATQBAH_SHIFT)
#define AVF_ATQBAL1 0x00007C00 /* Reset: EMPR */
#define AVF_ATQBAL1_ATQBAL_SHIFT 0
#define AVF_ATQBAL1_ATQBAL_MASK AVF_MASK(0xFFFFFFFF, AVF_ATQBAL1_ATQBAL_SHIFT)
#define AVF_ATQH1 0x00006400 /* Reset: EMPR */
#define AVF_ATQH1_ATQH_SHIFT 0
#define AVF_ATQH1_ATQH_MASK AVF_MASK(0x3FF, AVF_ATQH1_ATQH_SHIFT)
#define AVF_ATQLEN1 0x00006800 /* Reset: EMPR */
#define AVF_ATQLEN1_ATQLEN_SHIFT 0
#define AVF_ATQLEN1_ATQLEN_MASK AVF_MASK(0x3FF, AVF_ATQLEN1_ATQLEN_SHIFT)
#define AVF_ATQLEN1_ATQVFE_SHIFT 28
#define AVF_ATQLEN1_ATQVFE_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQVFE_SHIFT)
#define AVF_ATQLEN1_ATQOVFL_SHIFT 29
#define AVF_ATQLEN1_ATQOVFL_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQOVFL_SHIFT)
#define AVF_ATQLEN1_ATQCRIT_SHIFT 30
#define AVF_ATQLEN1_ATQCRIT_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQCRIT_SHIFT)
#define AVF_ATQLEN1_ATQENABLE_SHIFT 31
#define AVF_ATQLEN1_ATQENABLE_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQENABLE_SHIFT)
#define AVF_ATQT1 0x00008400 /* Reset: EMPR */
#define AVF_ATQT1_ATQT_SHIFT 0
#define AVF_ATQT1_ATQT_MASK AVF_MASK(0x3FF, AVF_ATQT1_ATQT_SHIFT)
#define AVFGEN_RSTAT 0x00008800 /* Reset: VFR */
#define AVFGEN_RSTAT_VFR_STATE_SHIFT 0
#define AVFGEN_RSTAT_VFR_STATE_MASK AVF_MASK(0x3, AVFGEN_RSTAT_VFR_STATE_SHIFT)
#define AVFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
#define AVFINT_DYN_CTL01_INTENA_SHIFT 0
#define AVFINT_DYN_CTL01_INTENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_INTENA_SHIFT)
#define AVFINT_DYN_CTL01_CLEARPBA_SHIFT 1
#define AVFINT_DYN_CTL01_CLEARPBA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_CLEARPBA_SHIFT)
#define AVFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
#define AVFINT_DYN_CTL01_SWINT_TRIG_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
#define AVFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define AVFINT_DYN_CTL01_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTL01_ITR_INDX_SHIFT)
#define AVFINT_DYN_CTL01_INTERVAL_SHIFT 5
#define AVFINT_DYN_CTL01_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_DYN_CTL01_INTERVAL_SHIFT)
#define AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
#define AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
#define AVFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
#define AVFINT_DYN_CTL01_SW_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
#define AVFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
#define AVFINT_DYN_CTL01_INTENA_MSK_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_INTENA_MSK_SHIFT)
#define AVFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
#define AVFINT_DYN_CTLN1_MAX_INDEX 15
#define AVFINT_DYN_CTLN1_INTENA_SHIFT 0
#define AVFINT_DYN_CTLN1_INTENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_INTENA_SHIFT)
#define AVFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
#define AVFINT_DYN_CTLN1_CLEARPBA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_CLEARPBA_SHIFT)
#define AVFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
#define AVFINT_DYN_CTLN1_SWINT_TRIG_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
#define AVFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
#define AVFINT_DYN_CTLN1_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTLN1_ITR_INDX_SHIFT)
#define AVFINT_DYN_CTLN1_INTERVAL_SHIFT 5
#define AVFINT_DYN_CTLN1_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_DYN_CTLN1_INTERVAL_SHIFT)
#define AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
#define AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
#define AVFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
#define AVFINT_DYN_CTLN1_SW_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
#define AVFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
#define AVFINT_DYN_CTLN1_INTENA_MSK_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
#define AVFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
#define AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
#define AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
#define AVFINT_ICR0_ENA1_ADMINQ_SHIFT 30
#define AVFINT_ICR0_ENA1_ADMINQ_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_ADMINQ_SHIFT)
#define AVFINT_ICR0_ENA1_RSVD_SHIFT 31
#define AVFINT_ICR0_ENA1_RSVD_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_RSVD_SHIFT)
#define AVFINT_ICR01 0x00004800 /* Reset: CORER */
#define AVFINT_ICR01_INTEVENT_SHIFT 0
#define AVFINT_ICR01_INTEVENT_MASK AVF_MASK(0x1, AVFINT_ICR01_INTEVENT_SHIFT)
#define AVFINT_ICR01_QUEUE_0_SHIFT 1
#define AVFINT_ICR01_QUEUE_0_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_0_SHIFT)
#define AVFINT_ICR01_QUEUE_1_SHIFT 2
#define AVFINT_ICR01_QUEUE_1_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_1_SHIFT)
#define AVFINT_ICR01_QUEUE_2_SHIFT 3
#define AVFINT_ICR01_QUEUE_2_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_2_SHIFT)
#define AVFINT_ICR01_QUEUE_3_SHIFT 4
#define AVFINT_ICR01_QUEUE_3_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_3_SHIFT)
#define AVFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
#define AVFINT_ICR01_LINK_STAT_CHANGE_MASK AVF_MASK(0x1, AVFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
#define AVFINT_ICR01_ADMINQ_SHIFT 30
#define AVFINT_ICR01_ADMINQ_MASK AVF_MASK(0x1, AVFINT_ICR01_ADMINQ_SHIFT)
#define AVFINT_ICR01_SWINT_SHIFT 31
#define AVFINT_ICR01_SWINT_MASK AVF_MASK(0x1, AVFINT_ICR01_SWINT_SHIFT)
#define AVFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
#define AVFINT_ITR01_MAX_INDEX 2
#define AVFINT_ITR01_INTERVAL_SHIFT 0
#define AVFINT_ITR01_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_ITR01_INTERVAL_SHIFT)
#define AVFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
#define AVFINT_ITRN1_MAX_INDEX 2
#define AVFINT_ITRN1_INTERVAL_SHIFT 0
#define AVFINT_ITRN1_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_ITRN1_INTERVAL_SHIFT)
#define AVFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define AVFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
#define AVFINT_STAT_CTL01_OTHER_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
#define AVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define AVF_QRX_TAIL1_MAX_INDEX 15
#define AVF_QRX_TAIL1_TAIL_SHIFT 0
#define AVF_QRX_TAIL1_TAIL_MASK AVF_MASK(0x1FFF, AVF_QRX_TAIL1_TAIL_SHIFT)
#define AVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
#define AVF_QTX_TAIL1_MAX_INDEX 15
#define AVF_QTX_TAIL1_TAIL_SHIFT 0
#define AVF_QTX_TAIL1_TAIL_MASK AVF_MASK(0x1FFF, AVF_QTX_TAIL1_TAIL_SHIFT)
#define AVFMSIX_PBA 0x00002000 /* Reset: VFLR */
#define AVFMSIX_PBA_PENBIT_SHIFT 0
#define AVFMSIX_PBA_PENBIT_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_PBA_PENBIT_SHIFT)
#define AVFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define AVFMSIX_TADD_MAX_INDEX 16
#define AVFMSIX_TADD_MSIXTADD10_SHIFT 0
#define AVFMSIX_TADD_MSIXTADD10_MASK AVF_MASK(0x3, AVFMSIX_TADD_MSIXTADD10_SHIFT)
#define AVFMSIX_TADD_MSIXTADD_SHIFT 2
#define AVFMSIX_TADD_MSIXTADD_MASK AVF_MASK(0x3FFFFFFF, AVFMSIX_TADD_MSIXTADD_SHIFT)
#define AVFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define AVFMSIX_TMSG_MAX_INDEX 16
#define AVFMSIX_TMSG_MSIXTMSG_SHIFT 0
#define AVFMSIX_TMSG_MSIXTMSG_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TMSG_MSIXTMSG_SHIFT)
#define AVFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define AVFMSIX_TUADD_MAX_INDEX 16
#define AVFMSIX_TUADD_MSIXTUADD_SHIFT 0
#define AVFMSIX_TUADD_MSIXTUADD_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TUADD_MSIXTUADD_SHIFT)
#define AVFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
#define AVFMSIX_TVCTRL_MAX_INDEX 16
#define AVFMSIX_TVCTRL_MASK_SHIFT 0
#define AVFMSIX_TVCTRL_MASK_MASK AVF_MASK(0x1, AVFMSIX_TVCTRL_MASK_SHIFT)
#define AVFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
#define AVFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
#define AVFCM_PE_ERRDATA_ERROR_CODE_MASK AVF_MASK(0xF, AVFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
#define AVFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
#define AVFCM_PE_ERRDATA_Q_TYPE_MASK AVF_MASK(0x7, AVFCM_PE_ERRDATA_Q_TYPE_SHIFT)
#define AVFCM_PE_ERRDATA_Q_NUM_SHIFT 8
#define AVFCM_PE_ERRDATA_Q_NUM_MASK AVF_MASK(0x3FFFF, AVFCM_PE_ERRDATA_Q_NUM_SHIFT)
#define AVFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
#define AVFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
#define AVFCM_PE_ERRINFO_ERROR_VALID_MASK AVF_MASK(0x1, AVFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
#define AVFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
#define AVFCM_PE_ERRINFO_ERROR_INST_MASK AVF_MASK(0x7, AVFCM_PE_ERRINFO_ERROR_INST_SHIFT)
#define AVFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
#define AVFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
#define AVFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
#define AVFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
#define AVFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
#define AVFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
#define AVFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define AVFQF_HENA_MAX_INDEX 1
#define AVFQF_HENA_PTYPE_ENA_SHIFT 0
#define AVFQF_HENA_PTYPE_ENA_MASK AVF_MASK(0xFFFFFFFF, AVFQF_HENA_PTYPE_ENA_SHIFT)
#define AVFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
#define AVFQF_HKEY_MAX_INDEX 12
#define AVFQF_HKEY_KEY_0_SHIFT 0
#define AVFQF_HKEY_KEY_0_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_0_SHIFT)
#define AVFQF_HKEY_KEY_1_SHIFT 8
#define AVFQF_HKEY_KEY_1_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_1_SHIFT)
#define AVFQF_HKEY_KEY_2_SHIFT 16
#define AVFQF_HKEY_KEY_2_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_2_SHIFT)
#define AVFQF_HKEY_KEY_3_SHIFT 24
#define AVFQF_HKEY_KEY_3_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_3_SHIFT)
#define AVFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
#define AVFQF_HLUT_MAX_INDEX 15
#define AVFQF_HLUT_LUT0_SHIFT 0
#define AVFQF_HLUT_LUT0_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT0_SHIFT)
#define AVFQF_HLUT_LUT1_SHIFT 8
#define AVFQF_HLUT_LUT1_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT1_SHIFT)
#define AVFQF_HLUT_LUT2_SHIFT 16
#define AVFQF_HLUT_LUT2_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT2_SHIFT)
#define AVFQF_HLUT_LUT3_SHIFT 24
#define AVFQF_HLUT_LUT3_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT3_SHIFT)
#define AVFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
#define AVFQF_HREGION_MAX_INDEX 7
#define AVFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
#define AVFQF_HREGION_OVERRIDE_ENA_0_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
#define AVFQF_HREGION_REGION_0_SHIFT 1
#define AVFQF_HREGION_REGION_0_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_0_SHIFT)
#define AVFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
#define AVFQF_HREGION_OVERRIDE_ENA_1_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
#define AVFQF_HREGION_REGION_1_SHIFT 5
#define AVFQF_HREGION_REGION_1_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_1_SHIFT)
#define AVFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
#define AVFQF_HREGION_OVERRIDE_ENA_2_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
#define AVFQF_HREGION_REGION_2_SHIFT 9
#define AVFQF_HREGION_REGION_2_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_2_SHIFT)
#define AVFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
#define AVFQF_HREGION_OVERRIDE_ENA_3_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
#define AVFQF_HREGION_REGION_3_SHIFT 13
#define AVFQF_HREGION_REGION_3_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_3_SHIFT)
#define AVFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
#define AVFQF_HREGION_OVERRIDE_ENA_4_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
#define AVFQF_HREGION_REGION_4_SHIFT 17
#define AVFQF_HREGION_REGION_4_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_4_SHIFT)
#define AVFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
#define AVFQF_HREGION_OVERRIDE_ENA_5_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
#define AVFQF_HREGION_REGION_5_SHIFT 21
#define AVFQF_HREGION_REGION_5_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_5_SHIFT)
#define AVFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
#define AVFQF_HREGION_OVERRIDE_ENA_6_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
#define AVFQF_HREGION_REGION_6_SHIFT 25
#define AVFQF_HREGION_REGION_6_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_6_SHIFT)
#define AVFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
#define AVFQF_HREGION_OVERRIDE_ENA_7_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define AVFQF_HREGION_REGION_7_SHIFT 29
#define AVFQF_HREGION_REGION_7_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_7_SHIFT)
#define AVFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
#define AVFINT_DYN_CTL01_WB_ON_ITR_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
#define AVFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
#define AVFINT_DYN_CTLN1_WB_ON_ITR_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
#define AVFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
#define AVFPE_AEQALLOC1_AECOUNT_SHIFT 0
#define AVFPE_AEQALLOC1_AECOUNT_MASK AVF_MASK(0xFFFFFFFF, AVFPE_AEQALLOC1_AECOUNT_SHIFT)
#define AVFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
#define AVFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
#define AVFPE_CCQPHIGH1_PECCQPHIGH_MASK AVF_MASK(0xFFFFFFFF, AVFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
#define AVFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
#define AVFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
#define AVFPE_CCQPLOW1_PECCQPLOW_MASK AVF_MASK(0xFFFFFFFF, AVFPE_CCQPLOW1_PECCQPLOW_SHIFT)
#define AVFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
#define AVFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
#define AVFPE_CCQPSTATUS1_CCQP_DONE_MASK AVF_MASK(0x1, AVFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
#define AVFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
#define AVFPE_CCQPSTATUS1_HMC_PROFILE_MASK AVF_MASK(0x7, AVFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
#define AVFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
#define AVFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK AVF_MASK(0x3F, AVFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
#define AVFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
#define AVFPE_CCQPSTATUS1_CCQP_ERR_MASK AVF_MASK(0x1, AVFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
#define AVFPE_CQACK1 0x0000B000 /* Reset: VFR */
#define AVFPE_CQACK1_PECQID_SHIFT 0
#define AVFPE_CQACK1_PECQID_MASK AVF_MASK(0x1FFFF, AVFPE_CQACK1_PECQID_SHIFT)
#define AVFPE_CQARM1 0x0000B400 /* Reset: VFR */
#define AVFPE_CQARM1_PECQID_SHIFT 0
#define AVFPE_CQARM1_PECQID_MASK AVF_MASK(0x1FFFF, AVFPE_CQARM1_PECQID_SHIFT)
#define AVFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
#define AVFPE_CQPDB1_WQHEAD_SHIFT 0
#define AVFPE_CQPDB1_WQHEAD_MASK AVF_MASK(0x7FF, AVFPE_CQPDB1_WQHEAD_SHIFT)
#define AVFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
#define AVFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
#define AVFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK AVF_MASK(0xFFFF, AVFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
#define AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
#define AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK AVF_MASK(0xFFFF, AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
#define AVFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
#define AVFPE_CQPTAIL1_WQTAIL_SHIFT 0
#define AVFPE_CQPTAIL1_WQTAIL_MASK AVF_MASK(0x7FF, AVFPE_CQPTAIL1_WQTAIL_SHIFT)
#define AVFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
#define AVFPE_CQPTAIL1_CQP_OP_ERR_MASK AVF_MASK(0x1, AVFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
#define AVFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
#define AVFPE_IPCONFIG01_PEIPID_SHIFT 0
#define AVFPE_IPCONFIG01_PEIPID_MASK AVF_MASK(0xFFFF, AVFPE_IPCONFIG01_PEIPID_SHIFT)
#define AVFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
#define AVFPE_IPCONFIG01_USEENTIREIDRANGE_MASK AVF_MASK(0x1, AVFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
#define AVFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
#define AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
#define AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK AVF_MASK(0x1F, AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
#define AVFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
#define AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
#define AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK AVF_MASK(0xFFFFFF, AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
#define AVFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
#define AVFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
#define AVFPE_TCPNOWTIMER1_TCP_NOW_MASK AVF_MASK(0xFFFFFFFF, AVFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
#define AVFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
#define AVFPE_WQEALLOC1_PEQPID_SHIFT 0
#define AVFPE_WQEALLOC1_PEQPID_MASK AVF_MASK(0x3FFFF, AVFPE_WQEALLOC1_PEQPID_SHIFT)
#define AVFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
#define AVFPE_WQEALLOC1_WQE_DESC_INDEX_MASK AVF_MASK(0xFFF, AVFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
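/* Illustrative sketch, not part of the imported base code: a register field
 * is read by masking and shifting with the SHIFT/MASK pairs above. The
 * helper below is a hypothetical example and assumes the osdep u32 typedef
 * is in scope.
 */
static inline u32
avf_example_cqp_minor_code(u32 cqperrcodes)
{
	/* AVF_MASK() already folds the shift into the mask value */
	return (cqperrcodes & AVFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK) >>
	       AVFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT;
}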
#endif /* _AVF_REGISTER_H_ */

108 drivers/net/avf/base/avf_status.h Normal file

View File

@ -0,0 +1,108 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _AVF_STATUS_H_
#define _AVF_STATUS_H_
/* Error Codes */
enum avf_status_code {
AVF_SUCCESS = 0,
AVF_ERR_NVM = -1,
AVF_ERR_NVM_CHECKSUM = -2,
AVF_ERR_PHY = -3,
AVF_ERR_CONFIG = -4,
AVF_ERR_PARAM = -5,
AVF_ERR_MAC_TYPE = -6,
AVF_ERR_UNKNOWN_PHY = -7,
AVF_ERR_LINK_SETUP = -8,
AVF_ERR_ADAPTER_STOPPED = -9,
AVF_ERR_INVALID_MAC_ADDR = -10,
AVF_ERR_DEVICE_NOT_SUPPORTED = -11,
AVF_ERR_MASTER_REQUESTS_PENDING = -12,
AVF_ERR_INVALID_LINK_SETTINGS = -13,
AVF_ERR_AUTONEG_NOT_COMPLETE = -14,
AVF_ERR_RESET_FAILED = -15,
AVF_ERR_SWFW_SYNC = -16,
AVF_ERR_NO_AVAILABLE_VSI = -17,
AVF_ERR_NO_MEMORY = -18,
AVF_ERR_BAD_PTR = -19,
AVF_ERR_RING_FULL = -20,
AVF_ERR_INVALID_PD_ID = -21,
AVF_ERR_INVALID_QP_ID = -22,
AVF_ERR_INVALID_CQ_ID = -23,
AVF_ERR_INVALID_CEQ_ID = -24,
AVF_ERR_INVALID_AEQ_ID = -25,
AVF_ERR_INVALID_SIZE = -26,
AVF_ERR_INVALID_ARP_INDEX = -27,
AVF_ERR_INVALID_FPM_FUNC_ID = -28,
AVF_ERR_QP_INVALID_MSG_SIZE = -29,
AVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
AVF_ERR_INVALID_FRAG_COUNT = -31,
AVF_ERR_QUEUE_EMPTY = -32,
AVF_ERR_INVALID_ALIGNMENT = -33,
AVF_ERR_FLUSHED_QUEUE = -34,
AVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
AVF_ERR_INVALID_IMM_DATA_SIZE = -36,
AVF_ERR_TIMEOUT = -37,
AVF_ERR_OPCODE_MISMATCH = -38,
AVF_ERR_CQP_COMPL_ERROR = -39,
AVF_ERR_INVALID_VF_ID = -40,
AVF_ERR_INVALID_HMCFN_ID = -41,
AVF_ERR_BACKING_PAGE_ERROR = -42,
AVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
AVF_ERR_INVALID_PBLE_INDEX = -44,
AVF_ERR_INVALID_SD_INDEX = -45,
AVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
AVF_ERR_INVALID_SD_TYPE = -47,
AVF_ERR_MEMCPY_FAILED = -48,
AVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
AVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
AVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
AVF_ERR_SRQ_ENABLED = -52,
AVF_ERR_ADMIN_QUEUE_ERROR = -53,
AVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
AVF_ERR_BUF_TOO_SHORT = -55,
AVF_ERR_ADMIN_QUEUE_FULL = -56,
AVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
AVF_ERR_BAD_IWARP_CQE = -58,
AVF_ERR_NVM_BLANK_MODE = -59,
AVF_ERR_NOT_IMPLEMENTED = -60,
AVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
AVF_ERR_DIAG_TEST_FAILED = -62,
AVF_ERR_NOT_READY = -63,
AVF_NOT_SUPPORTED = -64,
AVF_ERR_FIRMWARE_API_VERSION = -65,
AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _AVF_STATUS_H_ */

File diff suppressed because it is too large Load Diff

787 drivers/net/avf/base/virtchnl.h Normal file

View File

@ -0,0 +1,787 @@
/*******************************************************************************
Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
/* Description:
* This header file describes the VF-PF communication protocol used
* by the drivers for all devices starting from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally.
* The Firmware copies the cookie fields when sending messages between the
* PF and VF, but uses all other fields internally. Due to this limitation,
* we must send all messages as "indirect", i.e. using an external buffer.
*
* All the VSI indexes are relative to the VF. Each VF can have a maximum
* of three VSIs. All the queue indexes are relative to the VSI. Each VF
* can have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value
* is of status_code type, defined in the shared type.h.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
* PF driver, then request a reset, then get resources, then configure
* queues and interrupts. After these operations are complete, the VF
* driver may start its queues, optionally add MAC and VLAN filters, and
* process traffic.
*/
/* START GENERIC DEFINES
* Need to ensure the following enums and defines hold the same meaning and
* value in current and future projects
*/
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_ERR_PARAM = -5,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
};
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure.
*/
enum virtchnl_ops {
/* The PF sends status change events to VFs using
* the VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops.
* Use of "advanced opcode" features must be negotiated as part of the
* capabilities exchange and is not considered part of the base mode
* feature set.
*/
VIRTCHNL_OP_UNKNOWN = 0,
VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
VIRTCHNL_OP_RESET_VF = 2,
VIRTCHNL_OP_GET_VF_RESOURCES = 3,
VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
VIRTCHNL_OP_ENABLE_QUEUES = 8,
VIRTCHNL_OP_DISABLE_QUEUES = 9,
VIRTCHNL_OP_ADD_ETH_ADDR = 10,
VIRTCHNL_OP_DEL_ETH_ADDR = 11,
VIRTCHNL_OP_ADD_VLAN = 12,
VIRTCHNL_OP_DEL_VLAN = 13,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
#ifdef VIRTCHNL_SOL_VF_SUPPORT
VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
#endif
#ifdef VIRTCHNL_IWARP
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
#endif
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
VIRTCHNL_OP_SET_RSS_HENA = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
};
/* This macro is used to generate a compilation error if a structure
* is not exactly the correct length. It gives a divide by zero error if the
* structure is not of the correct size, otherwise it creates an enum that is
* never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
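/* For example, the VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg) invocation
 * below expands to an enum whose initializer evaluates 20 / 0 at compile
 * time whenever sizeof(struct virtchnl_msg) != 20, forcing a build failure.
 */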
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
*/
struct virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
enum virtchnl_status_code v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */
};
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
/* Message descriptions and data structures.*/
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1.
* If there is a major version mismatch, then the VF cannot operate.
* If there is a minor version mismatch, then the VF can operate but should
* add a warning to the system log.
*
* This enum element MUST always be specified as == 1, regardless of other
* changes in the API. The PF must always respond to this message without
* error regardless of version mismatch.
*/
#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct virtchnl_version_info {
u32 major;
u32 minor;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
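/* Illustrative sketch (assumption, not part of the base code): the version
 * exchange is typically the first message a VF sends, e.g.
 *
 *	struct virtchnl_version_info vvi = {
 *		.major = VIRTCHNL_VERSION_MAJOR,
 *		.minor = VIRTCHNL_VERSION_MINOR,
 *	};
 *	send_msg_to_pf(VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi));
 *
 * where send_msg_to_pf() stands in for the driver's admin-queue send
 * routine; the PF replies with its own virtchnl_version_info.
 */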
/* VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized
* after this operation.
*
* When reset is complete, PF must ensure that all queues in all VSIs associated
* with the VF are stopped, all queue configurations in the HMC are set to 0,
* and all MAC and VLAN filters (except the default MAC address) on all VSIs
* are cleared.
*/
/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
* vsi_type should always be 6 for backward compatibility. Add other fields
* as needed.
*/
enum virtchnl_vsi_type {
VIRTCHNL_VSI_TYPE_INVALID = 0,
VIRTCHNL_VSI_SRIOV = 6,
};
/* VIRTCHNL_OP_GET_VF_RESOURCES
* Version 1.0 VF sends this request to PF with no parameters
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* virtchnl_vf_resource and one or more
* virtchnl_vsi_resource structures.
*/
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum virtchnl_vsi_type vsi_type;
u16 qset_handle;
u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0x00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0x00400000
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
u16 max_vectors;
u16 max_mtu;
u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
struct virtchnl_vsi_resource vsi_res[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
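/* Note: vsi_res[] is declared with a single element, so a reply carrying
 * num_vsis VSIs occupies
 *	sizeof(struct virtchnl_vf_resource) +
 *	(num_vsis - 1) * sizeof(struct virtchnl_vsi_resource)
 * bytes of the indirect buffer; the other variable-length structures in
 * this file follow the same one-element-array convention.
 */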
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of virtchnl_txq_info.
* PF configures requested queue and returns a status code.
*/
/* Tx queue config info */
struct virtchnl_txq_info {
u16 vsi_id;
u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled; /* deprecated with AVF 1.0 */
u64 dma_ring_addr;
u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
* PF configures requested queue and returns a status code.
*/
/* Rx queue config info */
struct virtchnl_rxq_info {
u16 vsi_id;
u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size;
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
u32 pad1;
u64 dma_ring_addr;
enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
u32 pad2;
};
VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for all active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
struct virtchnl_txq_info txq;
struct virtchnl_rxq_info rxq;
};
VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
struct virtchnl_queue_pair_info qpair[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
* this VF. Each VF gets a guaranteed number of queues on init but asking for
* additional queues must be negotiated. This is a best effort request as it
* is possible the PF does not have enough queues left to support the request.
* If the PF cannot support the number requested it will respond with the
* maximum number it is able to support. If the request is successful, PF will
* then reset the VF to institute required changes.
*/
/* VF resource request */
struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0.
* PF configures interrupt mapping and returns status.
*/
struct virtchnl_vector_map {
u16 vsi_id;
u16 vector_id;
u16 rxq_map;
u16 txq_map;
u16 rxitr_idx;
u16 txitr_idx;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
struct virtchnl_vector_map vecmap[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
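/* Illustrative sketch (assumption): mapping Rx/Tx queue 0 of a VSI to
 * MSI-X vector 1 with the default ITR indexes:
 *
 *	struct virtchnl_vector_map vm = {
 *		.vsi_id = vsi_id,
 *		.vector_id = 1,
 *		.rxq_map = BIT(0),
 *		.txq_map = BIT(0),
 *	};
 */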
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
* VF sends these messages to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
*/
struct virtchnl_queue_select {
u16 vsi_id;
u16 pad;
u32 rx_queues;
u32 tx_queues;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
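/* Illustrative sketch (assumption): enabling the first four queue pairs
 * of a VSI through the queue bitmaps:
 *
 *	struct virtchnl_queue_select qs = {
 *		.vsi_id = vsi_id,
 *		.rx_queues = 0xF,
 *		.tx_queues = 0xF,
 *	};
 */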
/* VIRTCHNL_OP_ADD_ETH_ADDR
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
* PF adds the filters and returns status.
*/
/* VIRTCHNL_OP_DEL_ETH_ADDR
* VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI.
* PF removes the filters and returns status.
*/
struct virtchnl_ether_addr {
u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
u8 pad[2];
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
struct virtchnl_ether_addr list[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
#ifdef VIRTCHNL_SOL_VF_SUPPORT
/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
* VF sends this message to get the default MTU and list of additional ethernet
* addresses it is allowed to use.
* PF responds with an indirect message containing
* virtchnl_addnl_solaris_config with zero or more
* virtchnl_ether_addr structures.
*
* It is expected that this operation will only ever be needed for Solaris VFs
* running under a Solaris PF.
*/
struct virtchnl_addnl_solaris_config {
u16 default_mtu;
struct virtchnl_ether_addr_list al;
};
#endif
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
/* VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
u16 vlan_id[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled.
*/
struct virtchnl_promisc_info {
u16 vsi_id;
u16 flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002
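/* e.g. (illustrative): flags = FLAG_VF_UNICAST_PROMISC |
 * FLAG_VF_MULTICAST_PROMISC requests both unicast and multicast
 * promiscuous mode, while flags = 0 disables both.
 */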
/* VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses
* the virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF.
*
* PF replies with struct virtchnl_eth_stats in an external buffer.
*/
struct virtchnl_eth_stats {
u64 rx_bytes; /* received bytes */
u64 rx_unicast; /* received unicast pkts */
u64 rx_multicast; /* received multicast pkts */
u64 rx_broadcast; /* received broadcast pkts */
u64 rx_discards;
u64 rx_unknown_protocol;
u64 tx_bytes; /* transmitted bytes*/
u64 tx_unicast; /* transmitted unicast pkts */
u64 tx_multicast; /* transmitted multicast pkts */
u64 tx_broadcast; /* transmitted broadcast pkts */
u64 tx_discards;
u64 tx_errors;
};
/* VIRTCHNL_OP_CONFIG_RSS_KEY
* VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the RSS fields in
* the VF resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF.
*/
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
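/* Note: key[] and lut[] are one-element arrays, so the full messages are
 * sizeof(struct virtchnl_rss_key) + key_len - 1 and
 * sizeof(struct virtchnl_rss_lut) + lut_entries - 1 bytes respectively,
 * matching the length checks in virtchnl_vc_validate_vf_msg() below.
 */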
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
struct virtchnl_rss_hena {
u64 hena;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
* messages in response to this one.
*/
enum virtchnl_event_codes {
VIRTCHNL_EVENT_UNKNOWN = 0,
VIRTCHNL_EVENT_LINK_CHANGE,
VIRTCHNL_EVENT_RESET_IMPENDING,
VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define PF_EVENT_SEVERITY_INFO 0
#define PF_EVENT_SEVERITY_ATTENTION 1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
} link_event;
} event_data;
int severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
#ifdef VIRTCHNL_IWARP
/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
* VF uses this message to request PF to map IWARP vectors to IWARP queues.
* The request for this originates from the VF IWARP driver through
* a client interface between VF LAN and VF IWARP driver.
* A vector could have both an AEQ and a CEQ attached to it; however,
* since there is a single AEQ per VF IWARP instance, most vectors will
* have an INVALID_IDX for the aeq and a valid idx for the ceq.
* There will never be a case where there will be multiple CEQs attached
* to a single vector.
* PF configures interrupt mapping and returns status.
*/
/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
#define QUEUE_TYPE_PE_AEQ 0x80
#define QUEUE_INVALID_IDX 0xFFFF
struct virtchnl_iwarp_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
struct virtchnl_iwarp_qvlist_info {
u32 num_vectors;
struct virtchnl_iwarp_qv_info qv_info[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
#endif
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
* is in a reset state, it will return DEADBEEF, which, when masked,
* will result in 3.
*/
enum virtchnl_vfr_states {
VIRTCHNL_VFR_INPROGRESS = 0,
VIRTCHNL_VFR_COMPLETED,
VIRTCHNL_VFR_VFACTIVE,
};
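/* Illustrative sketch (assumption): a VF typically polls the VFR_STATE
 * field of VFGEN_RSTAT (defined in avf_register.h) after requesting a
 * reset:
 *
 *	u32 rstat = rd32(hw, AVFGEN_RSTAT) & AVFGEN_RSTAT_VFR_STATE_MASK;
 *	if (rstat == VIRTCHNL_VFR_COMPLETED ||
 *	    rstat == VIRTCHNL_VFR_VFACTIVE)
 *		... reset has finished ...
 *
 * rd32() stands in for the platform's register-read helper; DEADBEEF
 * masked by AVFGEN_RSTAT_VFR_STATE_MASK (0x3) yields the out-of-range
 * value 3 described above.
 */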
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
* @v_opcode: Opcode for the message
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* validate msg format against struct for each opcode
*/
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
int valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct virtchnl_version_info);
break;
case VIRTCHNL_OP_RESET_VF:
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(ver))
valid_len = sizeof(u32);
break;
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct virtchnl_txq_info);
break;
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
valid_len += (vqc->num_queue_pairs *
sizeof(struct
virtchnl_queue_pair_info));
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
valid_len += (vimi->num_vectors *
sizeof(struct virtchnl_vector_map));
if (vimi->num_vectors == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
valid_len = sizeof(struct virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
valid_len += veal->num_elements *
sizeof(struct virtchnl_ether_addr);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
valid_len += vfl->num_elements * sizeof(u16);
if (vfl->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct virtchnl_promisc_info);
break;
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
#ifdef VIRTCHNL_IWARP
case VIRTCHNL_OP_IWARP:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
*/
if (msglen)
valid_len = msglen;
else
err_msg_format = true;
break;
case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
break;
case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
if (msglen >= valid_len) {
struct virtchnl_iwarp_qvlist_info *qv =
(struct virtchnl_iwarp_qvlist_info *)msg;
if (qv->num_vectors == 0) {
err_msg_format = true;
break;
}
valid_len += ((qv->num_vectors - 1) *
sizeof(struct virtchnl_iwarp_qv_info));
}
break;
#endif
case VIRTCHNL_OP_CONFIG_RSS_KEY:
valid_len = sizeof(struct virtchnl_rss_key);
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
valid_len += vrk->key_len - 1;
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
valid_len = sizeof(struct virtchnl_rss_lut);
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
valid_len += vrl->lut_entries - 1;
}
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
return VIRTCHNL_ERR_PARAM;
}
/* few more checks */
if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
}
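/* Illustrative usage (assumption): a PF-side dispatcher would validate
 * every incoming VF message before acting on it, e.g.
 *
 *	if (virtchnl_vc_validate_vf_msg(&vf_ver, v_opcode, msg, msglen))
 *		... reject the message ...
 *
 * where vf_ver holds the version negotiated via VIRTCHNL_OP_VERSION and
 * a nonzero return is one of the virtchnl_status_code errors above.
 */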
#endif /* _VIRTCHNL_H_ */