common/idpf: introduce common library

Introduce common library for IDPF (Infrastructure Data
Path Function) PMD.
Add the base code and OS-specific implementation first.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Junfeng Guo 2022-10-31 08:33:29 +00:00 committed by Thomas Monjalon
parent c49c880ffe
commit fb4ac04e9b
22 changed files with 8475 additions and 0 deletions

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_ALLOC_H_
#define _IDPF_ALLOC_H_
/* Memory types */
enum idpf_memset_type {
IDPF_NONDMA_MEM = 0,
IDPF_DMA_MEM
};
/* Memcpy types */
enum idpf_memcpy_type {
IDPF_NONDMA_TO_NONDMA = 0,
IDPF_NONDMA_TO_DMA,
IDPF_DMA_TO_DMA,
IDPF_DMA_TO_NONDMA
};
#endif /* _IDPF_ALLOC_H_ */
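
A hedged illustration of how an OS-dependent layer might consume these enums. The real mapping lives in idpf_osdep.h and is not part of this header; the function name below and the use of rte_memcpy() are assumptions for illustration only.

/* Illustrative only: one way an OS layer could honor the memcpy types.
 * On DPDK both directions usually reduce to rte_memcpy(), so the type
 * mostly documents whether DMA-visible memory is involved in the copy.
 */
#include <rte_memcpy.h>

static inline void *
example_idpf_memcpy(void *dst, const void *src, size_t size,
		    enum idpf_memcpy_type type)
{
	/* DMA and non-DMA memory are both virtually addressed here, so the
	 * copy itself is identical for every type.
	 */
	(void)type;
	return rte_memcpy(dst, src, size);
}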

@@ -0,0 +1,364 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#include "idpf_type.h"
#include "idpf_prototype.h"
#include "virtchnl.h"
/**
* idpf_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
*
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
*/
int idpf_set_mac_type(struct idpf_hw *hw)
{
int status = 0;
DEBUGFUNC("Set MAC type\n");
if (hw->vendor_id == IDPF_INTEL_VENDOR_ID) {
switch (hw->device_id) {
case IDPF_DEV_ID_PF:
hw->mac.type = IDPF_MAC_PF;
break;
case IDPF_DEV_ID_VF:
hw->mac.type = IDPF_MAC_VF;
break;
default:
hw->mac.type = IDPF_MAC_GENERIC;
break;
}
} else {
status = -ENODEV;
}
DEBUGOUT2("Setting MAC type found mac: %d, returns: %d\n",
hw->mac.type, status);
return status;
}
/**
* idpf_init_hw - main initialization routine
* @hw: pointer to the hardware structure
* @ctlq_size: struct to pass ctlq size data
*/
int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size)
{
struct idpf_ctlq_create_info *q_info;
int status = 0;
struct idpf_ctlq_info *cq = NULL;
/* Setup initial control queues */
q_info = (struct idpf_ctlq_create_info *)
idpf_calloc(hw, 2, sizeof(struct idpf_ctlq_create_info));
if (!q_info)
return -ENOMEM;
q_info[0].type = IDPF_CTLQ_TYPE_MAILBOX_TX;
q_info[0].buf_size = ctlq_size.asq_buf_size;
q_info[0].len = ctlq_size.asq_ring_size;
q_info[0].id = -1; /* default queue */
if (hw->mac.type == IDPF_MAC_PF) {
q_info[0].reg.head = PF_FW_ATQH;
q_info[0].reg.tail = PF_FW_ATQT;
q_info[0].reg.len = PF_FW_ATQLEN;
q_info[0].reg.bah = PF_FW_ATQBAH;
q_info[0].reg.bal = PF_FW_ATQBAL;
q_info[0].reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
q_info[0].reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
q_info[0].reg.head_mask = PF_FW_ATQH_ATQH_M;
} else {
q_info[0].reg.head = VF_ATQH;
q_info[0].reg.tail = VF_ATQT;
q_info[0].reg.len = VF_ATQLEN;
q_info[0].reg.bah = VF_ATQBAH;
q_info[0].reg.bal = VF_ATQBAL;
q_info[0].reg.len_mask = VF_ATQLEN_ATQLEN_M;
q_info[0].reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
q_info[0].reg.head_mask = VF_ATQH_ATQH_M;
}
q_info[1].type = IDPF_CTLQ_TYPE_MAILBOX_RX;
q_info[1].buf_size = ctlq_size.arq_buf_size;
q_info[1].len = ctlq_size.arq_ring_size;
q_info[1].id = -1; /* default queue */
if (hw->mac.type == IDPF_MAC_PF) {
q_info[1].reg.head = PF_FW_ARQH;
q_info[1].reg.tail = PF_FW_ARQT;
q_info[1].reg.len = PF_FW_ARQLEN;
q_info[1].reg.bah = PF_FW_ARQBAH;
q_info[1].reg.bal = PF_FW_ARQBAL;
q_info[1].reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
q_info[1].reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
q_info[1].reg.head_mask = PF_FW_ARQH_ARQH_M;
} else {
q_info[1].reg.head = VF_ARQH;
q_info[1].reg.tail = VF_ARQT;
q_info[1].reg.len = VF_ARQLEN;
q_info[1].reg.bah = VF_ARQBAH;
q_info[1].reg.bal = VF_ARQBAL;
q_info[1].reg.len_mask = VF_ARQLEN_ARQLEN_M;
q_info[1].reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
q_info[1].reg.head_mask = VF_ARQH_ARQH_M;
}
status = idpf_ctlq_init(hw, 2, q_info);
if (status) {
/* TODO return error */
idpf_free(hw, q_info);
return status;
}
LIST_FOR_EACH_ENTRY(cq, &hw->cq_list_head, idpf_ctlq_info, cq_list) {
if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
hw->asq = cq;
else if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
hw->arq = cq;
}
/* TODO hardcode a mac addr for now */
hw->mac.addr[0] = 0x00;
hw->mac.addr[1] = 0x00;
hw->mac.addr[2] = 0x00;
hw->mac.addr[3] = 0x00;
hw->mac.addr[4] = 0x03;
hw->mac.addr[5] = 0x14;
return 0;
}
/**
* idpf_send_msg_to_cp
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* Send message to CP. By default, this message is sent asynchronously, i.e.
* this function does not wait for the completion before returning.
*/
int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode,
int v_retval, u8 *msg, u16 msglen)
{
struct idpf_ctlq_msg ctlq_msg = { 0 };
struct idpf_dma_mem dma_mem = { 0 };
int status;
ctlq_msg.opcode = idpf_mbq_opc_send_msg_to_pf;
ctlq_msg.func_id = 0;
ctlq_msg.data_len = msglen;
ctlq_msg.cookie.mbx.chnl_retval = v_retval;
ctlq_msg.cookie.mbx.chnl_opcode = v_opcode;
if (msglen > 0) {
dma_mem.va = (struct idpf_dma_mem *)
idpf_alloc_dma_mem(hw, &dma_mem, msglen);
if (!dma_mem.va)
return -ENOMEM;
idpf_memcpy(dma_mem.va, msg, msglen, IDPF_NONDMA_TO_DMA);
ctlq_msg.ctx.indirect.payload = &dma_mem;
}
status = idpf_ctlq_send(hw, hw->asq, 1, &ctlq_msg);
if (dma_mem.va)
idpf_free_dma_mem(hw, &dma_mem);
return status;
}
/**
* idpf_asq_done - check if FW has processed the Admin Send Queue
* @hw: pointer to the hw struct
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
*/
bool idpf_asq_done(struct idpf_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, hw->asq->reg.head) == hw->asq->next_to_use;
}
/**
* idpf_check_asq_alive
* @hw: pointer to the hw struct
*
* Returns true if Queue is enabled else false.
*/
bool idpf_check_asq_alive(struct idpf_hw *hw)
{
if (hw->asq->reg.len)
return !!(rd32(hw, hw->asq->reg.len) &
PF_FW_ATQLEN_ATQENABLE_M);
return false;
}
/**
* idpf_clean_arq_element
* @hw: pointer to the hw struct
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
* the contents through e. It can also return how many events are
* left to process through 'pending'
*/
int idpf_clean_arq_element(struct idpf_hw *hw,
struct idpf_arq_event_info *e, u16 *pending)
{
struct idpf_ctlq_msg msg = { 0 };
int status;
*pending = 1;
status = idpf_ctlq_recv(hw->arq, pending, &msg);
/* ctlq_msg does not align to ctlq_desc, so copy relevant data here */
e->desc.opcode = msg.opcode;
e->desc.cookie_high = msg.cookie.mbx.chnl_opcode;
e->desc.cookie_low = msg.cookie.mbx.chnl_retval;
e->desc.ret_val = msg.status;
e->desc.datalen = msg.data_len;
if (msg.data_len > 0) {
e->buf_len = msg.data_len;
idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, msg.data_len,
IDPF_DMA_TO_NONDMA);
}
return status;
}
/**
* idpf_deinit_hw - shutdown routine
* @hw: pointer to the hardware structure
*/
int idpf_deinit_hw(struct idpf_hw *hw)
{
hw->asq = NULL;
hw->arq = NULL;
return idpf_ctlq_deinit(hw);
}
/**
* idpf_reset
* @hw: pointer to the hardware structure
*
* Send a RESET message to the CPF. Does not wait for response from CPF
* as none will be forthcoming. Immediately after calling this function,
* the control queue should be shut down and (optionally) reinitialized.
*/
int idpf_reset(struct idpf_hw *hw)
{
return idpf_send_msg_to_cp(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0);
}
/**
* idpf_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set true, for VSI table set false
* @lut: pointer to the lut buffer provided by the caller
* @lut_size: size of the lut buffer
* @set: set true to set the table, false to get the table
*
* Internal function to get or set RSS look up table
*/
STATIC int idpf_get_set_rss_lut(struct idpf_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size,
bool set)
{
/* TODO fill out command */
return 0;
}
/**
* idpf_get_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set true, for VSI table set false
* @lut: pointer to the lut buffer provided by the caller
* @lut_size: size of the lut buffer
*
* get the RSS lookup table, PF or VSI type
*/
int idpf_get_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,
u8 *lut, u16 lut_size)
{
return idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false);
}
/**
* idpf_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set true, for VSI table set false
* @lut: pointer to the lut buffer provided by the caller
* @lut_size: size of the lut buffer
*
* set the RSS lookup table, PF or VSI type
*/
int idpf_set_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,
u8 *lut, u16 lut_size)
{
return idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
/**
* idpf_get_set_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
* @set: set true to set the key, false to get the key
*
* get the RSS key per VSI
*/
STATIC int idpf_get_set_rss_key(struct idpf_hw *hw, u16 vsi_id,
struct idpf_get_set_rss_key_data *key,
bool set)
{
/* TODO fill out command */
return 0;
}
/**
* idpf_get_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
*
*/
int idpf_get_rss_key(struct idpf_hw *hw, u16 vsi_id,
struct idpf_get_set_rss_key_data *key)
{
return idpf_get_set_rss_key(hw, vsi_id, key, false);
}
/**
* idpf_set_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
*
* set the RSS key per VSI
*/
int idpf_set_rss_key(struct idpf_hw *hw, u16 vsi_id,
struct idpf_get_set_rss_key_data *key)
{
return idpf_get_set_rss_key(hw, vsi_id, key, true);
}
RTE_LOG_REGISTER_DEFAULT(idpf_common_logger, NOTICE);
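
A hedged usage sketch, not part of the patch, showing the init/send/teardown flow built from the functions above. It assumes the caller has already mapped BAR0 into hw->hw_addr and filled in the PCI vendor/device IDs; ring and buffer sizes are illustrative values only.

#include "idpf_prototype.h"

static int example_mailbox_bringup(struct idpf_hw *hw)
{
	struct idpf_ctlq_size ctlq_size = {
		.asq_buf_size = 4096,
		.asq_ring_size = 64,
		.arq_buf_size = 4096,
		.arq_ring_size = 64,
	};
	int err;

	/* Pick the PF/VF register set based on hw->device_id. */
	err = idpf_set_mac_type(hw);
	if (err)
		return err;

	/* Create the default mailbox queues; hw->asq and hw->arq are set. */
	err = idpf_init_hw(hw, ctlq_size);
	if (err)
		return err;

	/* Fire-and-forget: idpf_reset() posts VIRTCHNL_OP_RESET_VF on the
	 * send queue and no reply is expected, so tear down afterwards.
	 */
	err = idpf_reset(hw);

	idpf_deinit_hw(hw);
	return err;
}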

@@ -0,0 +1,691 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#include "idpf_controlq.h"
/**
* idpf_ctlq_setup_regs - initialize control queue registers
* @cq: pointer to the specific control queue
* @q_create_info: structs containing info for each queue to be initialized
*/
static void
idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
struct idpf_ctlq_create_info *q_create_info)
{
/* set head and tail registers in our local struct */
cq->reg.head = q_create_info->reg.head;
cq->reg.tail = q_create_info->reg.tail;
cq->reg.len = q_create_info->reg.len;
cq->reg.bah = q_create_info->reg.bah;
cq->reg.bal = q_create_info->reg.bal;
cq->reg.len_mask = q_create_info->reg.len_mask;
cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
cq->reg.head_mask = q_create_info->reg.head_mask;
}
/**
* idpf_ctlq_init_regs - Initialize control queue registers
* @hw: pointer to hw struct
* @cq: pointer to the specific Control queue
* @is_rxq: true if receive control queue, false otherwise
*
* Initialize registers. The caller is expected to have already initialized the
* descriptor ring memory and buffer memory
*/
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
bool is_rxq)
{
/* Update tail to post pre-allocated buffers for rx queues */
if (is_rxq)
wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
/* For non-Mailbox control queues only TAIL needs to be set */
if (cq->q_id != -1)
return;
/* Clear Head for both send or receive */
wr32(hw, cq->reg.head, 0);
/* set starting point */
wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
/**
* idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
* @cq: pointer to the specific Control queue
*
* Record the address of the receive queue DMA buffers in the descriptors.
* The buffers must have been previously allocated.
*/
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
int i = 0;
for (i = 0; i < cq->ring_size; i++) {
struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
/* No buffer to post to descriptor, continue */
if (!bi)
continue;
desc->flags =
CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
desc->opcode = 0;
desc->datalen = (__le16)CPU_TO_LE16(bi->size);
desc->ret_val = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
desc->params.indirect.addr_high =
CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
desc->params.indirect.addr_low =
CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
desc->params.indirect.param0 = 0;
desc->params.indirect.param1 = 0;
}
}
/**
* idpf_ctlq_shutdown - shutdown the CQ
* @hw: pointer to hw struct
* @cq: pointer to the specific Control queue
*
* The main shutdown routine for any control queue
*/
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
idpf_acquire_lock(&cq->cq_lock);
if (!cq->ring_size)
goto shutdown_sq_out;
#ifdef SIMICS_BUILD
wr32(hw, cq->reg.head, 0);
wr32(hw, cq->reg.tail, 0);
wr32(hw, cq->reg.len, 0);
wr32(hw, cq->reg.bal, 0);
wr32(hw, cq->reg.bah, 0);
#endif /* SIMICS_BUILD */
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, cq);
/* Set ring_size to 0 to indicate uninitialized queue */
cq->ring_size = 0;
shutdown_sq_out:
idpf_release_lock(&cq->cq_lock);
idpf_destroy_lock(&cq->cq_lock);
}
/**
* idpf_ctlq_add - add one control queue
* @hw: pointer to hardware struct
* @qinfo: info for queue to be created
* @cq_out: (output) double pointer to control queue to be created
*
* Allocate and initialize a control queue and add it to the control queue list.
* The cq parameter will be allocated/initialized and passed back to the caller
* if no errors occur.
*
* Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
*/
int idpf_ctlq_add(struct idpf_hw *hw,
struct idpf_ctlq_create_info *qinfo,
struct idpf_ctlq_info **cq_out)
{
bool is_rxq = false;
int status = 0;
if (!qinfo->len || !qinfo->buf_size ||
qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
return -EINVAL;
*cq_out = (struct idpf_ctlq_info *)
idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
if (!(*cq_out))
return -ENOMEM;
(*cq_out)->cq_type = qinfo->type;
(*cq_out)->q_id = qinfo->id;
(*cq_out)->buf_size = qinfo->buf_size;
(*cq_out)->ring_size = qinfo->len;
(*cq_out)->next_to_use = 0;
(*cq_out)->next_to_clean = 0;
(*cq_out)->next_to_post = (*cq_out)->ring_size - 1;
switch (qinfo->type) {
case IDPF_CTLQ_TYPE_MAILBOX_RX:
is_rxq = true;
#ifdef __KERNEL__
fallthrough;
#else
/* fallthrough */
#endif /* __KERNEL__ */
case IDPF_CTLQ_TYPE_MAILBOX_TX:
status = idpf_ctlq_alloc_ring_res(hw, *cq_out);
break;
default:
status = -EINVAL;
break;
}
if (status)
goto init_free_q;
if (is_rxq) {
idpf_ctlq_init_rxq_bufs(*cq_out);
} else {
/* Allocate the array of msg pointers for TX queues */
(*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)
idpf_calloc(hw, qinfo->len,
sizeof(struct idpf_ctlq_msg *));
if (!(*cq_out)->bi.tx_msg) {
status = -ENOMEM;
goto init_dealloc_q_mem;
}
}
idpf_ctlq_setup_regs(*cq_out, qinfo);
idpf_ctlq_init_regs(hw, *cq_out, is_rxq);
idpf_init_lock(&(*cq_out)->cq_lock);
LIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list);
return status;
init_dealloc_q_mem:
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, *cq_out);
init_free_q:
idpf_free(hw, *cq_out);
return status;
}
/**
* idpf_ctlq_remove - deallocate and remove specified control queue
* @hw: pointer to hardware struct
* @cq: pointer to control queue to be removed
*/
void idpf_ctlq_remove(struct idpf_hw *hw,
struct idpf_ctlq_info *cq)
{
LIST_REMOVE(cq, cq_list);
idpf_ctlq_shutdown(hw, cq);
idpf_free(hw, cq);
}
/**
* idpf_ctlq_init - main initialization routine for all control queues
* @hw: pointer to hardware struct
* @num_q: number of queues to initialize
* @q_info: array of structs containing info for each queue to be initialized
*
* This initializes any number and any type of control queues. This is an all
* or nothing routine; if one fails, all previously allocated queues will be
* destroyed. This must be called prior to using the individual add/remove
* APIs.
*/
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
struct idpf_ctlq_create_info *q_info)
{
struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
int ret_code = 0;
int i = 0;
LIST_INIT(&hw->cq_list_head);
for (i = 0; i < num_q; i++) {
struct idpf_ctlq_create_info *qinfo = q_info + i;
ret_code = idpf_ctlq_add(hw, qinfo, &cq);
if (ret_code)
goto init_destroy_qs;
}
return ret_code;
init_destroy_qs:
LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
idpf_ctlq_info, cq_list)
idpf_ctlq_remove(hw, cq);
return ret_code;
}
/**
* idpf_ctlq_deinit - destroy all control queues
* @hw: pointer to hw struct
*/
int idpf_ctlq_deinit(struct idpf_hw *hw)
{
struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
int ret_code = 0;
LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
idpf_ctlq_info, cq_list)
idpf_ctlq_remove(hw, cq);
return ret_code;
}
/**
* idpf_ctlq_send - send command to Control Queue (CTQ)
* @hw: pointer to hw struct
* @cq: handle to control queue struct to send on
* @num_q_msg: number of messages to send on control queue
* @q_msg: pointer to array of queue messages to be sent
*
* The caller is expected to allocate DMAable buffers and pass them to the
* send routine via the q_msg struct / control queue specific data struct.
* The control queue will hold a reference to each send message until
* the completion for that message has been cleaned.
*/
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
struct idpf_ctlq_desc *desc;
int num_desc_avail = 0;
int status = 0;
int i = 0;
if (!cq || !cq->ring_size)
return -ENOBUFS;
idpf_acquire_lock(&cq->cq_lock);
/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
status = -ENOSPC;
goto sq_send_command_out;
}
for (i = 0; i < num_q_msg; i++) {
struct idpf_ctlq_msg *msg = &q_msg[i];
u64 msg_cookie;
desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
desc->opcode = CPU_TO_LE16(msg->opcode);
desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
msg_cookie = *(u64 *)&msg->cookie;
desc->cookie_high =
CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
desc->cookie_low =
CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
IDPF_CTLQ_FLAG_HOST_ID_S);
if (msg->data_len) {
struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
desc->datalen |= CPU_TO_LE16(msg->data_len);
desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
/* Update the address values in the desc with the pa
* value for respective buffer
*/
desc->params.indirect.addr_high =
CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
desc->params.indirect.addr_low =
CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
idpf_memcpy(&desc->params, msg->ctx.indirect.context,
IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
/* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
* need to set peer PF function id in param0 for Simics
*/
if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
desc->params.indirect.param0 =
CPU_TO_LE32(msg->func_id);
}
#endif
} else {
idpf_memcpy(&desc->params, msg->ctx.direct,
IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
#ifdef SIMICS_BUILD
/* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
* need to set peer PF function id in param0 for Simics
*/
if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
desc->params.direct.param0 =
CPU_TO_LE32(msg->func_id);
}
#endif
}
/* Store buffer info */
cq->bi.tx_msg[cq->next_to_use] = msg;
(cq->next_to_use)++;
if (cq->next_to_use == cq->ring_size)
cq->next_to_use = 0;
}
/* Force memory write to complete before letting hardware
* know that there are new descriptors to fetch.
*/
idpf_wmb();
wr32(hw, cq->reg.tail, cq->next_to_use);
sq_send_command_out:
idpf_release_lock(&cq->cq_lock);
return status;
}
/**
* idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
* requested queue
* @cq: pointer to the specific Control queue
* @clean_count: (input|output) number of descriptors to clean as input, and
* number of descriptors actually cleaned as output
* @msg_status: (output) pointer to msg pointer array to be populated; needs
* to be allocated by caller
*
* Returns an array of message pointers associated with the cleaned
* descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
* descriptors. The status will be returned for each; any messages that failed
* to send will have a non-zero status. The caller is expected to free original
* ctlq_msgs and free or reuse the DMA buffers.
*/
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
struct idpf_ctlq_msg *msg_status[])
{
struct idpf_ctlq_desc *desc;
u16 i = 0, num_to_clean;
u16 ntc, desc_err;
int ret = 0;
if (!cq || !cq->ring_size)
return -ENOBUFS;
if (*clean_count == 0)
return 0;
if (*clean_count > cq->ring_size)
return -EINVAL;
idpf_acquire_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
num_to_clean = *clean_count;
for (i = 0; i < num_to_clean; i++) {
/* Fetch next descriptor and check if marked as done */
desc = IDPF_CTLQ_DESC(cq, ntc);
if (!(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
desc_err = LE16_TO_CPU(desc->ret_val);
if (desc_err) {
/* strip off FW internal code */
desc_err &= 0xff;
}
msg_status[i] = cq->bi.tx_msg[ntc];
msg_status[i]->status = desc_err;
cq->bi.tx_msg[ntc] = NULL;
/* Zero out any stale data */
idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
ntc++;
if (ntc == cq->ring_size)
ntc = 0;
}
cq->next_to_clean = ntc;
idpf_release_lock(&cq->cq_lock);
/* Return number of descriptors actually cleaned */
*clean_count = i;
return ret;
}
/**
* idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
* @hw: pointer to hw struct
* @cq: pointer to control queue handle
* @buff_count: (input|output) input is number of buffers caller is trying to
* return; output is number of buffers that were not posted
* @buffs: array of pointers to dma mem structs to be given to hardware
*
* Caller uses this function to return DMA buffers to the descriptor ring after
* consuming them; buff_count will be the number of buffers.
*
* Note: this function needs to be called after a receive call even
* if there are no DMA buffers to be returned, i.e. buff_count = 0,
* buffs = NULL to support direct commands
*/
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs)
{
struct idpf_ctlq_desc *desc;
u16 ntp = cq->next_to_post;
bool buffs_avail = false;
u16 tbp = ntp + 1;
int status = 0;
int i = 0;
if (*buff_count > cq->ring_size)
return -EINVAL;
if (*buff_count > 0)
buffs_avail = true;
idpf_acquire_lock(&cq->cq_lock);
if (tbp >= cq->ring_size)
tbp = 0;
if (tbp == cq->next_to_clean)
/* Nothing to do */
goto post_buffs_out;
/* Post buffers for as many as provided or up until the last one used */
while (ntp != cq->next_to_clean) {
desc = IDPF_CTLQ_DESC(cq, ntp);
if (cq->bi.rx_buff[ntp])
goto fill_desc;
if (!buffs_avail) {
/* If the caller hasn't given us any buffers or
* there are none left, search the ring itself
* for an available buffer to move to this
* entry starting at the next entry in the ring
*/
tbp = ntp + 1;
/* Wrap ring if necessary */
if (tbp >= cq->ring_size)
tbp = 0;
while (tbp != cq->next_to_clean) {
if (cq->bi.rx_buff[tbp]) {
cq->bi.rx_buff[ntp] =
cq->bi.rx_buff[tbp];
cq->bi.rx_buff[tbp] = NULL;
/* Found a buffer, no need to
* search anymore
*/
break;
}
/* Wrap ring if necessary */
tbp++;
if (tbp >= cq->ring_size)
tbp = 0;
}
if (tbp == cq->next_to_clean)
goto post_buffs_out;
} else {
/* Give back pointer to DMA buffer */
cq->bi.rx_buff[ntp] = buffs[i];
i++;
if (i >= *buff_count)
buffs_avail = false;
}
fill_desc:
desc->flags =
CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
/* Post buffers to descriptor */
desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
desc->params.indirect.addr_high =
CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
desc->params.indirect.addr_low =
CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
ntp++;
if (ntp == cq->ring_size)
ntp = 0;
}
post_buffs_out:
/* Only update tail if buffers were actually posted */
if (cq->next_to_post != ntp) {
if (ntp)
/* Update next_to_post to ntp - 1 since current ntp
* will not have a buffer
*/
cq->next_to_post = ntp - 1;
else
/* Wrap to the end of the ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
wr32(hw, cq->reg.tail, cq->next_to_post);
}
idpf_release_lock(&cq->cq_lock);
/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
return status;
}
/**
* idpf_ctlq_recv - receive control queue message call back
* @cq: pointer to control queue handle to receive on
* @num_q_msg: (input|output) input number of messages that should be received;
* output number of messages actually received
* @q_msg: (output) array of received control queue messages on this q;
* needs to be pre-allocated by caller for as many messages as requested
*
* Called by interrupt handler or polling mechanism. Caller is expected
* to free buffers
*/
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
{
u16 num_to_clean, ntc, ret_val, flags;
struct idpf_ctlq_desc *desc;
int ret_code = 0;
u16 i = 0;
if (!cq || !cq->ring_size)
return -ENOBUFS;
if (*num_q_msg == 0)
return 0;
else if (*num_q_msg > cq->ring_size)
return -EINVAL;
/* take the lock before we start messing with the ring */
idpf_acquire_lock(&cq->cq_lock);
ntc = cq->next_to_clean;
num_to_clean = *num_q_msg;
for (i = 0; i < num_to_clean; i++) {
u64 msg_cookie;
/* Fetch next descriptor and check if marked as done */
desc = IDPF_CTLQ_DESC(cq, ntc);
flags = LE16_TO_CPU(desc->flags);
if (!(flags & IDPF_CTLQ_FLAG_DD))
break;
ret_val = LE16_TO_CPU(desc->ret_val);
q_msg[i].vmvf_type = (flags &
(IDPF_CTLQ_FLAG_FTYPE_VM |
IDPF_CTLQ_FLAG_FTYPE_PF)) >>
IDPF_CTLQ_FLAG_FTYPE_S;
if (flags & IDPF_CTLQ_FLAG_ERR)
ret_code = -EBADMSG;
msg_cookie = (u64)LE32_TO_CPU(desc->cookie_high) << 32;
msg_cookie |= (u64)LE32_TO_CPU(desc->cookie_low);
idpf_memcpy(&q_msg[i].cookie, &msg_cookie, sizeof(u64),
IDPF_NONDMA_TO_NONDMA);
q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
q_msg[i].status = ret_val;
if (desc->datalen) {
idpf_memcpy(q_msg[i].ctx.indirect.context,
&desc->params.indirect,
IDPF_INDIRECT_CTX_SIZE,
IDPF_DMA_TO_NONDMA);
/* Assign pointer to dma buffer to ctlq_msg array
* to be given to upper layer
*/
q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
/* Zero out pointer to DMA buffer info;
* will be repopulated by post buffers API
*/
cq->bi.rx_buff[ntc] = NULL;
} else {
idpf_memcpy(q_msg[i].ctx.direct,
desc->params.raw,
IDPF_DIRECT_CTX_SIZE,
IDPF_DMA_TO_NONDMA);
}
/* Zero out stale data in descriptor */
idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
IDPF_DMA_MEM);
ntc++;
if (ntc == cq->ring_size)
ntc = 0;
};
cq->next_to_clean = ntc;
idpf_release_lock(&cq->cq_lock);
*num_q_msg = i;
if (*num_q_msg == 0)
ret_code = -ENOMSG;
return ret_code;
}
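
A hedged sketch, not part of the patch, of one steady-state cycle on the default mailbox using the APIs above: post with idpf_ctlq_send(), reclaim with idpf_ctlq_clean_sq(), poll replies with idpf_ctlq_recv(), and return the RX buffer with idpf_ctlq_post_rx_buffs(). The request "req" is assumed to be prepared by the caller, with any indirect payload allocated via idpf_alloc_dma_mem(); error handling is compressed.

#include "idpf_controlq.h"

static int example_mailbox_cycle(struct idpf_hw *hw, struct idpf_ctlq_msg *req)
{
	struct idpf_ctlq_msg *clean_stat[1];
	struct idpf_ctlq_msg reply = { 0 };
	struct idpf_dma_mem *rx_buf;
	u16 num;
	int err;

	/* 1. Post the request on the send queue (ASQ). */
	err = idpf_ctlq_send(hw, hw->asq, 1, req);
	if (err)
		return err;

	/* 2. Try to reclaim the descriptor; on return num holds how many
	 *    were actually cleaned (0 if HW has not written it back yet).
	 */
	num = 1;
	err = idpf_ctlq_clean_sq(hw->asq, &num, clean_stat);
	if (err)
		return err;

	/* 3. Poll the receive queue (ARQ) for a reply. */
	num = 1;
	err = idpf_ctlq_recv(hw->arq, &num, &reply);
	if (err == -ENOMSG)
		return 0;	/* nothing has arrived yet */
	if (err)
		return err;

	/* 4. Give the RX DMA buffer back so it can be re-posted; this is
	 *    expected after every receive, even with zero buffers.
	 */
	rx_buf = reply.data_len ? reply.ctx.indirect.payload : NULL;
	num = reply.data_len ? 1 : 0;
	return idpf_ctlq_post_rx_buffs(hw, hw->arq, &num, &rx_buf);
}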

@@ -0,0 +1,224 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_CONTROLQ_H_
#define _IDPF_CONTROLQ_H_
#ifdef __KERNEL__
#include <linux/slab.h>
#endif
#ifndef __KERNEL__
#include "idpf_osdep.h"
#include "idpf_alloc.h"
#endif
#include "idpf_controlq_api.h"
/* Maximum buffer lengths for all control queue types */
#define IDPF_CTLQ_MAX_RING_SIZE 1024
#define IDPF_CTLQ_MAX_BUF_LEN 4096
#define IDPF_CTLQ_DESC(R, i) \
(&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))
#define IDPF_CTLQ_DESC_UNUSED(R) \
((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->ring_size) + \
(R)->next_to_clean - (R)->next_to_use - 1))
#ifndef __KERNEL__
/* Data type manipulation macros. */
#define IDPF_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define IDPF_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
#define IDPF_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
#define IDPF_LO_WORD(x) ((u16)((x) & 0xFFFF))
#endif
/* Control Queue default settings */
#define IDPF_CTRL_SQ_CMD_TIMEOUT 250 /* msecs */
struct idpf_ctlq_desc {
__le16 flags;
__le16 opcode;
__le16 datalen; /* 0 for direct commands */
union {
__le16 ret_val;
__le16 pfid_vfid;
#define IDPF_CTLQ_DESC_VF_ID_S 0
#ifdef SIMICS_BUILD
#define IDPF_CTLQ_DESC_VF_ID_M (0x3FF << IDPF_CTLQ_DESC_VF_ID_S)
#define IDPF_CTLQ_DESC_PF_ID_S 10
#define IDPF_CTLQ_DESC_PF_ID_M (0x3F << IDPF_CTLQ_DESC_PF_ID_S)
#else
#define IDPF_CTLQ_DESC_VF_ID_M (0x7FF << IDPF_CTLQ_DESC_VF_ID_S)
#define IDPF_CTLQ_DESC_PF_ID_S 11
#define IDPF_CTLQ_DESC_PF_ID_M (0x1F << IDPF_CTLQ_DESC_PF_ID_S)
#endif
};
__le32 cookie_high;
__le32 cookie_low;
union {
struct {
__le32 param0;
__le32 param1;
__le32 param2;
__le32 param3;
} direct;
struct {
__le32 param0;
__le32 param1;
__le32 addr_high;
__le32 addr_low;
} indirect;
u8 raw[16];
} params;
};
/* Flags sub-structure
* |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
* |DD |CMP|ERR| * RSV * |FTYPE | *RSV* |RD |VFC|BUF| HOST_ID |
*/
/* command flags and offsets */
#define IDPF_CTLQ_FLAG_DD_S 0
#define IDPF_CTLQ_FLAG_CMP_S 1
#define IDPF_CTLQ_FLAG_ERR_S 2
#define IDPF_CTLQ_FLAG_FTYPE_S 6
#define IDPF_CTLQ_FLAG_RD_S 10
#define IDPF_CTLQ_FLAG_VFC_S 11
#define IDPF_CTLQ_FLAG_BUF_S 12
#define IDPF_CTLQ_FLAG_HOST_ID_S 13
#define IDPF_CTLQ_FLAG_DD BIT(IDPF_CTLQ_FLAG_DD_S) /* 0x1 */
#define IDPF_CTLQ_FLAG_CMP BIT(IDPF_CTLQ_FLAG_CMP_S) /* 0x2 */
#define IDPF_CTLQ_FLAG_ERR BIT(IDPF_CTLQ_FLAG_ERR_S) /* 0x4 */
#define IDPF_CTLQ_FLAG_FTYPE_VM BIT(IDPF_CTLQ_FLAG_FTYPE_S) /* 0x40 */
#define IDPF_CTLQ_FLAG_FTYPE_PF BIT(IDPF_CTLQ_FLAG_FTYPE_S + 1) /* 0x80 */
#define IDPF_CTLQ_FLAG_RD BIT(IDPF_CTLQ_FLAG_RD_S) /* 0x400 */
#define IDPF_CTLQ_FLAG_VFC BIT(IDPF_CTLQ_FLAG_VFC_S) /* 0x800 */
#define IDPF_CTLQ_FLAG_BUF BIT(IDPF_CTLQ_FLAG_BUF_S) /* 0x1000 */
/* Host ID is a special field that has 3b and not a 1b flag */
#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S)
struct idpf_mbxq_desc {
u8 pad[8]; /* CTLQ flags/opcode/len/retval fields */
u32 chnl_opcode; /* avoid confusion with desc->opcode */
u32 chnl_retval; /* ditto for desc->retval */
u32 pf_vf_id; /* used by CP when sending to PF */
};
enum idpf_mac_type {
IDPF_MAC_UNKNOWN = 0,
IDPF_MAC_PF,
IDPF_MAC_VF,
IDPF_MAC_GENERIC
};
#define ETH_ALEN 6
struct idpf_mac_info {
enum idpf_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
};
#define IDPF_AQ_LINK_UP 0x1
/* PCI bus types */
enum idpf_bus_type {
idpf_bus_type_unknown = 0,
idpf_bus_type_pci,
idpf_bus_type_pcix,
idpf_bus_type_pci_express,
idpf_bus_type_reserved
};
/* PCI bus speeds */
enum idpf_bus_speed {
idpf_bus_speed_unknown = 0,
idpf_bus_speed_33 = 33,
idpf_bus_speed_66 = 66,
idpf_bus_speed_100 = 100,
idpf_bus_speed_120 = 120,
idpf_bus_speed_133 = 133,
idpf_bus_speed_2500 = 2500,
idpf_bus_speed_5000 = 5000,
idpf_bus_speed_8000 = 8000,
idpf_bus_speed_reserved
};
/* PCI bus widths */
enum idpf_bus_width {
idpf_bus_width_unknown = 0,
idpf_bus_width_pcie_x1 = 1,
idpf_bus_width_pcie_x2 = 2,
idpf_bus_width_pcie_x4 = 4,
idpf_bus_width_pcie_x8 = 8,
idpf_bus_width_32 = 32,
idpf_bus_width_64 = 64,
idpf_bus_width_reserved
};
/* Bus parameters */
struct idpf_bus_info {
enum idpf_bus_speed speed;
enum idpf_bus_width width;
enum idpf_bus_type type;
u16 func;
u16 device;
u16 lan_id;
u16 bus_id;
};
/* Function specific capabilities */
struct idpf_hw_func_caps {
u32 num_alloc_vfs;
u32 vf_base_id;
};
/* Define the IDPF hardware struct to replace other control structs as needed
* Align to ctlq_hw_info
*/
struct idpf_hw {
/* Some part of BAR0 address space is not mapped by the LAN driver.
* This results in 2 regions of BAR0 to be mapped by LAN driver which
* will have its own base hardware address when mapped.
*/
u8 *hw_addr;
u8 *hw_addr_region2;
u64 hw_addr_len;
u64 hw_addr_region2_len;
void *back;
/* control queue - send and receive */
struct idpf_ctlq_info *asq;
struct idpf_ctlq_info *arq;
/* subsystem structs */
struct idpf_mac_info mac;
struct idpf_bus_info bus;
struct idpf_hw_func_caps func_caps;
/* pci info */
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
LIST_HEAD_TYPE(list_head, idpf_ctlq_info) cq_list_head;
};
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw,
struct idpf_ctlq_info *cq);
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
/* prototype for functions used for dynamic memory allocation */
void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,
u64 size);
void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem);
#endif /* _IDPF_CONTROLQ_H_ */
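
A hedged sketch of the environment-provided DMA helpers whose prototypes appear above. The actual DPDK implementation is supplied elsewhere in the common code and may use memzones; rte_zmalloc()/rte_malloc_virt2iova() are used here only to show the expected contract (va/pa/size filled in on allocation, released on free).

#include <rte_malloc.h>

void *example_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,
			    u64 size)
{
	(void)hw;
	mem->va = rte_zmalloc("idpf_dma", size, 4096); /* page aligned */
	if (!mem->va)
		return NULL;
	mem->pa = rte_malloc_virt2iova(mem->va);
	mem->size = size;
	return mem->va;
}

void example_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
	(void)hw;
	rte_free(mem->va);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;
}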

@@ -0,0 +1,207 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_CONTROLQ_API_H_
#define _IDPF_CONTROLQ_API_H_
#ifdef __KERNEL__
#include "idpf_mem.h"
#else /* !__KERNEL__ */
#include "idpf_osdep.h"
#endif /* !__KERNEL__ */
struct idpf_hw;
/* Used for queue init, response and events */
enum idpf_ctlq_type {
IDPF_CTLQ_TYPE_MAILBOX_TX = 0,
IDPF_CTLQ_TYPE_MAILBOX_RX = 1,
IDPF_CTLQ_TYPE_CONFIG_TX = 2,
IDPF_CTLQ_TYPE_CONFIG_RX = 3,
IDPF_CTLQ_TYPE_EVENT_RX = 4,
IDPF_CTLQ_TYPE_RDMA_TX = 5,
IDPF_CTLQ_TYPE_RDMA_RX = 6,
IDPF_CTLQ_TYPE_RDMA_COMPL = 7
};
/*
* Generic Control Queue Structures
*/
struct idpf_ctlq_reg {
/* used for queue tracking */
u32 head;
u32 tail;
/* Below applies only to default mb (if present) */
u32 len;
u32 bah;
u32 bal;
u32 len_mask;
u32 len_ena_mask;
u32 head_mask;
};
/* Generic queue msg structure */
struct idpf_ctlq_msg {
u8 vmvf_type; /* represents the source of the message on recv */
#define IDPF_VMVF_TYPE_VF 0
#define IDPF_VMVF_TYPE_VM 1
#define IDPF_VMVF_TYPE_PF 2
u8 host_id;
/* 3b field used only when sending a message to peer - to be used in
* combination with target func_id to route the message
*/
#define IDPF_HOST_ID_MASK 0x7
u16 opcode;
u16 data_len; /* data_len = 0 when no payload is attached */
union {
u16 func_id; /* when sending a message */
u16 status; /* when receiving a message */
};
union {
struct {
u32 chnl_retval;
u32 chnl_opcode;
} mbx;
} cookie;
union {
#define IDPF_DIRECT_CTX_SIZE 16
#define IDPF_INDIRECT_CTX_SIZE 8
/* 16 bytes of context can be provided or 8 bytes of context
* plus the address of a DMA buffer
*/
u8 direct[IDPF_DIRECT_CTX_SIZE];
struct {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
} ctx;
};
/* Generic queue info structures */
/* MB, CONFIG and EVENT q do not have extended info */
struct idpf_ctlq_create_info {
enum idpf_ctlq_type type;
int id; /* absolute queue offset passed as input
* -1 for default mailbox if present
*/
u16 len; /* Queue length passed as input */
u16 buf_size; /* buffer size passed as input */
u64 base_address; /* output, HPA of the Queue start */
struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
int ext_info_size;
void *ext_info; /* Specific to q type */
};
/* Control Queue information */
struct idpf_ctlq_info {
LIST_ENTRY_TYPE(idpf_ctlq_info) cq_list;
enum idpf_ctlq_type cq_type;
int q_id;
idpf_lock cq_lock; /* queue lock
* idpf_lock is defined in OSdep.h
*/
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
u16 next_to_post; /* starting descriptor to post buffers
* to after recv
*/
struct idpf_dma_mem desc_ring; /* descriptor ring memory
* idpf_dma_mem is defined in OSdep.h
*/
union {
struct idpf_dma_mem **rx_buff;
struct idpf_ctlq_msg **tx_msg;
} bi;
u16 buf_size; /* queue buffer size */
u16 ring_size; /* Number of descriptors */
struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
};
/* PF/VF mailbox commands */
enum idpf_mbx_opc {
/* idpf_mbq_opc_send_msg_to_pf:
* usage: used by PF or VF to send a message to its CPF
* target: RX queue and function ID of parent PF taken from HW
*/
idpf_mbq_opc_send_msg_to_pf = 0x0801,
/* idpf_mbq_opc_send_msg_to_vf:
* usage: used by PF to send message to a VF
* target: VF control queue ID must be specified in descriptor
*/
idpf_mbq_opc_send_msg_to_vf = 0x0802,
/* idpf_mbq_opc_send_msg_to_peer_pf:
* usage: used by any function to send message to any peer PF
* target: RX queue and host of parent PF taken from HW
*/
idpf_mbq_opc_send_msg_to_peer_pf = 0x0803,
/* idpf_mbq_opc_send_msg_to_peer_drv:
* usage: used by any function to send message to any peer driver
* target: RX queue and target host must be specified in descriptor
*/
idpf_mbq_opc_send_msg_to_peer_drv = 0x0804,
};
/*
* API supported for control queue management
*/
/* Will init all required q including default mb. "q_info" is an array of
* create_info structs equal to the number of control queues to be created.
*/
__rte_internal
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
struct idpf_ctlq_create_info *q_info);
/* Allocate and initialize a single control queue, which will be added to the
* control queue list; returns a handle to the created control queue
*/
int idpf_ctlq_add(struct idpf_hw *hw,
struct idpf_ctlq_create_info *qinfo,
struct idpf_ctlq_info **cq);
/* Deinitialize and deallocate a single control queue */
void idpf_ctlq_remove(struct idpf_hw *hw,
struct idpf_ctlq_info *cq);
/* Sends messages to HW and will also free the buffer */
__rte_internal
int idpf_ctlq_send(struct idpf_hw *hw,
struct idpf_ctlq_info *cq,
u16 num_q_msg,
struct idpf_ctlq_msg q_msg[]);
/* Receives messages and called by interrupt handler/polling
* initiated by app/process. Also caller is supposed to free the buffers
*/
__rte_internal
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg);
/* Reclaims send descriptors on HW write back */
__rte_internal
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
struct idpf_ctlq_msg *msg_status[]);
/* Indicate RX buffers are done being processed */
__rte_internal
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,
struct idpf_ctlq_info *cq,
u16 *buff_count,
struct idpf_dma_mem **buffs);
/* Will destroy all q including the default mb */
__rte_internal
int idpf_ctlq_deinit(struct idpf_hw *hw);
#endif /* _IDPF_CONTROLQ_API_H_ */
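
A hedged sketch, not part of the patch, of adding one extra control queue after idpf_ctlq_init() has created the default mailbox. Sizes and the tail register offset are placeholders; in this version of the library only the mailbox TX/RX types are accepted by idpf_ctlq_add().

static int example_add_ctlq(struct idpf_hw *hw, struct idpf_ctlq_info **cq)
{
	struct idpf_ctlq_create_info qinfo = {
		.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
		.id = 1,          /* anything but -1, i.e. not the default mailbox */
		.len = 64,        /* descriptors, bounded by IDPF_CTLQ_MAX_RING_SIZE */
		.buf_size = 4096, /* bytes, bounded by IDPF_CTLQ_MAX_BUF_LEN */
	};

	/* For a non-default RX queue only the tail register is written during
	 * init, so reg.tail is the one offset that must point at real
	 * hardware; 0 is a placeholder here.
	 */
	qinfo.reg.tail = 0;

	return idpf_ctlq_add(hw, &qinfo, cq);
}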

@@ -0,0 +1,179 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#include "idpf_controlq.h"
/**
* idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
* @hw: pointer to hw struct
* @cq: pointer to the specific Control queue
*/
static int
idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
struct idpf_ctlq_info *cq)
{
size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
if (!cq->desc_ring.va)
return -ENOMEM;
return 0;
}
/**
* idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
* @hw: pointer to hw struct
* @cq: pointer to the specific Control queue
*
* Allocate the buffer head for all control queues, and if it's a receive
* queue, allocate DMA buffers
*/
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
struct idpf_ctlq_info *cq)
{
int i = 0;
/* Do not allocate DMA buffers for transmit queues */
if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
return 0;
/* We'll be allocating the buffer info memory first, then we can
* allocate the mapped buffers for the event processing
*/
cq->bi.rx_buff = (struct idpf_dma_mem **)
idpf_calloc(hw, cq->ring_size,
sizeof(struct idpf_dma_mem *));
if (!cq->bi.rx_buff)
return -ENOMEM;
/* allocate the mapped buffers (except for the last one) */
for (i = 0; i < cq->ring_size - 1; i++) {
struct idpf_dma_mem *bi;
int num = 1; /* number of idpf_dma_mem to be allocated */
cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc(hw, num,
sizeof(struct idpf_dma_mem));
if (!cq->bi.rx_buff[i])
goto unwind_alloc_cq_bufs;
bi = cq->bi.rx_buff[i];
bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
if (!bi->va) {
/* unwind will not free the failed entry */
idpf_free(hw, cq->bi.rx_buff[i]);
goto unwind_alloc_cq_bufs;
}
}
return 0;
unwind_alloc_cq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--) {
idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
idpf_free(hw, cq->bi.rx_buff[i]);
}
idpf_free(hw, cq->bi.rx_buff);
return -ENOMEM;
}
/**
* idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
* @hw: pointer to hw struct
* @cq: pointer to the specific Control queue
*
* This assumes the posted send buffers have already been cleaned
* and de-allocated
*/
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
struct idpf_ctlq_info *cq)
{
idpf_free_dma_mem(hw, &cq->desc_ring);
}
/**
* idpf_ctlq_free_bufs - Free CQ buffer info elements
* @hw: pointer to hw struct
* @cq: pointer to the specific Control queue
*
* Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
* queues. The upper layers are expected to manage freeing of TX DMA buffers
*/
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
void *bi;
if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
int i;
/* free DMA buffers for rx queues*/
for (i = 0; i < cq->ring_size; i++) {
if (cq->bi.rx_buff[i]) {
idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
idpf_free(hw, cq->bi.rx_buff[i]);
}
}
bi = (void *)cq->bi.rx_buff;
} else {
bi = (void *)cq->bi.tx_msg;
}
/* free the buffer header */
idpf_free(hw, bi);
}
/**
* idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
* @hw: pointer to hw struct
* @cq: pointer to the specific Control queue
*
* Free the memory used by the ring, buffers and other related structures
*/
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
/* free ring buffers and the ring itself */
idpf_ctlq_free_bufs(hw, cq);
idpf_ctlq_free_desc_ring(hw, cq);
}
/**
* idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
* @hw: pointer to hw struct
* @cq: pointer to control queue struct
*
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
int ret_code;
/* verify input for valid configuration */
if (!cq->ring_size || !cq->buf_size)
return -EINVAL;
/* allocate the ring memory */
ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
if (ret_code)
return ret_code;
/* allocate buffers in the rings */
ret_code = idpf_ctlq_alloc_bufs(hw, cq);
if (ret_code)
goto idpf_init_cq_free_ring;
/* success! */
return 0;
idpf_init_cq_free_ring:
idpf_free_dma_mem(hw, &cq->desc_ring);
return ret_code;
}

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_DEVIDS_H_
#define _IDPF_DEVIDS_H_
/* Vendor ID */
#define IDPF_INTEL_VENDOR_ID 0x8086
/* Device IDs */
#define IDPF_DEV_ID_PF 0x1452
#define IDPF_DEV_ID_VF 0x1889
#endif /* _IDPF_DEVIDS_H_ */

@@ -0,0 +1,134 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_LAN_PF_REGS_H_
#define _IDPF_LAN_PF_REGS_H_
/* Receive queues */
#define PF_QRX_BASE 0x00000000
#define PF_QRX_TAIL(_QRX) (PF_QRX_BASE + (((_QRX) * 0x1000)))
#define PF_QRX_BUFFQ_BASE 0x03000000
#define PF_QRX_BUFFQ_TAIL(_QRX) (PF_QRX_BUFFQ_BASE + (((_QRX) * 0x1000)))
/* Transmit queues */
#define PF_QTX_BASE 0x05000000
#define PF_QTX_COMM_DBELL(_DBQM) (PF_QTX_BASE + ((_DBQM) * 0x1000))
/* Control(PF Mailbox) Queue */
#define PF_FW_BASE 0x08400000
#define PF_FW_ARQBAL (PF_FW_BASE)
#define PF_FW_ARQBAH (PF_FW_BASE + 0x4)
#define PF_FW_ARQLEN (PF_FW_BASE + 0x8)
#define PF_FW_ARQLEN_ARQLEN_S 0
#define PF_FW_ARQLEN_ARQLEN_M MAKEMASK(0x1FFF, PF_FW_ARQLEN_ARQLEN_S)
#define PF_FW_ARQLEN_ARQVFE_S 28
#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
#define PF_FW_ARQLEN_ARQOVFL_S 29
#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
#define PF_FW_ARQLEN_ARQCRIT_S 30
#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
#define PF_FW_ARQLEN_ARQENABLE_S 31
#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
#define PF_FW_ARQH (PF_FW_BASE + 0xC)
#define PF_FW_ARQH_ARQH_S 0
#define PF_FW_ARQH_ARQH_M MAKEMASK(0x1FFF, PF_FW_ARQH_ARQH_S)
#define PF_FW_ARQT (PF_FW_BASE + 0x10)
#define PF_FW_ATQBAL (PF_FW_BASE + 0x14)
#define PF_FW_ATQBAH (PF_FW_BASE + 0x18)
#define PF_FW_ATQLEN (PF_FW_BASE + 0x1C)
#define PF_FW_ATQLEN_ATQLEN_S 0
#define PF_FW_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
#define PF_FW_ATQLEN_ATQVFE_S 28
#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
#define PF_FW_ATQLEN_ATQOVFL_S 29
#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
#define PF_FW_ATQLEN_ATQCRIT_S 30
#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
#define PF_FW_ATQLEN_ATQENABLE_S 31
#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
#define PF_FW_ATQH (PF_FW_BASE + 0x20)
#define PF_FW_ATQH_ATQH_S 0
#define PF_FW_ATQH_ATQH_M MAKEMASK(0x3FF, PF_FW_ATQH_ATQH_S)
#define PF_FW_ATQT (PF_FW_BASE + 0x24)
/* Interrupts */
#define PF_GLINT_BASE 0x08900000
#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
#define PF_GLINT_DYN_CTL_INTENA_S 0
#define PF_GLINT_DYN_CTL_INTENA_M BIT(PF_GLINT_DYN_CTL_INTENA_S)
#define PF_GLINT_DYN_CTL_CLEARPBA_S 1
#define PF_GLINT_DYN_CTL_CLEARPBA_M BIT(PF_GLINT_DYN_CTL_CLEARPBA_S)
#define PF_GLINT_DYN_CTL_SWINT_TRIG_S 2
#define PF_GLINT_DYN_CTL_SWINT_TRIG_M BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
#define PF_GLINT_DYN_CTL_ITR_INDX_S 3
#define PF_GLINT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, PF_GLINT_DYN_CTL_ITR_INDX_S)
#define PF_GLINT_DYN_CTL_INTERVAL_S 5
#define PF_GLINT_DYN_CTL_INTERVAL_M BIT(PF_GLINT_DYN_CTL_INTERVAL_S)
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S)
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S 25
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S)
#define PF_GLINT_DYN_CTL_WB_ON_ITR_S 30
#define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S)
#define PF_GLINT_DYN_CTL_INTENA_MSK_S 31
#define PF_GLINT_DYN_CTL_INTENA_MSK_M BIT(PF_GLINT_DYN_CTL_INTENA_MSK_S)
#define PF_GLINT_ITR_V2(_i, _reg_start) (((_i) * 4) + (_reg_start))
#define PF_GLINT_ITR(_i, _INT) (PF_GLINT_BASE + (((_i) + 1) * 4) + ((_INT) * 0x1000))
#define PF_GLINT_ITR_MAX_INDEX 2
#define PF_GLINT_ITR_INTERVAL_S 0
#define PF_GLINT_ITR_INTERVAL_M MAKEMASK(0xFFF, PF_GLINT_ITR_INTERVAL_S)
/* Timesync registers */
#define PF_TIMESYNC_BASE 0x08404000
#define PF_GLTSYN_CMD_SYNC (PF_TIMESYNC_BASE)
#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_S 0
#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M MAKEMASK(0x3, PF_GLTSYN_CMD_SYNC_EXEC_CMD_S)
#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_S 2
#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(PF_GLTSYN_CMD_SYNC_SHTIME_EN_S)
#define PF_GLTSYN_SHTIME_0 (PF_TIMESYNC_BASE + 0x4)
#define PF_GLTSYN_SHTIME_L (PF_TIMESYNC_BASE + 0x8)
#define PF_GLTSYN_SHTIME_H (PF_TIMESYNC_BASE + 0xC)
#define PF_GLTSYN_ART_L (PF_TIMESYNC_BASE + 0x10)
#define PF_GLTSYN_ART_H (PF_TIMESYNC_BASE + 0x14)
/* Generic registers */
#define PF_INT_DIR_OICR_ENA 0x08406000
#define PF_INT_DIR_OICR_ENA_S 0
#define PF_INT_DIR_OICR_ENA_M MAKEMASK(0xFFFFFFFF, PF_INT_DIR_OICR_ENA_S)
#define PF_INT_DIR_OICR 0x08406004
#define PF_INT_DIR_OICR_TSYN_EVNT 0
#define PF_INT_DIR_OICR_PHY_TS_0 BIT(1)
#define PF_INT_DIR_OICR_PHY_TS_1 BIT(2)
#define PF_INT_DIR_OICR_CAUSE 0x08406008
#define PF_INT_DIR_OICR_CAUSE_CAUSE_S 0
#define PF_INT_DIR_OICR_CAUSE_CAUSE_M MAKEMASK(0xFFFFFFFF, PF_INT_DIR_OICR_CAUSE_CAUSE_S)
#define PF_INT_PBA_CLEAR 0x0840600C
#define PF_FUNC_RID 0x08406010
#define PF_FUNC_RID_FUNCTION_NUMBER_S 0
#define PF_FUNC_RID_FUNCTION_NUMBER_M MAKEMASK(0x7, PF_FUNC_RID_FUNCTION_NUMBER_S)
#define PF_FUNC_RID_DEVICE_NUMBER_S 3
#define PF_FUNC_RID_DEVICE_NUMBER_M MAKEMASK(0x1F, PF_FUNC_RID_DEVICE_NUMBER_S)
#define PF_FUNC_RID_BUS_NUMBER_S 8
#define PF_FUNC_RID_BUS_NUMBER_M MAKEMASK(0xFF, PF_FUNC_RID_BUS_NUMBER_S)
/* Reset registers */
#define PFGEN_RTRIG 0x08407000
#define PFGEN_RTRIG_CORER_S 0
#define PFGEN_RTRIG_CORER_M BIT(0)
#define PFGEN_RTRIG_LINKR_S 1
#define PFGEN_RTRIG_LINKR_M BIT(1)
#define PFGEN_RTRIG_IMCR_S 2
#define PFGEN_RTRIG_IMCR_M BIT(2)
#define PFGEN_RSTAT 0x08407008 /* PFR Status */
#define PFGEN_RSTAT_PFR_STATE_S 0
#define PFGEN_RSTAT_PFR_STATE_M MAKEMASK(0x3, PFGEN_RSTAT_PFR_STATE_S)
#define PFGEN_CTRL 0x0840700C
#define PFGEN_CTRL_PFSWR BIT(0)
#endif
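
A hedged sketch showing how the reset register macros above compose with the rd32()/wr32() accessors used elsewhere in this library. Whether a driver should trigger a PF software reset this way is outside the scope of this header, and idpf_usec_delay() is an assumed OS-dep helper whose name is illustrative.

static int example_pf_sw_reset(struct idpf_hw *hw)
{
	u32 reg;
	int i;

	/* Request a PF software reset. */
	wr32(hw, PFGEN_CTRL, PFGEN_CTRL_PFSWR);

	/* Poll the PFR state field until it reports a non-zero state. */
	for (i = 0; i < 1000; i++) {
		reg = rd32(hw, PFGEN_RSTAT);
		if ((reg & PFGEN_RSTAT_PFR_STATE_M) >> PFGEN_RSTAT_PFR_STATE_S)
			return 0;
		idpf_usec_delay(100); /* assumed OS-dep delay helper */
	}
	return -EBUSY;
}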

@@ -0,0 +1,428 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_LAN_TXRX_H_
#define _IDPF_LAN_TXRX_H_
#ifndef __KERNEL__
#include "idpf_osdep.h"
#endif
enum idpf_rss_hash {
/* Values 0 - 28 are reserved for future use */
IDPF_HASH_INVALID = 0,
IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29,
IDPF_HASH_NONF_MULTICAST_IPV4_UDP,
IDPF_HASH_NONF_IPV4_UDP,
IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK,
IDPF_HASH_NONF_IPV4_TCP,
IDPF_HASH_NONF_IPV4_SCTP,
IDPF_HASH_NONF_IPV4_OTHER,
IDPF_HASH_FRAG_IPV4,
/* Values 37-38 are reserved */
IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39,
IDPF_HASH_NONF_MULTICAST_IPV6_UDP,
IDPF_HASH_NONF_IPV6_UDP,
IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK,
IDPF_HASH_NONF_IPV6_TCP,
IDPF_HASH_NONF_IPV6_SCTP,
IDPF_HASH_NONF_IPV6_OTHER,
IDPF_HASH_FRAG_IPV6,
IDPF_HASH_NONF_RSVD47,
IDPF_HASH_NONF_FCOE_OX,
IDPF_HASH_NONF_FCOE_RX,
IDPF_HASH_NONF_FCOE_OTHER,
/* Values 51-62 are reserved */
IDPF_HASH_L2_PAYLOAD = 63,
IDPF_HASH_MAX
};
/* Supported RSS offloads */
#define IDPF_DEFAULT_RSS_HASH ( \
BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \
BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \
BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \
BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \
BIT_ULL(IDPF_HASH_FRAG_IPV4) | \
BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \
BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \
BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \
BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \
BIT_ULL(IDPF_HASH_FRAG_IPV6) | \
BIT_ULL(IDPF_HASH_L2_PAYLOAD))
/* TODO: Wrap below comment under internal flag
* Below 6 pcktypes are not supported by FVL or older products
* They are supported by FPK and future products
*/
#define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \
BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \
BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \
BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \
BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP))
/* For idpf_splitq_base_tx_compl_desc */
#define IDPF_TXD_COMPLQ_GEN_S 15
#define IDPF_TXD_COMPLQ_GEN_M BIT_ULL(IDPF_TXD_COMPLQ_GEN_S)
#define IDPF_TXD_COMPLQ_COMPL_TYPE_S 11
#define IDPF_TXD_COMPLQ_COMPL_TYPE_M \
MAKEMASK(0x7UL, IDPF_TXD_COMPLQ_COMPL_TYPE_S)
#define IDPF_TXD_COMPLQ_QID_S 0
#define IDPF_TXD_COMPLQ_QID_M MAKEMASK(0x3FFUL, IDPF_TXD_COMPLQ_QID_S)
/* For base mode TX descriptors */
#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S 23
#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S)
#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S 19
#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M \
(0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S)
#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S 12
#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M \
(0X7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S)
#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S 11
#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M \
BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S)
#define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST \
IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M
#define IDPF_TXD_CTX_QW0_TUNN_NATT_S 9
#define IDPF_TXD_CTX_QW0_TUNN_NATT_M (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
#define IDPF_TXD_CTX_UDP_TUNNELING BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S)
#define IDPF_TXD_CTX_GRE_TUNNELING (0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S 2
#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M \
(0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S)
#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S 0
#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M \
(0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S)
#define IDPF_TXD_CTX_QW1_MSS_S 50
#define IDPF_TXD_CTX_QW1_MSS_M \
MAKEMASK(0x3FFFULL, IDPF_TXD_CTX_QW1_MSS_S)
#define IDPF_TXD_CTX_QW1_TSO_LEN_S 30
#define IDPF_TXD_CTX_QW1_TSO_LEN_M \
MAKEMASK(0x3FFFFULL, IDPF_TXD_CTX_QW1_TSO_LEN_S)
#define IDPF_TXD_CTX_QW1_CMD_S 4
#define IDPF_TXD_CTX_QW1_CMD_M \
MAKEMASK(0xFFFUL, IDPF_TXD_CTX_QW1_CMD_S)
#define IDPF_TXD_CTX_QW1_DTYPE_S 0
#define IDPF_TXD_CTX_QW1_DTYPE_M \
MAKEMASK(0xFUL, IDPF_TXD_CTX_QW1_DTYPE_S)
#define IDPF_TXD_QW1_L2TAG1_S 48
#define IDPF_TXD_QW1_L2TAG1_M \
MAKEMASK(0xFFFFULL, IDPF_TXD_QW1_L2TAG1_S)
#define IDPF_TXD_QW1_TX_BUF_SZ_S 34
#define IDPF_TXD_QW1_TX_BUF_SZ_M \
MAKEMASK(0x3FFFULL, IDPF_TXD_QW1_TX_BUF_SZ_S)
#define IDPF_TXD_QW1_OFFSET_S 16
#define IDPF_TXD_QW1_OFFSET_M \
MAKEMASK(0x3FFFFULL, IDPF_TXD_QW1_OFFSET_S)
#define IDPF_TXD_QW1_CMD_S 4
#define IDPF_TXD_QW1_CMD_M MAKEMASK(0xFFFUL, IDPF_TXD_QW1_CMD_S)
#define IDPF_TXD_QW1_DTYPE_S 0
#define IDPF_TXD_QW1_DTYPE_M MAKEMASK(0xFUL, IDPF_TXD_QW1_DTYPE_S)
/* TX Completion Descriptor Completion Types */
#define IDPF_TXD_COMPLT_ITR_FLUSH 0
#define IDPF_TXD_COMPLT_RULE_MISS 1
#define IDPF_TXD_COMPLT_RS 2
#define IDPF_TXD_COMPLT_REINJECTED 3
#define IDPF_TXD_COMPLT_RE 4
#define IDPF_TXD_COMPLT_SW_MARKER 5
enum idpf_tx_desc_dtype_value {
IDPF_TX_DESC_DTYPE_DATA = 0,
IDPF_TX_DESC_DTYPE_CTX = 1,
IDPF_TX_DESC_DTYPE_REINJECT_CTX = 2,
IDPF_TX_DESC_DTYPE_FLEX_DATA = 3,
IDPF_TX_DESC_DTYPE_FLEX_CTX = 4,
IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5,
IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 = 6,
IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 = 7,
IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX = 8,
IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_TSO_CTX = 9,
IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_CTX = 10,
IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX = 11,
IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE = 12,
IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_TSO_CTX = 13,
IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_CTX = 14,
/* DESC_DONE - HW has completed write-back of descriptor */
IDPF_TX_DESC_DTYPE_DESC_DONE = 15,
};
enum idpf_tx_ctx_desc_cmd_bits {
IDPF_TX_CTX_DESC_TSO = 0x01,
IDPF_TX_CTX_DESC_TSYN = 0x02,
IDPF_TX_CTX_DESC_IL2TAG2 = 0x04,
IDPF_TX_CTX_DESC_RSVD = 0x08,
IDPF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
IDPF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
IDPF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
IDPF_TX_CTX_DESC_SWTCH_VSI = 0x30,
IDPF_TX_CTX_DESC_FILT_AU_EN = 0x40,
IDPF_TX_CTX_DESC_FILT_AU_EVICT = 0x80,
IDPF_TX_CTX_DESC_RSVD1 = 0xF00
};
enum idpf_tx_desc_len_fields {
/* Note: These are predefined bit offsets */
IDPF_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
IDPF_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
IDPF_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
};
#define IDPF_TXD_QW1_MACLEN_M MAKEMASK(0x7FUL, IDPF_TX_DESC_LEN_MACLEN_S)
#define IDPF_TXD_QW1_IPLEN_M MAKEMASK(0x7FUL, IDPF_TX_DESC_LEN_IPLEN_S)
#define IDPF_TXD_QW1_L4LEN_M MAKEMASK(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S)
#define IDPF_TXD_QW1_FCLEN_M MAKEMASK(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S)
enum idpf_tx_base_desc_cmd_bits {
IDPF_TX_DESC_CMD_EOP = 0x0001,
IDPF_TX_DESC_CMD_RS = 0x0002,
/* only on VFs else RSVD */
IDPF_TX_DESC_CMD_ICRC = 0x0004,
IDPF_TX_DESC_CMD_IL2TAG1 = 0x0008,
IDPF_TX_DESC_CMD_RSVD1 = 0x0010,
IDPF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
IDPF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
IDPF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
IDPF_TX_DESC_CMD_RSVD2 = 0x0080,
IDPF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
IDPF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
IDPF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
IDPF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
IDPF_TX_DESC_CMD_RSVD3 = 0x0400,
IDPF_TX_DESC_CMD_RSVD4 = 0x0800,
};
/* Transmit descriptors */
/* splitq tx buf, singleq tx buf and singleq compl desc */
struct idpf_base_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
};/* read used with buffer queues*/
struct idpf_splitq_tx_compl_desc {
/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
__le16 qid_comptype_gen;
union {
__le16 q_head; /* Queue head */
__le16 compl_tag; /* Completion tag */
} q_head_compl_tag;
u32 rsvd;
};/* writeback used with completion queues*/
/* Context descriptors */
struct idpf_base_tx_ctx_desc {
struct {
__le32 tunneling_params;
__le16 l2tag2;
__le16 rsvd1;
} qw0;
__le64 qw1; /* type_cmd_tlen_mss/rt_hint */
};
/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C) */
enum idpf_tx_flex_desc_cmd_bits {
IDPF_TX_FLEX_DESC_CMD_EOP = 0x01,
IDPF_TX_FLEX_DESC_CMD_RS = 0x02,
IDPF_TX_FLEX_DESC_CMD_RE = 0x04,
IDPF_TX_FLEX_DESC_CMD_IL2TAG1 = 0x08,
IDPF_TX_FLEX_DESC_CMD_DUMMY = 0x10,
IDPF_TX_FLEX_DESC_CMD_CS_EN = 0x20,
IDPF_TX_FLEX_DESC_CMD_FILT_AU_EN = 0x40,
IDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT = 0x80,
};
struct idpf_flex_tx_desc {
__le64 buf_addr; /* Packet buffer address */
struct {
__le16 cmd_dtype;
#define IDPF_FLEX_TXD_QW1_DTYPE_S 0
#define IDPF_FLEX_TXD_QW1_DTYPE_M \
MAKEMASK(0x1FUL, IDPF_FLEX_TXD_QW1_DTYPE_S)
#define IDPF_FLEX_TXD_QW1_CMD_S 5
#define IDPF_FLEX_TXD_QW1_CMD_M MAKEMASK(0x7FFUL, IDPF_FLEX_TXD_QW1_CMD_S)
union {
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
u8 raw[4];
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */
struct {
__le16 l2tag1;
u8 flex;
u8 tsync;
} tsync;
/* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
struct {
__le16 l2tag1;
__le16 l2tag2;
} l2tags;
} flex;
__le16 buf_size;
} qw1;
};
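As an illustration of how the cmd_dtype field of this descriptor is composed, here is a hedged sketch (not part of this patch). It assumes IDPF_TX_DESC_DTYPE_FLEX_DATA (0x03, per the comment inside the union above) is defined earlier in the dtype enum, that the command bits from idpf_tx_flex_desc_cmd_bits are shifted by IDPF_FLEX_TXD_QW1_CMD_S into [15:5], and that the CPU_TO_LE*() helpers from idpf_osdep.h are available.

/* Illustrative sketch, not part of this patch: fill one flex data descriptor. */
static inline void
example_fill_flex_tx_data(struct idpf_flex_tx_desc *desc, u64 dma_addr,
			  u16 len, bool last)
{
	u16 cmd = IDPF_TX_FLEX_DESC_CMD_CS_EN;

	if (last)
		cmd |= IDPF_TX_FLEX_DESC_CMD_EOP | IDPF_TX_FLEX_DESC_CMD_RS;

	desc->buf_addr = CPU_TO_LE64(dma_addr);
	/* dtype in [4:0], command bits in [15:5] */
	desc->qw1.cmd_dtype =
		CPU_TO_LE16((IDPF_TX_DESC_DTYPE_FLEX_DATA &
			     IDPF_FLEX_TXD_QW1_DTYPE_M) |
			    (cmd << IDPF_FLEX_TXD_QW1_CMD_S));
	desc->qw1.buf_size = CPU_TO_LE16(len);
}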
struct idpf_flex_tx_sched_desc {
__le64 buf_addr; /* Packet buffer address */
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE_16B (0x0C) */
struct {
u8 cmd_dtype;
#define IDPF_TXD_FLEX_FLOW_DTYPE_M 0x1F
#define IDPF_TXD_FLEX_FLOW_CMD_EOP 0x20
#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN 0x40
#define IDPF_TXD_FLEX_FLOW_CMD_RE 0x80
u8 rsvd[3];
__le16 compl_tag;
__le16 rxr_bufsize;
#define IDPF_TXD_FLEX_FLOW_RXR 0x4000
#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M 0x3FFF
} qw1;
};
/* Common cmd fields for all flex context descriptors
* Note: these defines already account for the 5 bit dtype in the cmd_dtype
* field
*/
enum idpf_tx_flex_ctx_desc_cmd_bits {
IDPF_TX_FLEX_CTX_DESC_CMD_TSO = 0x0020,
IDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN = 0x0040,
IDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2 = 0x0080,
IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK = 0x0200, /* 2 bits */
IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL = 0x0400, /* 2 bits */
IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI = 0x0600, /* 2 bits */
};
/* Standard flex descriptor TSO context quad word */
struct idpf_flex_tx_tso_ctx_qw {
__le32 flex_tlen;
#define IDPF_TXD_FLEX_CTX_TLEN_M 0x3FFFF
#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S 24
__le16 mss_rt;
#define IDPF_TXD_FLEX_CTX_MSS_RT_M 0x3FFF
u8 hdr_len;
u8 flex;
};
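A short, hedged sketch of how a driver would fill this TSO context quad word with the masks above; it is illustrative only, not part of this patch, and assumes the CPU_TO_LE*() byte-order helpers from idpf_osdep.h.

/* Illustrative sketch, not part of this patch: fill the TSO context QW. */
static inline void
example_fill_tso_ctx_qw(struct idpf_flex_tx_tso_ctx_qw *qw0,
			u32 tso_payload_len, u16 mss, u8 hdr_len)
{
	qw0->flex_tlen = CPU_TO_LE32(tso_payload_len & IDPF_TXD_FLEX_CTX_TLEN_M);
	qw0->mss_rt = CPU_TO_LE16(mss & IDPF_TXD_FLEX_CTX_MSS_RT_M);
	qw0->hdr_len = hdr_len;
	qw0->flex = 0;
}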
union idpf_flex_tx_ctx_desc {
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_CTX (0x04) */
struct {
u8 qw0_flex[8];
struct {
__le16 cmd_dtype;
__le16 l2tag1;
u8 qw1_flex[4];
} qw1;
} gen;
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
struct {
__le16 cmd_dtype;
u8 flex[6];
} qw1;
} tso;
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX (0x08) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
struct {
__le16 cmd_dtype;
__le16 l2tag2;
u8 flex0;
u8 ptag;
u8 flex1[2];
} qw1;
} tso_l2tag2_ptag;
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX (0x0B) */
struct {
u8 qw0_flex[8];
struct {
__le16 cmd_dtype;
__le16 l2tag2;
u8 flex[4];
} qw1;
} l2tag2;
/* DTYPE = IDPF_TX_DESC_DTYPE_REINJECT_CTX (0x02) */
struct {
struct {
__le32 sa_domain;
#define IDPF_TXD_FLEX_CTX_SA_DOM_M 0xFFFF
#define IDPF_TXD_FLEX_CTX_SA_DOM_VAL 0x10000
__le32 sa_idx;
#define IDPF_TXD_FLEX_CTX_SAIDX_M 0x1FFFFF
} qw0;
struct {
__le16 cmd_dtype;
__le16 txr2comp;
#define IDPF_TXD_FLEX_CTX_TXR2COMP 0x1
__le16 miss_txq_comp_tag;
__le16 miss_txq_id;
} qw1;
} reinjection_pkt;
};
/* Host Split Context Descriptors */
struct idpf_flex_tx_hs_ctx_desc {
union {
struct {
__le32 host_fnum_tlen;
#define IDPF_TXD_FLEX_CTX_TLEN_S 0
/* see IDPF_TXD_FLEX_CTX_TLEN_M for mask definition */
#define IDPF_TXD_FLEX_CTX_FNUM_S 18
#define IDPF_TXD_FLEX_CTX_FNUM_M 0x7FF
#define IDPF_TXD_FLEX_CTX_HOST_S 29
#define IDPF_TXD_FLEX_CTX_HOST_M 0x7
__le16 ftype_mss_rt;
#define IDPF_TXD_FLEX_CTX_MSS_RT_0 0
#define IDPF_TXD_FLEX_CTX_MSS_RT_M 0x3FFF
#define IDPF_TXD_FLEX_CTX_FTYPE_S 14
#define IDPF_TXD_FLEX_CTX_FTYPE_VF MAKEMASK(0x0, IDPF_TXD_FLEX_CTX_FTYPE_S)
#define IDPF_TXD_FLEX_CTX_FTYPE_VDEV MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_FTYPE_S)
#define IDPF_TXD_FLEX_CTX_FTYPE_PF MAKEMASK(0x2, IDPF_TXD_FLEX_CTX_FTYPE_S)
u8 hdr_len;
u8 ptag;
} tso;
struct {
u8 flex0[2];
__le16 host_fnum_ftype;
u8 flex1[3];
u8 ptag;
} no_tso;
} qw0;
__le64 qw1_cmd_dtype;
#define IDPF_TXD_FLEX_CTX_QW1_PASID_S 16
#define IDPF_TXD_FLEX_CTX_QW1_PASID_M 0xFFFFF
#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID_S 36
#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID \
MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_PASID_VALID_S)
#define IDPF_TXD_FLEX_CTX_QW1_TPH_S 37
#define IDPF_TXD_FLEX_CTX_QW1_TPH \
MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_TPH_S)
#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_S 38
#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_M 0xF
/* The following are only valid for DTYPE = 0x09 and DTYPE = 0x0A */
#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_S 42
#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_M 0x1FFFFF
#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S 63
#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VALID \
MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S)
/* The following are only valid for DTYPE = 0x0D and DTYPE = 0x0E */
#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_S 48
#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_M 0xFF
#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_S 56
#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_M 0xFF
};
#endif /* _IDPF_LAN_TXRX_H_ */


@ -0,0 +1,114 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_LAN_VF_REGS_H_
#define _IDPF_LAN_VF_REGS_H_
/* Reset */
#define VFGEN_RSTAT 0x00008800
#define VFGEN_RSTAT_VFR_STATE_S 0
#define VFGEN_RSTAT_VFR_STATE_M MAKEMASK(0x3, VFGEN_RSTAT_VFR_STATE_S)
/* Control(VF Mailbox) Queue */
#define VF_BASE 0x00006000
#define VF_ATQBAL (VF_BASE + 0x1C00)
#define VF_ATQBAH (VF_BASE + 0x1800)
#define VF_ATQLEN (VF_BASE + 0x0800)
#define VF_ATQLEN_ATQLEN_S 0
#define VF_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, VF_ATQLEN_ATQLEN_S)
#define VF_ATQLEN_ATQVFE_S 28
#define VF_ATQLEN_ATQVFE_M BIT(VF_ATQLEN_ATQVFE_S)
#define VF_ATQLEN_ATQOVFL_S 29
#define VF_ATQLEN_ATQOVFL_M BIT(VF_ATQLEN_ATQOVFL_S)
#define VF_ATQLEN_ATQCRIT_S 30
#define VF_ATQLEN_ATQCRIT_M BIT(VF_ATQLEN_ATQCRIT_S)
#define VF_ATQLEN_ATQENABLE_S 31
#define VF_ATQLEN_ATQENABLE_M BIT(VF_ATQLEN_ATQENABLE_S)
#define VF_ATQH (VF_BASE + 0x0400)
#define VF_ATQH_ATQH_S 0
#define VF_ATQH_ATQH_M MAKEMASK(0x3FF, VF_ATQH_ATQH_S)
#define VF_ATQT (VF_BASE + 0x2400)
#define VF_ARQBAL (VF_BASE + 0x0C00)
#define VF_ARQBAH (VF_BASE)
#define VF_ARQLEN (VF_BASE + 0x2000)
#define VF_ARQLEN_ARQLEN_S 0
#define VF_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, VF_ARQLEN_ARQLEN_S)
#define VF_ARQLEN_ARQVFE_S 28
#define VF_ARQLEN_ARQVFE_M BIT(VF_ARQLEN_ARQVFE_S)
#define VF_ARQLEN_ARQOVFL_S 29
#define VF_ARQLEN_ARQOVFL_M BIT(VF_ARQLEN_ARQOVFL_S)
#define VF_ARQLEN_ARQCRIT_S 30
#define VF_ARQLEN_ARQCRIT_M BIT(VF_ARQLEN_ARQCRIT_S)
#define VF_ARQLEN_ARQENABLE_S 31
#define VF_ARQLEN_ARQENABLE_M BIT(VF_ARQLEN_ARQENABLE_S)
#define VF_ARQH (VF_BASE + 0x1400)
#define VF_ARQH_ARQH_S 0
#define VF_ARQH_ARQH_M MAKEMASK(0x1FFF, VF_ARQH_ARQH_S)
#define VF_ARQT (VF_BASE + 0x1000)
/* Transmit queues */
#define VF_QTX_TAIL_BASE 0x00000000
#define VF_QTX_TAIL(_QTX) (VF_QTX_TAIL_BASE + (_QTX) * 0x4)
#define VF_QTX_TAIL_EXT_BASE 0x00040000
#define VF_QTX_TAIL_EXT(_QTX) (VF_QTX_TAIL_EXT_BASE + ((_QTX) * 4))
/* Receive queues */
#define VF_QRX_TAIL_BASE 0x00002000
#define VF_QRX_TAIL(_QRX) (VF_QRX_TAIL_BASE + ((_QRX) * 4))
#define VF_QRX_TAIL_EXT_BASE 0x00050000
#define VF_QRX_TAIL_EXT(_QRX) (VF_QRX_TAIL_EXT_BASE + ((_QRX) * 4))
#define VF_QRXB_TAIL_BASE 0x00060000
#define VF_QRXB_TAIL(_QRX) (VF_QRXB_TAIL_BASE + ((_QRX) * 4))
/* Interrupts */
#define VF_INT_DYN_CTL0 0x00005C00
#define VF_INT_DYN_CTL0_INTENA_S 0
#define VF_INT_DYN_CTL0_INTENA_M BIT(VF_INT_DYN_CTL0_INTENA_S)
#define VF_INT_DYN_CTL0_ITR_INDX_S 3
#define VF_INT_DYN_CTL0_ITR_INDX_M MAKEMASK(0x3, VF_INT_DYN_CTL0_ITR_INDX_S)
#define VF_INT_DYN_CTLN(_INT) (0x00003800 + ((_INT) * 4))
#define VF_INT_DYN_CTLN_EXT(_INT) (0x00070000 + ((_INT) * 4))
#define VF_INT_DYN_CTLN_INTENA_S 0
#define VF_INT_DYN_CTLN_INTENA_M BIT(VF_INT_DYN_CTLN_INTENA_S)
#define VF_INT_DYN_CTLN_CLEARPBA_S 1
#define VF_INT_DYN_CTLN_CLEARPBA_M BIT(VF_INT_DYN_CTLN_CLEARPBA_S)
#define VF_INT_DYN_CTLN_SWINT_TRIG_S 2
#define VF_INT_DYN_CTLN_SWINT_TRIG_M BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S)
#define VF_INT_DYN_CTLN_ITR_INDX_S 3
#define VF_INT_DYN_CTLN_ITR_INDX_M MAKEMASK(0x3, VF_INT_DYN_CTLN_ITR_INDX_S)
#define VF_INT_DYN_CTLN_INTERVAL_S 5
#define VF_INT_DYN_CTLN_INTERVAL_M BIT(VF_INT_DYN_CTLN_INTERVAL_S)
#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S 24
#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S)
#define VF_INT_DYN_CTLN_SW_ITR_INDX_S 25
#define VF_INT_DYN_CTLN_SW_ITR_INDX_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S)
#define VF_INT_DYN_CTLN_WB_ON_ITR_S 30
#define VF_INT_DYN_CTLN_WB_ON_ITR_M BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S)
#define VF_INT_DYN_CTLN_INTENA_MSK_S 31
#define VF_INT_DYN_CTLN_INTENA_MSK_M BIT(VF_INT_DYN_CTLN_INTENA_MSK_S)
#define VF_INT_ITR0(_i) (0x00004C00 + ((_i) * 4))
#define VF_INT_ITRN_V2(_i, _reg_start) ((_reg_start) + (((_i)) * 4))
#define VF_INT_ITRN(_i, _INT) (0x00002800 + ((_i) * 4) + ((_INT) * 0x40))
#define VF_INT_ITRN_64(_i, _INT) (0x00002C00 + ((_i) * 4) + ((_INT) * 0x100))
#define VF_INT_ITRN_2K(_i, _INT) (0x00072000 + ((_i) * 4) + ((_INT) * 0x100))
#define VF_INT_ITRN_MAX_INDEX 2
#define VF_INT_ITRN_INTERVAL_S 0
#define VF_INT_ITRN_INTERVAL_M MAKEMASK(0xFFF, VF_INT_ITRN_INTERVAL_S)
#define VF_INT_PBA_CLEAR 0x00008900
#define VF_INT_ICR0_ENA1 0x00005000
#define VF_INT_ICR0_ENA1_ADMINQ_S 30
#define VF_INT_ICR0_ENA1_ADMINQ_M BIT(VF_INT_ICR0_ENA1_ADMINQ_S)
#define VF_INT_ICR0_ENA1_RSVD_S 31
#define VF_INT_ICR01 0x00004800
#define VF_QF_HENA(_i) (0x0000C400 + ((_i) * 4))
#define VF_QF_HENA_MAX_INDX 1
#define VF_QF_HKEY(_i) (0x0000CC00 + ((_i) * 4))
#define VF_QF_HKEY_MAX_INDX 12
#define VF_QF_HLUT(_i) (0x0000D000 + ((_i) * 4))
#define VF_QF_HLUT_MAX_INDX 15
#endif /* _IDPF_LAN_VF_REGS_H_ */
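For orientation, a minimal sketch of how these VF tail registers are typically used from the driver side. It is illustrative only and not part of this patch: wr32() is the register-write wrapper defined in idpf_osdep.h later in this patch, and the full struct idpf_hw definition (carrying hw_addr) is assumed from idpf_controlq.h.

/* Illustrative sketch, not part of this patch: ring a VF Tx queue doorbell. */
static inline void
example_bump_vf_txq_tail(struct idpf_hw *hw, u16 qid, u16 tail)
{
	wr32(hw, VF_QTX_TAIL(qid), tail);
}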


@ -0,0 +1,364 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_OSDEP_H_
#define _IDPF_OSDEP_H_
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <stdarg.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_random.h>
#include <rte_io.h>
#define INLINE inline
#define STATIC static
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
typedef int64_t s64;
typedef struct idpf_lock idpf_lock;
#define __iomem
#define hw_dbg(hw, S, A...) do {} while (0)
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)(n))
#define low_16_bits(x) ((x) & 0xFFFF)
#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16)
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6
#endif
#ifndef __le16
#define __le16 uint16_t
#endif
#ifndef __le32
#define __le32 uint32_t
#endif
#ifndef __le64
#define __le64 uint64_t
#endif
#ifndef __be16
#define __be16 uint16_t
#endif
#ifndef __be32
#define __be32 uint32_t
#endif
#ifndef __be64
#define __be64 uint64_t
#endif
#ifndef BIT_ULL
#define BIT_ULL(a) RTE_BIT64(a)
#endif
#ifndef BIT
#define BIT(a) RTE_BIT32(a)
#endif
#define FALSE 0
#define TRUE 1
#define false 0
#define true 1
/* Avoid macro redefinition warning on Windows */
#ifdef RTE_EXEC_ENV_WINDOWS
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#endif
#define min(a, b) RTE_MIN(a, b)
#define max(a, b) RTE_MAX(a, b)
#define ARRAY_SIZE(arr) RTE_DIM(arr)
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
#define MAKEMASK(m, s) ((m) << (s))
extern int idpf_common_logger;
#define DEBUGOUT(S) rte_log(RTE_LOG_DEBUG, idpf_common_logger, S)
#define DEBUGOUT2(S, A...) rte_log(RTE_LOG_DEBUG, idpf_common_logger, S, ##A)
#define DEBUGFUNC(F) DEBUGOUT(F "\n")
#define idpf_debug(h, m, s, ...) \
do { \
if (((m) & (h)->debug_mask)) \
PMD_DRV_LOG_RAW(DEBUG, "idpf %02x.%x " s, \
(h)->bus.device, (h)->bus.func, \
##__VA_ARGS__); \
} while (0)
#define idpf_info(hw, fmt, args...) idpf_debug(hw, IDPF_DBG_ALL, fmt, ##args)
#define idpf_warn(hw, fmt, args...) idpf_debug(hw, IDPF_DBG_ALL, fmt, ##args)
#define idpf_debug_array(hw, type, rowsize, groupsize, buf, len) \
do { \
struct idpf_hw *hw_l = hw; \
u16 len_l = len; \
u8 *buf_l = buf; \
int i; \
for (i = 0; i < len_l; i += 8) \
idpf_debug(hw_l, type, \
"0x%04X 0x%016"PRIx64"\n", \
i, *((u64 *)((buf_l) + i))); \
} while (0)
#define idpf_snprintf snprintf
#ifndef SNPRINTF
#define SNPRINTF idpf_snprintf
#endif
#define IDPF_PCI_REG(reg) rte_read32(reg)
#define IDPF_PCI_REG_ADDR(a, reg) \
((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
#define IDPF_PCI_REG64(reg) rte_read64(reg)
#define IDPF_PCI_REG_ADDR64(a, reg) \
((volatile uint64_t *)((char *)(a)->hw_addr + (reg)))
#define idpf_wmb() rte_io_wmb()
#define idpf_rmb() rte_io_rmb()
#define idpf_mb() rte_io_mb()
static inline uint32_t idpf_read_addr(volatile void *addr)
{
return rte_le_to_cpu_32(IDPF_PCI_REG(addr));
}
static inline uint64_t idpf_read_addr64(volatile void *addr)
{
return rte_le_to_cpu_64(IDPF_PCI_REG64(addr));
}
#define IDPF_PCI_REG_WRITE(reg, value) \
rte_write32((rte_cpu_to_le_32(value)), reg)
#define IDPF_PCI_REG_WRITE64(reg, value) \
rte_write64((rte_cpu_to_le_64(value)), reg)
#define IDPF_READ_REG(hw, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((hw), (reg)))
#define IDPF_WRITE_REG(hw, reg, value) \
IDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((hw), (reg)), (value))
#define rd32(a, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((a), (reg)))
#define wr32(a, reg, value) \
IDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((a), (reg)), (value))
#define div64_long(n, d) ((n) / (d))
#define rd64(a, reg) idpf_read_addr64(IDPF_PCI_REG_ADDR64((a), (reg)))
#define BITS_PER_BYTE 8
/* memory allocation tracking */
struct idpf_dma_mem {
void *va;
u64 pa;
u32 size;
const void *zone;
} __rte_packed;
struct idpf_virt_mem {
void *va;
u32 size;
} __rte_packed;
#define idpf_malloc(h, s) rte_zmalloc(NULL, s, 0)
#define idpf_calloc(h, c, s) rte_zmalloc(NULL, (c) * (s), 0)
#define idpf_free(h, m) rte_free(m)
#define idpf_memset(a, b, c, d) memset((a), (b), (c))
#define idpf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
#define idpf_memdup(a, b, c, d) rte_memcpy(idpf_malloc(a, c), b, c)
#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
#define NTOHS(a) rte_be_to_cpu_16(a)
#define NTOHL(a) rte_be_to_cpu_32(a)
#define HTONS(a) rte_cpu_to_be_16(a)
#define HTONL(a) rte_cpu_to_be_32(a)
/* SW spinlock */
struct idpf_lock {
rte_spinlock_t spinlock;
};
static inline void
idpf_init_lock(struct idpf_lock *sp)
{
rte_spinlock_init(&sp->spinlock);
}
static inline void
idpf_acquire_lock(struct idpf_lock *sp)
{
rte_spinlock_lock(&sp->spinlock);
}
static inline void
idpf_release_lock(struct idpf_lock *sp)
{
rte_spinlock_unlock(&sp->spinlock);
}
static inline void
idpf_destroy_lock(__rte_unused struct idpf_lock *sp)
{
}
struct idpf_hw;
static inline void *
idpf_alloc_dma_mem(__rte_unused struct idpf_hw *hw,
struct idpf_dma_mem *mem, u64 size)
{
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
if (!mem)
return NULL;
snprintf(z_name, sizeof(z_name), "idpf_dma_%"PRIu64, rte_rand());
mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, RTE_PGSIZE_4K);
if (!mz)
return NULL;
mem->size = size;
mem->va = mz->addr;
mem->pa = mz->iova;
mem->zone = (const void *)mz;
memset(mem->va, 0, size);
return mem->va;
}
static inline void
idpf_free_dma_mem(__rte_unused struct idpf_hw *hw,
struct idpf_dma_mem *mem)
{
rte_memzone_free((const struct rte_memzone *)mem->zone);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
}
static inline u8
idpf_hweight8(u32 num)
{
u8 bits = 0;
u32 i;
for (i = 0; i < 8; i++) {
bits += (u8)(num & 0x1);
num >>= 1;
}
return bits;
}
static inline u8
idpf_hweight32(u32 num)
{
u8 bits = 0;
u32 i;
for (i = 0; i < 32; i++) {
bits += (u8)(num & 0x1);
num >>= 1;
}
return bits;
}
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define DELAY(x) rte_delay_us(x)
#define idpf_usec_delay(x) rte_delay_us(x)
#define idpf_msec_delay(x, y) rte_delay_us(1000 * (x))
#define udelay(x) DELAY(x)
#define msleep(x) DELAY(1000 * (x))
#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
#ifndef IDPF_DBG_TRACE
#define IDPF_DBG_TRACE BIT_ULL(0)
#endif
#ifndef DIVIDE_AND_ROUND_UP
#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
#endif
#ifndef IDPF_INTEL_VENDOR_ID
#define IDPF_INTEL_VENDOR_ID 0x8086
#endif
#ifndef IS_UNICAST_ETHER_ADDR
#define IS_UNICAST_ETHER_ADDR(addr) \
((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0))
#endif
#ifndef IS_MULTICAST_ETHER_ADDR
#define IS_MULTICAST_ETHER_ADDR(addr) \
((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1))
#endif
#ifndef IS_BROADCAST_ETHER_ADDR
/* Check whether an address is broadcast. */
#define IS_BROADCAST_ETHER_ADDR(addr) \
((bool)((((u16 *)(addr))[0] == ((u16)0xffff))))
#endif
#ifndef IS_ZERO_ETHER_ADDR
#define IS_ZERO_ETHER_ADDR(addr) \
(((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) && \
((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) && \
((bool)((((u16 *)(addr))[2] == ((u16)0x0)))))
#endif
#ifndef LIST_HEAD_TYPE
#define LIST_HEAD_TYPE(list_name, type) LIST_HEAD(list_name, type)
#endif
#ifndef LIST_ENTRY_TYPE
#define LIST_ENTRY_TYPE(type) LIST_ENTRY(type)
#endif
#ifndef LIST_FOR_EACH_ENTRY_SAFE
#define LIST_FOR_EACH_ENTRY_SAFE(pos, temp, head, entry_type, list) \
LIST_FOREACH(pos, head, list)
#endif
#ifndef LIST_FOR_EACH_ENTRY
#define LIST_FOR_EACH_ENTRY(pos, head, entry_type, list) \
LIST_FOREACH(pos, head, list)
#endif
#endif /* _IDPF_OSDEP_H_ */
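A short usage sketch for the DMA-memory and lock wrappers defined in this header; it is illustrative only, not part of this patch, and assumes the negative-errno convention used elsewhere in this library.

/* Illustrative sketch, not part of this patch: typical osdep wrapper usage. */
static int
example_osdep_usage(struct idpf_hw *hw)
{
	struct idpf_dma_mem ring = { 0 };
	struct idpf_lock lock;

	/* 4 KB of IOVA-contiguous, zeroed memory; ring.pa holds the bus address */
	if (idpf_alloc_dma_mem(hw, &ring, 4096) == NULL)
		return -ENOMEM;

	idpf_init_lock(&lock);
	idpf_acquire_lock(&lock);
	/* ... program ring.pa into hardware and fill ring.va ... */
	idpf_release_lock(&lock);
	idpf_destroy_lock(&lock);

	idpf_free_dma_mem(hw, &ring);
	return 0;
}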


@ -0,0 +1,45 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_PROTOTYPE_H_
#define _IDPF_PROTOTYPE_H_
/* Include generic macros and types first */
#include "idpf_osdep.h"
#include "idpf_controlq.h"
#include "idpf_type.h"
#include "idpf_alloc.h"
#include "idpf_devids.h"
#include "idpf_controlq_api.h"
#include "idpf_lan_pf_regs.h"
#include "idpf_lan_vf_regs.h"
#include "idpf_lan_txrx.h"
#include "virtchnl.h"
#define APF
int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size);
int idpf_deinit_hw(struct idpf_hw *hw);
int idpf_clean_arq_element(struct idpf_hw *hw,
struct idpf_arq_event_info *e,
u16 *events_pending);
bool idpf_asq_done(struct idpf_hw *hw);
bool idpf_check_asq_alive(struct idpf_hw *hw);
int idpf_get_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,
u8 *lut, u16 lut_size);
int idpf_set_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,
u8 *lut, u16 lut_size);
int idpf_get_rss_key(struct idpf_hw *hw, u16 seid,
struct idpf_get_set_rss_key_data *key);
int idpf_set_rss_key(struct idpf_hw *hw, u16 seid,
struct idpf_get_set_rss_key_data *key);
int idpf_set_mac_type(struct idpf_hw *hw);
int idpf_reset(struct idpf_hw *hw);
int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode,
int v_retval, u8 *msg, u16 msglen);
#endif /* _IDPF_PROTOTYPE_H_ */
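To show how these entry points fit together, a hedged bring-up sketch follows; the ring and buffer sizes are arbitrary example values and error handling is reduced to the minimum. Illustrative only, not part of this patch.

/* Illustrative sketch, not part of this patch: minimal mailbox bring-up. */
static int
example_hw_bringup(struct idpf_hw *hw)
{
	struct idpf_ctlq_size ctlq_size = {
		.asq_buf_size = 4096,	/* example values only */
		.asq_ring_size = 64,
		.arq_buf_size = 4096,
		.arq_ring_size = 64,
	};
	int ret;

	ret = idpf_set_mac_type(hw);
	if (ret != 0)
		return ret;

	return idpf_init_hw(hw, ctlq_size);
}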


@ -0,0 +1,106 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _IDPF_TYPE_H_
#define _IDPF_TYPE_H_
#include "idpf_controlq.h"
#define UNREFERENCED_XPARAMETER
#define UNREFERENCED_1PARAMETER(_p)
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
#define MAKEMASK(m, s) ((m) << (s))
struct idpf_eth_stats {
u64 rx_bytes; /* gorc */
u64 rx_unicast; /* uprc */
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
u64 tx_multicast; /* mptc */
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
};
/* Statistics collected by the MAC */
struct idpf_hw_port_stats {
/* eth stats collected by the port */
struct idpf_eth_stats eth;
/* additional port specific stats */
u64 tx_dropped_link_down; /* tdold */
u64 crc_errors; /* crcerrs */
u64 illegal_bytes; /* illerrc */
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
u64 rx_length_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 priority_xon_rx[8]; /* pxonrxc[8] */
u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
u64 link_xon_tx; /* lxontxc */
u64 link_xoff_tx; /* lxofftxc */
u64 priority_xon_tx[8]; /* pxontxc[8] */
u64 priority_xoff_tx[8]; /* pxofftxc[8] */
u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */
u64 rx_size_511; /* prc511 */
u64 rx_size_1023; /* prc1023 */
u64 rx_size_1522; /* prc1522 */
u64 rx_size_big; /* prc9522 */
u64 rx_undersize; /* ruc */
u64 rx_fragments; /* rfc */
u64 rx_oversize; /* roc */
u64 rx_jabber; /* rjc */
u64 tx_size_64; /* ptc64 */
u64 tx_size_127; /* ptc127 */
u64 tx_size_255; /* ptc255 */
u64 tx_size_511; /* ptc511 */
u64 tx_size_1023; /* ptc1023 */
u64 tx_size_1522; /* ptc1522 */
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
};
/* Static buffer size to initialize control queue */
struct idpf_ctlq_size {
u16 asq_buf_size;
u16 asq_ring_size;
u16 arq_buf_size;
u16 arq_ring_size;
};
/* Temporary definition to compile - TBD if needed */
struct idpf_arq_event_info {
struct idpf_ctlq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
};
struct idpf_get_set_rss_key_data {
u8 standard_rss_key[0x28];
u8 extended_hash_key[0xc];
};
struct idpf_aq_get_phy_abilities_resp {
__le32 phy_type;
};
struct idpf_filter_program_desc {
__le32 qid;
};
#endif /* _IDPF_TYPE_H_ */


@ -0,0 +1,14 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2022 Intel Corporation
sources = files(
'idpf_common.c',
'idpf_controlq.c',
'idpf_controlq_setup.c',
)
cflags += ['-Wno-unused-value']
cflags += ['-Wno-unused-variable']
cflags += ['-Wno-unused-parameter']
cflags += ['-Wno-implicit-fallthrough']
cflags += ['-Wno-strict-aliasing']


@ -0,0 +1,47 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _SIOV_REGS_H_
#define _SIOV_REGS_H_
#define VDEV_MBX_START 0x20000 /* Begin at 128KB */
#define VDEV_MBX_ATQBAL (VDEV_MBX_START + 0x0000)
#define VDEV_MBX_ATQBAH (VDEV_MBX_START + 0x0004)
#define VDEV_MBX_ATQLEN (VDEV_MBX_START + 0x0008)
#define VDEV_MBX_ATQH (VDEV_MBX_START + 0x000C)
#define VDEV_MBX_ATQT (VDEV_MBX_START + 0x0010)
#define VDEV_MBX_ARQBAL (VDEV_MBX_START + 0x0014)
#define VDEV_MBX_ARQBAH (VDEV_MBX_START + 0x0018)
#define VDEV_MBX_ARQLEN (VDEV_MBX_START + 0x001C)
#define VDEV_MBX_ARQH (VDEV_MBX_START + 0x0020)
#define VDEV_MBX_ARQT (VDEV_MBX_START + 0x0024)
#define VDEV_GET_RSTAT 0x21000 /* 132KB for RSTAT */
/* Begin at offset after 1MB (after 256 4k pages) */
#define VDEV_QRX_TAIL_START 0x100000
#define VDEV_QRX_TAIL(_i) (VDEV_QRX_TAIL_START + ((_i) * 0x1000)) /* 2k Rx queues */
/* Begin at offset of 9MB for Rx buffer queue tail register pages */
#define VDEV_QRX_BUFQ_TAIL_START 0x900000
/* 2k Rx buffer queues */
#define VDEV_QRX_BUFQ_TAIL(_i) (VDEV_QRX_BUFQ_TAIL_START + ((_i) * 0x1000))
/* Begin at offset of 17MB for 2k Tx queues */
#define VDEV_QTX_TAIL_START 0x1100000
#define VDEV_QTX_TAIL(_i) (VDEV_QTX_TAIL_START + ((_i) * 0x1000)) /* 2k Tx queues */
/* Begin at offset of 25MB for 2k Tx completion queues */
#define VDEV_QTX_COMPL_TAIL_START 0x1900000
/* 2k Tx completion queues */
#define VDEV_QTX_COMPL_TAIL(_i) (VDEV_QTX_COMPL_TAIL_START + ((_i) * 0x1000))
#define VDEV_INT_DYN_CTL01 0x2100000 /* Begin at offset 33MB */
/* Begin at offset of 33MB + 4k to accommodate CTL01 register */
#define VDEV_INT_DYN_START (VDEV_INT_DYN_CTL01 + 0x1000)
#define VDEV_INT_DYN_CTL(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000))
#define VDEV_INT_ITR_0(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x04)
#define VDEV_INT_ITR_1(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08)
#define VDEV_INT_ITR_2(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C)
/* Next offset to begin at 42MB (0x2A00000) */
#endif /* _SIOV_REGS_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,606 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
/*
* Copyright (C) 2019 Intel Corporation
*
* For licensing information, see the file 'LICENSE' in the root folder
*/
#ifndef _VIRTCHNL2_LAN_DESC_H_
#define _VIRTCHNL2_LAN_DESC_H_
/* VIRTCHNL2_TX_DESC_IDS
* Transmit descriptor ID flags
*/
#define VIRTCHNL2_TXDID_DATA BIT(0)
#define VIRTCHNL2_TXDID_CTX BIT(1)
#define VIRTCHNL2_TXDID_REINJECT_CTX BIT(2)
#define VIRTCHNL2_TXDID_FLEX_DATA BIT(3)
#define VIRTCHNL2_TXDID_FLEX_CTX BIT(4)
#define VIRTCHNL2_TXDID_FLEX_TSO_CTX BIT(5)
#define VIRTCHNL2_TXDID_FLEX_TSYN_L2TAG1 BIT(6)
#define VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2 BIT(7)
#define VIRTCHNL2_TXDID_FLEX_TSO_L2TAG2_PARSTAG_CTX BIT(8)
#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_TSO_CTX BIT(9)
#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_CTX BIT(10)
#define VIRTCHNL2_TXDID_FLEX_L2TAG2_CTX BIT(11)
#define VIRTCHNL2_TXDID_FLEX_FLOW_SCHED BIT(12)
#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_TSO_CTX BIT(13)
#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_CTX BIT(14)
#define VIRTCHNL2_TXDID_DESC_DONE BIT(15)
/* VIRTCHNL2_RX_DESC_IDS
* Receive descriptor IDs (range from 0 to 63)
*/
#define VIRTCHNL2_RXDID_0_16B_BASE 0
#define VIRTCHNL2_RXDID_1_32B_BASE 1
/* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be
* differentiated based on queue model; e.g. single queue model can
* only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ
* for DID 2.
*/
#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ 2
#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC 2
#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW 3
#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB 4
#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL 5
#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2 6
#define VIRTCHNL2_RXDID_7_HW_RSVD 7
/* 9 through 15 are reserved */
#define VIRTCHNL2_RXDID_16_COMMS_GENERIC 16
#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN 17
#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4 18
#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6 19
#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW 20
#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP 21
/* 22 through 63 are reserved */
/* VIRTCHNL2_RX_DESC_ID_BITMASKS
* Receive descriptor ID bitmasks
*/
#define VIRTCHNL2_RXDID_0_16B_BASE_M BIT(VIRTCHNL2_RXDID_0_16B_BASE)
#define VIRTCHNL2_RXDID_1_32B_BASE_M BIT(VIRTCHNL2_RXDID_1_32B_BASE)
#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M BIT(VIRTCHNL2_RXDID_2_FLEX_SPLITQ)
#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M BIT(VIRTCHNL2_RXDID_2_FLEX_SQ_NIC)
#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW_M BIT(VIRTCHNL2_RXDID_3_FLEX_SQ_SW)
#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB_M BIT(VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB)
#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL_M BIT(VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL)
#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2_M BIT(VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2)
#define VIRTCHNL2_RXDID_7_HW_RSVD_M BIT(VIRTCHNL2_RXDID_7_HW_RSVD)
/* 9 through 15 are reserved */
#define VIRTCHNL2_RXDID_16_COMMS_GENERIC_M BIT(VIRTCHNL2_RXDID_16_COMMS_GENERIC)
#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN_M BIT(VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN)
#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4_M BIT(VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4)
#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6_M BIT(VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6)
#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW_M BIT(VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW)
#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP_M BIT(VIRTCHNL2_RXDID_21_COMMS_AUX_TCP)
/* 22 through 63 are reserved */
/* Rx */
/* For splitq virtchnl2_rx_flex_desc_adv desc members */
#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S 0
#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M \
MAKEMASK(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S 0
#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M \
MAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S 10
#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M \
MAKEMASK(0x3UL, VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S 12
#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M \
MAKEMASK(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S 0
#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M \
MAKEMASK(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S 14
#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S 15
#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S 0
#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M \
MAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S 10
#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S 11
#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 12
#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M \
MAKEMASK(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S)
#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15
#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \
BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)
/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW1_BITS
* for splitq virtchnl2_rx_flex_desc_adv
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_S 0
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_S 1
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_S 2
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S 3
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S 4
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S 5
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S 6
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S 7
/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW0_BITS
* for splitq virtchnl2_rx_flex_desc_adv
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_S 0
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S 1
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_S 2
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_S 3
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S 4
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S 5
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S 6
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S 7
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LAST 8 /* this entry must be last!!! */
/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_1_BITS
* for splitq virtchnl2_rx_flex_desc_adv
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_S 0 /* 2 bits */
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S 2
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S 3
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S 4
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S 5
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S 6
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S 7
#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_LAST 8 /* this entry must be last!!! */
/* for singleq (flex) virtchnl2_rx_flex_desc fields */
/* for virtchnl2_rx_flex_desc.ptype_flex_flags0 member */
#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_S 0
#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M \
MAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_PTYPE_S) /* 10 bits */
/* for virtchnl2_rx_flex_desc.pkt_length member */
#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S 0
#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M \
MAKEMASK(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S) /* 14 bits */
/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_0_BITS
* for singleq (flex) virtchnl2_rx_flex_desc
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S 0
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_S 1
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_S 2
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S 3
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S 4
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S 5
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S 6
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S 7
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_S 8
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_S 9
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_S 10
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_S 11
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_S 12
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_S 13
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S 14
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S 15
#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LAST 16 /* this entry must be last!!! */
/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_1_BITS
* for singleq (flex) virtchnl2_rx_flex_desc
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_S 0 /* 4 bits */
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_S 4
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_S 5
/* [10:6] reserved */
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_S 11
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S 12
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S 13
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S 14
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S 15
#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_LAST 16 /* this entry must be last!!! */
/* for virtchnl2_rx_flex_desc.ts_low member */
#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID BIT(0)
/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S 63
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_M \
BIT_ULL(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S)
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S 52
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_M \
MAKEMASK(0x7FFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S)
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S 38
#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M \
MAKEMASK(0x3FFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S)
#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S 30
#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M \
MAKEMASK(0xFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S)
#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S 19
#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M \
MAKEMASK(0xFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S)
#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S 0
#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M \
MAKEMASK(0x7FFFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S)
/* VIRTCHNL2_RX_BASE_DESC_STATUS_BITS
* for singleq (base) virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_BASE_DESC_STATUS_DD_S 0
#define VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_S 1
#define VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_S 2
#define VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_S 3
#define VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_S 4
#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_S 5 /* 3 bits */
#define VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_S 8
#define VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_S 9 /* 2 bits */
#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_S 11
#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_S 12 /* 2 bits */
#define VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_S 14
#define VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_S 15
#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_S 16 /* 2 bits */
#define VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_S 18
#define VIRTCHNL2_RX_BASE_DESC_STATUS_LAST 19 /* this entry must be last!!! */
/* VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_BITS
* for singleq (base) virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S 0
/* VIRTCHNL2_RX_BASE_DESC_ERROR_BITS
* for singleq (base) virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_S 0
#define VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_S 1
#define VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_S 2
#define VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_S 3 /* 3 bits */
#define VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_S 3
#define VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_S 4
#define VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_S 5
#define VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_S 6
#define VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_S 7
/* VIRTCHNL2_RX_BASE_DESC_FLTSTAT_VALUES
* for singleq (base) virtchnl2_rx_base_desc
* Note: These are predefined bit offsets
*/
#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_NO_DATA 0
#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_FD_ID 1
#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSV 2
#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH 3
/* Receive Descriptors */
/* splitq buf
* | 16| 0|
* ----------------------------------------------------------------
* | RSV | Buffer ID |
* ----------------------------------------------------------------
* | Rx packet buffer address |
* ----------------------------------------------------------------
* | Rx header buffer address |
* ----------------------------------------------------------------
* | RSV |
* ----------------------------------------------------------------
* | 0|
*/
struct virtchnl2_splitq_rx_buf_desc {
struct {
__le16 buf_id; /* Buffer Identifier */
__le16 rsvd0;
__le32 rsvd1;
} qword0;
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 rsvd2;
}; /* read used with buffer queues */
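A minimal sketch of posting one buffer through this descriptor; it is illustrative only, not part of this patch, and assumes the CPU_TO_LE*() helpers from idpf_osdep.h in this patch.

/* Illustrative sketch, not part of this patch: post one split-queue Rx buffer. */
static inline void
example_post_splitq_rx_buf(struct virtchnl2_splitq_rx_buf_desc *desc,
			   u16 buf_id, u64 pkt_dma, u64 hdr_dma)
{
	desc->qword0.buf_id = CPU_TO_LE16(buf_id);
	desc->qword0.rsvd0 = 0;
	desc->qword0.rsvd1 = 0;
	desc->pkt_addr = CPU_TO_LE64(pkt_dma);
	desc->hdr_addr = CPU_TO_LE64(hdr_dma);
	desc->rsvd2 = 0;
}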
/* singleq buf
* | 0|
* ----------------------------------------------------------------
* | Rx packet buffer address |
* ----------------------------------------------------------------
* | Rx header buffer address |
* ----------------------------------------------------------------
* | RSV |
* ----------------------------------------------------------------
* | RSV |
* ----------------------------------------------------------------
* | 0|
*/
struct virtchnl2_singleq_rx_buf_desc {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 rsvd1;
__le64 rsvd2;
}; /* read used with buffer queues */
union virtchnl2_rx_buf_desc {
struct virtchnl2_singleq_rx_buf_desc read;
struct virtchnl2_splitq_rx_buf_desc split_rd;
};
/* (0x00) singleq wb(compl) */
struct virtchnl2_singleq_base_rx_desc {
struct {
struct {
__le16 mirroring_status;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow Director filter id */
} hi_dword;
} qword0;
struct {
/* status/error/PTYPE/length */
__le64 status_error_ptype_len;
} qword1;
struct {
__le16 ext_status; /* extended status */
__le16 rsvd;
__le16 l2tag2_1;
__le16 l2tag2_2;
} qword2;
struct {
__le32 reserved;
__le32 fd_id;
} qword3;
}; /* writeback */
/* (0x01) singleq flex compl */
struct virtchnl2_rx_flex_desc {
/* Qword 0 */
u8 rxdid; /* descriptor builder profile id */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
/* sph=[11:11] */
/* ff1/ext=[15:12] */
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 flex_meta0;
__le16 flex_meta1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 time_stamp_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 flex_meta2;
__le16 flex_meta3;
union {
struct {
__le16 flex_meta4;
__le16 flex_meta5;
} flex;
__le32 ts_high;
} flex_ts;
};
/* (0x02) */
struct virtchnl2_rx_flex_desc_nic {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le32 rss_hash;
/* Qword 2 */
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 flow_id;
union {
struct {
__le16 rsvd;
__le16 flow_id_ipv6;
} flex;
__le32 ts_high;
} flex_ts;
};
/* Rx Flex Descriptor Switch Profile
* RxDID Profile Id 3
* Flex-field 0: Source Vsi
*/
struct virtchnl2_rx_flex_desc_sw {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 src_vsi; /* [10:15] are reserved */
__le16 flex_md1_rsvd;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 rsvd; /* flex words 2-3 are reserved */
__le32 ts_high;
};
/* Rx Flex Descriptor NIC Profile
* RxDID Profile Id 6
* Flex-field 0: RSS hash lower 16-bits
* Flex-field 1: RSS hash upper 16-bits
* Flex-field 2: Flow Id lower 16-bits
* Flex-field 3: Source Vsi
* Flex-field 4: reserved, Vlan id taken from L2Tag
*/
struct virtchnl2_rx_flex_desc_nic_2 {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le32 rss_hash;
/* Qword 2 */
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 flow_id;
__le16 src_vsi;
union {
struct {
__le16 rsvd;
__le16 flow_id_ipv6;
} flex;
__le32 ts_high;
} flex_ts;
};
/* Rx Flex Descriptor Advanced (Split Queue Model)
* RxDID Profile Id 7
*/
struct virtchnl2_rx_flex_desc_adv {
/* Qword 0 */
u8 rxdid_ucast; /* profile_id=[3:0] */
/* rsvd=[5:4] */
/* ucast=[7:6] */
u8 status_err0_qw0;
__le16 ptype_err_fflags0; /* ptype=[9:0] */
/* ip_hdr_err=[10:10] */
/* udp_len_err=[11:11] */
/* ff0=[15:12] */
__le16 pktlen_gen_bufq_id; /* plen=[13:0] */
/* gen=[14:14] only in splitq */
/* bufq_id=[15:15] only in splitq */
__le16 hdrlen_flags; /* header=[9:0] */
/* rsc=[10:10] only in splitq */
/* sph=[11:11] only in splitq */
/* ext_udp_0=[12:12] */
/* int_udp_0=[13:13] */
/* trunc_mirr=[14:14] */
/* miss_prepend=[15:15] */
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
u8 fflags1;
u8 ts_low;
__le16 fmd0;
__le16 fmd1;
/* Qword 2 */
__le16 fmd2;
u8 fflags2;
u8 hash3;
__le16 fmd3;
__le16 fmd4;
/* Qword 3 */
__le16 fmd5;
__le16 fmd6;
__le16 fmd7_0;
__le16 fmd7_1;
}; /* writeback */
/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
* RxDID Profile Id 8
* Flex-field 0: BufferID
* Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
* Flex-field 2: Hash[15:0]
* Flex-flags 2: Hash[23:16]
* Flex-field 3: L2TAG2
* Flex-field 5: L2TAG1
* Flex-field 7: Timestamp (upper 32 bits)
*/
struct virtchnl2_rx_flex_desc_adv_nic_3 {
/* Qword 0 */
u8 rxdid_ucast; /* profile_id=[3:0] */
/* rsvd=[5:4] */
/* ucast=[7:6] */
u8 status_err0_qw0;
__le16 ptype_err_fflags0; /* ptype=[9:0] */
/* ip_hdr_err=[10:10] */
/* udp_len_err=[11:11] */
/* ff0=[15:12] */
__le16 pktlen_gen_bufq_id; /* plen=[13:0] */
/* gen=[14:14] only in splitq */
/* bufq_id=[15:15] only in splitq */
__le16 hdrlen_flags; /* header=[9:0] */
/* rsc=[10:10] only in splitq */
/* sph=[11:11] only in splitq */
/* ext_udp_0=[12:12] */
/* int_udp_0=[13:13] */
/* trunc_mirr=[14:14] */
/* miss_prepend=[15:15] */
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
u8 fflags1;
u8 ts_low;
__le16 buf_id; /* only in splitq */
union {
__le16 raw_cs;
__le16 l2tag1;
__le16 rscseglen;
} misc;
/* Qword 2 */
__le16 hash1;
union {
u8 fflags2;
u8 mirrorid;
u8 hash2;
} ff2_mirrid_hash2;
u8 hash3;
__le16 l2tag2;
__le16 fmd4;
/* Qword 3 */
__le16 l2tag1;
__le16 fmd6;
__le32 ts_high;
}; /* writeback */
union virtchnl2_rx_desc {
struct virtchnl2_singleq_rx_buf_desc read;
struct virtchnl2_singleq_base_rx_desc base_wb;
struct virtchnl2_rx_flex_desc flex_wb;
struct virtchnl2_rx_flex_desc_nic flex_nic_wb;
struct virtchnl2_rx_flex_desc_sw flex_sw_wb;
struct virtchnl2_rx_flex_desc_nic_2 flex_nic_2_wb;
struct virtchnl2_rx_flex_desc_adv flex_adv_wb;
struct virtchnl2_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb;
};
#endif /* _VIRTCHNL2_LAN_DESC_H_ */
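To show how the split-queue advanced writeback format above is consumed, here is a hedged polling sketch: the GEN bit toggles each time the ring wraps, so a descriptor belongs to the current pass only when its GEN bit matches the expected value. Illustrative only, not part of this patch; LE16_TO_CPU() is assumed from idpf_osdep.h.

/* Illustrative sketch, not part of this patch: check/parse one splitq Rx
 * writeback descriptor (RxDID profile 8).
 */
static inline int
example_parse_splitq_rx(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd,
			u8 expected_gen, u16 *pkt_len, u16 *buf_id)
{
	u16 plen_gen = LE16_TO_CPU(rxd->pktlen_gen_bufq_id);
	u8 gen = !!(plen_gen & VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);

	if (gen != expected_gen)
		return -1;	/* not yet written back for this pass */

	*pkt_len = plen_gen & VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M;
	*buf_id = LE16_TO_CPU(rxd->buf_id);
	return 0;
}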


@ -0,0 +1,567 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _VIRTCHNL_INLINE_IPSEC_H_
#define _VIRTCHNL_INLINE_IPSEC_H_
#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3
#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16
#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128
#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
#define VIRTCHNL_IPSEC_SA_DESTROY 0
#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF
#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF
/* crypto type */
#define VIRTCHNL_AUTH 1
#define VIRTCHNL_CIPHER 2
#define VIRTCHNL_AEAD 3
/* caps enabled */
#define VIRTCHNL_IPSEC_ESN_ENA BIT(0)
#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA BIT(1)
#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA BIT(2)
#define VIRTCHNL_IPSEC_AUDIT_ENA BIT(3)
#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA BIT(4)
#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA BIT(5)
#define VIRTCHNL_IPSEC_ARW_CHECK_ENA BIT(6)
#define VIRTCHNL_IPSEC_24BIT_SPI_ENA BIT(7)
/* algorithm type */
/* Hash Algorithm */
#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */
#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */
#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */
#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */
#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */
#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */
#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */
#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */
#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
/* Cipher Algorithm */
#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */
#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */
#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */
#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */
/* AEAD Algorithm */
#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */
#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */
#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
/* protocol type */
#define VIRTCHNL_PROTO_ESP 1
#define VIRTCHNL_PROTO_AH 2
#define VIRTCHNL_PROTO_RSVD1 3
/* sa mode */
#define VIRTCHNL_SA_MODE_TRANSPORT 1
#define VIRTCHNL_SA_MODE_TUNNEL 2
#define VIRTCHNL_SA_MODE_TRAN_TUN 3
#define VIRTCHNL_SA_MODE_UNKNOWN 4
/* sa direction */
#define VIRTCHNL_DIR_INGRESS 1
#define VIRTCHNL_DIR_EGRESS 2
#define VIRTCHNL_DIR_INGRESS_EGRESS 3
/* sa termination */
#define VIRTCHNL_TERM_SOFTWARE 1
#define VIRTCHNL_TERM_HARDWARE 2
/* sa ip type */
#define VIRTCHNL_IPV4 1
#define VIRTCHNL_IPV6 2
/* for virtchnl_ipsec_resp */
enum inline_ipsec_resp {
INLINE_IPSEC_SUCCESS = 0,
INLINE_IPSEC_FAIL = -1,
INLINE_IPSEC_ERR_FIFO_FULL = -2,
INLINE_IPSEC_ERR_NOT_READY = -3,
INLINE_IPSEC_ERR_VF_DOWN = -4,
INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
INLINE_IPSEC_ERR_NO_MEM = -6,
};
/* Detailed opcodes for DPDK and IPsec use */
enum inline_ipsec_ops {
INLINE_IPSEC_OP_GET_CAP = 0,
INLINE_IPSEC_OP_GET_STATUS = 1,
INLINE_IPSEC_OP_SA_CREATE = 2,
INLINE_IPSEC_OP_SA_UPDATE = 3,
INLINE_IPSEC_OP_SA_DESTROY = 4,
INLINE_IPSEC_OP_SP_CREATE = 5,
INLINE_IPSEC_OP_SP_DESTROY = 6,
INLINE_IPSEC_OP_SA_READ = 7,
INLINE_IPSEC_OP_EVENT = 8,
INLINE_IPSEC_OP_RESP = 9,
};
#pragma pack(1)
/* Not all fields are valid; if a field is invalid, set all of its bits to 1 */
struct virtchnl_algo_cap {
u32 algo_type;
u16 block_size;
u16 min_key_size;
u16 max_key_size;
u16 inc_key_size;
u16 min_iv_size;
u16 max_iv_size;
u16 inc_iv_size;
u16 min_digest_size;
u16 max_digest_size;
u16 inc_digest_size;
u16 min_aad_size;
u16 max_aad_size;
u16 inc_aad_size;
};
#pragma pack()
/* VF-side record of the crypto capabilities reported over virtchnl */
struct virtchnl_sym_crypto_cap {
u8 crypto_type;
u8 algo_cap_num;
struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
};
/* VIRTCHNL_OP_GET_IPSEC_CAP
 * VF passes virtchnl_ipsec_cap to PF
 * and PF returns its IPsec capabilities over virtchnl.
*/
#pragma pack(1)
struct virtchnl_ipsec_cap {
/* max number of SA per VF */
u16 max_sa_num;
/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
u8 virtchnl_protocol_type;
/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
u8 virtchnl_sa_mode;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 termination_mode;
/* number of supported crypto capability */
u8 crypto_cap_num;
/* descriptor ID */
u16 desc_id;
/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
u32 caps_enabled;
/* crypto capabilities */
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
};
/* configuration of crypto function */
struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;
u32 algo_type;
/* Length of valid IV data. */
u16 iv_len;
/* Length of digest */
u16 digest_len;
/* SA salt */
u32 salt;
/* The length of the symmetric key */
u16 key_len;
/* key data buffer */
u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
};
#pragma pack()
struct virtchnl_ipsec_sym_crypto_cfg {
struct virtchnl_ipsec_crypto_cfg_item
items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_CREATE
 * VF sends this SA configuration to PF using virtchnl;
 * PF creates the SA per the configuration and the PF driver returns
 * a unique index (sa_idx) for the created SA.
*/
struct virtchnl_ipsec_sa_cfg {
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* type of outer IP - IPv4/IPv6 */
u8 virtchnl_ip_type;
/* type of esn - !0:enable/0:disable */
u8 esn_enabled;
/* udp encap - !0:enable/0:disable */
u8 udp_encap_enabled;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* reserved */
u8 reserved1;
/* SA security parameter index */
u32 spi;
/* outer src ip address */
u8 src_addr[16];
/* outer dst ip address */
u8 dst_addr[16];
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
/* When enabled, sa_index must be valid */
u8 sa_index_en;
/* SA index when sa_index_en is true */
u32 sa_index;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When enabled, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
 * anti-replay window check - enable/disable
* When enabled, arw_size must be valid.
*/
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* no ip offload mode - enable/disable
* When enabled, ip type and address must not be valid.
*/
u8 no_ip_offload_en;
/* SA Domain. Used to logically separate an SADB into groups.
* PF drivers supporting a single group ignore this field.
*/
u16 sa_domain;
/* crypto configuration */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
/* VIRTCHNL_OP_IPSEC_SA_UPDATE
 * VF sends the configuration for an SA index to PF;
 * PF updates the SA according to the configuration.
*/
struct virtchnl_ipsec_sa_update {
u32 sa_index; /* SA to update */
u32 esn_hi; /* high 32 bits of esn */
u32 esn_low; /* low 32 bits of esn */
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_DESTROY
 * VF sends the configuration for one or more SA indexes to PF;
 * PF destroys the SAs according to the configuration. The flag
 * bitmap indicates whether all SAs or only the selected SAs
 * are destroyed.
*/
struct virtchnl_ipsec_sa_destroy {
/* All zero bitmap indicates all SA will be destroyed.
* Non-zero bitmap indicates the selected SA in
* array sa_index will be destroyed.
*/
u8 flag;
/* selected SA index */
u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
};
/* VIRTCHNL_OP_IPSEC_SA_READ
 * VF sends this SA read request to PF using virtchnl;
 * PF reads the SA and returns the configuration of the created SA.
*/
struct virtchnl_ipsec_sa_read {
/* SA valid - invalid/valid */
u8 valid;
/* SA active - inactive/active */
u8 active;
/* SA SN rollover - not_rollover/rollover */
u8 sn_rollover;
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 virtchnl_termination;
/* auditing mode - enable/disable */
u8 audit_en;
/* lifetime byte limit - enable/disable
* When set to limit, byte_limit_hard and byte_limit_soft
* must be valid.
*/
u8 byte_limit_en;
/* hard byte limit count */
u64 byte_limit_hard;
/* soft byte limit count */
u64 byte_limit_soft;
/* drop on authentication failure - enable/disable */
u8 drop_on_auth_fail_en;
/* anti-replay window check - enable/disable
* When set to check, arw_size, arw_top, and arw must be valid
*/
u8 arw_check_en;
/* size of arw window, offset by 1. Setting to 0
* represents ARW window size of 1. Setting to 127
* represents ARW window size of 128
*/
u8 arw_size;
/* reserved */
u8 reserved1;
/* top of anti-replay-window */
u64 arw_top;
/* anti-replay-window */
u8 arw[16];
/* packets processed */
u64 packets_processed;
/* bytes processed */
u64 bytes_processed;
/* packets dropped */
u32 packets_dropped;
/* authentication failures */
u32 auth_fails;
/* ARW check failures */
u32 arw_fails;
/* type of esn - enable/disable */
u8 esn;
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* SA security parameter index */
u32 spi;
/* SA salt */
u32 salt;
/* high 32 bits of esn */
u32 esn_hi;
/* low 32 bits of esn */
u32 esn_low;
 * SA Domain. Used to logically separate an SADB into groups.
* PF drivers supporting a single group ignore this field.
*/
u16 sa_domain;
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
u16 spd_ref;
/* crypto configuration. Salt and keys are set to 0 */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
/* Add allowlist entry in IES */
struct virtchnl_ipsec_sp_cfg {
u32 spi;
u32 dip[4];
/* Drop frame if true or redirect to QAT if false. */
u8 drop;
/* Congestion domain. For future use. */
u8 cgd;
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
/* Set TC (congestion domain) if true. For future use. */
u8 set_tc;
/* 0 for NAT-T unsupported, 1 for NAT-T supported */
u8 is_udp;
/* reserved */
u8 reserved;
/* NAT-T UDP port number. Only valid in case NAT-T supported */
u16 udp_port;
};
#pragma pack(1)
/* Delete allowlist entry in IES */
struct virtchnl_ipsec_sp_destroy {
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
u32 rule_id;
};
#pragma pack()
/* Response from IES to allowlist operations */
struct virtchnl_ipsec_sp_cfg_resp {
u32 rule_id;
};
struct virtchnl_ipsec_sa_cfg_resp {
u32 sa_handle;
};
#define INLINE_IPSEC_EVENT_RESET 0x1
#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2
#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4
struct virtchnl_ipsec_event {
u32 ipsec_event_data;
};
#define INLINE_IPSEC_STATUS_AVAILABLE 0x1
#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2
struct virtchnl_ipsec_status {
u32 status;
};
struct virtchnl_ipsec_resp {
u32 resp;
};
/* Internal message descriptor for VF <-> IPsec communication */
struct inline_ipsec_msg {
u16 ipsec_opcode;
u16 req_id;
union {
/* IPsec request */
struct virtchnl_ipsec_sa_cfg sa_cfg[0];
struct virtchnl_ipsec_sp_cfg sp_cfg[0];
struct virtchnl_ipsec_sa_update sa_update[0];
struct virtchnl_ipsec_sa_destroy sa_destroy[0];
struct virtchnl_ipsec_sp_destroy sp_destroy[0];
/* IPsec response */
struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
struct virtchnl_ipsec_cap ipsec_cap[0];
struct virtchnl_ipsec_status ipsec_status[0];
/* response to del_sa, del_sp, update_sa */
struct virtchnl_ipsec_resp ipsec_resp[0];
/* IPsec event (no req_id is required) */
struct virtchnl_ipsec_event event[0];
/* Reserved */
struct virtchnl_ipsec_sa_read sa_read[0];
} ipsec_data;
};
static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
{
u16 valid_len = sizeof(struct inline_ipsec_msg);
switch (opcode) {
case INLINE_IPSEC_OP_GET_CAP:
case INLINE_IPSEC_OP_GET_STATUS:
break;
case INLINE_IPSEC_OP_SA_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
break;
case INLINE_IPSEC_OP_SP_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
break;
case INLINE_IPSEC_OP_SA_UPDATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_update);
break;
case INLINE_IPSEC_OP_SA_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
break;
case INLINE_IPSEC_OP_SP_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
break;
/* Only for msg length calculation of response to VF in case of
* inline ipsec failure.
*/
case INLINE_IPSEC_OP_RESP:
valid_len += sizeof(struct virtchnl_ipsec_resp);
break;
default:
valid_len = 0;
break;
}
return valid_len;
}
#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
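A small sketch of how the length validator above is meant to be used when a message arrives; illustrative only, not part of this patch, reusing the response codes defined earlier in this header.

/* Illustrative sketch, not part of this patch: validate an inline-IPsec msg. */
static inline int
example_check_ipsec_msg(const struct inline_ipsec_msg *msg, u16 msglen)
{
	u16 valid_len = virtchnl_inline_ipsec_val_msg_len(msg->ipsec_opcode);

	if (valid_len == 0 || msglen < valid_len)
		return INLINE_IPSEC_ERR_INVALID_PARAMS;

	return INLINE_IPSEC_SUCCESS;
}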


@ -0,0 +1,4 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2022 Intel Corporation
subdir('base')


@ -0,0 +1,12 @@
INTERNAL {
global:
idpf_ctlq_deinit;
idpf_ctlq_init;
idpf_ctlq_clean_sq;
idpf_ctlq_recv;
idpf_ctlq_send;
idpf_ctlq_post_rx_buffs;
local: *;
};


@ -6,6 +6,7 @@ drivers = [
'cpt',
'dpaax',
'iavf',
'idpf',
'mvep',
'octeontx',
]