net/qede/base: multi-Txq support on same queue-zone for VFs
A step toward having multi-Txq support on the same queue-zone for VFs.

This change takes care of:

- VFs assume a single CID per-queue, where queue X receives CID X.
  Switch to a model similar to that of the PF - i.e., use different CIDs
  for Rx/Tx, and use mapping to acquire/release those. Each VF will
  currently have 32 CIDs available to it [for its possible 16 Rx &
  16 Tx queues].

- To retain the same interface for PFs/VFs when initializing queues,
  the base driver would have to retain a unique number per each queue
  that would be communicated in some extended TLV [the current TLV
  interface allows the PF to send only the queue-id]. The new TLV isn't
  part of the current change, but the base driver now starts adding
  such unique keys internally to queue_cids. This also forces us to
  start having alloc/setup/free for L2 [we've refrained from doing so
  until now]. The limit would be no more than 64 queues per qzone
  [this could be changed if needed, but hopefully no one needs so many
  queues].

- In IOV, add infrastructure for up to 64 qids per qzone, although at
  the moment hard-code '0' for Rx and '1' for Tx [since the VF still
  isn't communicating via the new TLV which index to associate with a
  given queue in its queue-zone], as illustrated in the sketch below.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
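The per-qzone bookkeeping described above boils down to a small first-fit bitmap allocator. Here is a minimal standalone C sketch of that idea (illustrative only - struct qzone_usage and its helpers are invented names, not the driver's API): each queue-zone tracks up to 64 sub-queue indices, the first queue opened on a qzone takes index 0 and the next takes index 1, mirroring the hard-coded '0'-for-Rx / '1'-for-Tx split from the last bullet.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_QUEUES_PER_QZONE 64 /* the 64-queue-per-qzone limit above */

    struct qzone_usage {
        uint64_t bitmap; /* bit i set => sub-queue index i is in use */
    };

    /* First-fit allocation: returns the lowest free index, or -1 if the
     * qzone already has MAX_QUEUES_PER_QZONE queues open.
     */
    static int qzone_acquire_idx(struct qzone_usage *qz)
    {
        int i;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
            if (!(qz->bitmap & (1ULL << i))) {
                qz->bitmap |= 1ULL << i;
                return i;
            }
        }
        return -1;
    }

    static void qzone_release_idx(struct qzone_usage *qz, int idx)
    {
        qz->bitmap &= ~(1ULL << idx);
    }

    int main(void)
    {
        struct qzone_usage qz = { 0 };
        int rx = qzone_acquire_idx(&qz); /* Rx lands on index 0 */
        int tx = qzone_acquire_idx(&qz); /* Tx lands on index 1 */

        printf("rx idx=%d, tx idx=%d\n", rx, tx);
        qzone_release_idx(&qz, tx);
        return 0;
    }

In the driver itself the equivalent bitmap lives in per-queue-zone pp_qid_usage arrays and is manipulated under a mutex via OSAL_FIND_FIRST_ZERO_BIT/OSAL_SET_BIT, as the ecore_l2.c hunks below show.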
commit eb8e81ad0d (parent eb6088c168)
drivers/net/qede/base/ecore.h

@@ -200,6 +200,7 @@ struct ecore_cxt_mngr;
 struct ecore_dma_mem;
 struct ecore_sb_sp_info;
 struct ecore_ll2_info;
+struct ecore_l2_info;
 struct ecore_igu_info;
 struct ecore_mcp_info;
 struct ecore_dcbx_info;
@@ -598,6 +599,9 @@ struct ecore_hwfn {
 	/* If one of the following is set then EDPM shouldn't be used */
 	u8 dcbx_no_edpm;
 	u8 db_bar_no_edpm;
+
+	/* L2-related */
+	struct ecore_l2_info *p_l2_info;
 };
 
 #ifndef __EXTRACT__LINUX__
drivers/net/qede/base/ecore_cxt.c

@@ -8,6 +8,7 @@
 
 #include "bcm_osal.h"
 #include "reg_addr.h"
+#include "common_hsi.h"
 #include "ecore_hsi_common.h"
 #include "ecore_hsi_eth.h"
 #include "ecore_rt_defs.h"
@@ -101,7 +102,6 @@ struct ecore_tid_seg {
 
 struct ecore_conn_type_cfg {
 	u32 cid_count;
-	u32 cid_start;
 	u32 cids_per_vf;
 	struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
 };
@@ -197,6 +197,9 @@ struct ecore_cxt_mngr {
 
 	/* Acquired CIDs */
 	struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+	/* TBD - do we want this allocated to reserve space? */
+	struct ecore_cid_acquired_map
+		acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
 
 	/* ILT shadow table */
 	struct ecore_dma_mem *ilt_shadow;
@@ -1015,44 +1018,75 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
 static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 type;
+	u32 type, vf;
 
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
 		OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
 		p_mngr->acquired[type].max_count = 0;
 		p_mngr->acquired[type].start_cid = 0;
+
+		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+			OSAL_FREE(p_hwfn->p_dev,
+				  p_mngr->acquired_vf[type][vf].cid_map);
+			p_mngr->acquired_vf[type][vf].max_count = 0;
+			p_mngr->acquired_vf[type][vf].start_cid = 0;
+		}
 	}
 }
 
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+			   u32 cid_start, u32 cid_count,
+			   struct ecore_cid_acquired_map *p_map)
+{
+	u32 size;
+
+	if (!cid_count)
+		return ECORE_SUCCESS;
+
+	size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
+	p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+	if (p_map->cid_map == OSAL_NULL)
+		return ECORE_NOMEM;
+
+	p_map->max_count = cid_count;
+	p_map->start_cid = cid_start;
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+		   "Type %08x start: %08x count %08x\n",
+		   type, p_map->start_cid, p_map->max_count);
+
+	return ECORE_SUCCESS;
+}
+
 static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 start_cid = 0;
-	u32 type;
+	u32 start_cid = 0, vf_start_cid = 0;
+	u32 type, vf;
 
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
-		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
-		u32 size;
+		struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+		struct ecore_cid_acquired_map *p_map;
 
-		if (cid_cnt == 0)
-			continue;
-
-		size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
-		p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev,
-							     GFP_KERNEL, size);
-		if (!p_mngr->acquired[type].cid_map)
+		/* Handle PF maps */
+		p_map = &p_mngr->acquired[type];
+		if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+					       p_cfg->cid_count, p_map))
 			goto cid_map_fail;
 
-		p_mngr->acquired[type].max_count = cid_cnt;
-		p_mngr->acquired[type].start_cid = start_cid;
+		/* Handle VF maps */
+		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+			p_map = &p_mngr->acquired_vf[type][vf];
+			if (ecore_cid_map_alloc_single(p_hwfn, type,
+						       vf_start_cid,
+						       p_cfg->cids_per_vf,
+						       p_map))
+				goto cid_map_fail;
+		}
 
-		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
-
-		DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
-			   "Type %08x start: %08x count %08x\n",
-			   type, p_mngr->acquired[type].start_cid,
-			   p_mngr->acquired[type].max_count);
-
-		start_cid += cid_cnt;
+		start_cid += p_cfg->cid_count;
+		vf_start_cid += p_cfg->cids_per_vf;
 	}
 
 	return ECORE_SUCCESS;
@@ -1171,18 +1205,34 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_cid_acquired_map *p_map;
+	struct ecore_conn_type_cfg *p_cfg;
 	int type;
+	u32 len;
 
 	/* Reset acquired cids */
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
-		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
-		u32 i;
+		u32 vf;
 
-		if (cid_cnt == 0)
+		p_cfg = &p_mngr->conn_cfg[type];
+		if (p_cfg->cid_count) {
+			p_map = &p_mngr->acquired[type];
+			len = DIV_ROUND_UP(p_map->max_count,
+					   BITS_PER_MAP_WORD) *
+			      MAP_WORD_SIZE;
+			OSAL_MEM_ZERO(p_map->cid_map, len);
+		}
+
+		if (!p_cfg->cids_per_vf)
 			continue;
 
-		for (i = 0; i < DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); i++)
-			p_mngr->acquired[type].cid_map[i] = 0;
+		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+			p_map = &p_mngr->acquired_vf[type][vf];
+			len = DIV_ROUND_UP(p_map->max_count,
+					   BITS_PER_MAP_WORD) *
+			      MAP_WORD_SIZE;
+			OSAL_MEM_ZERO(p_map->cid_map, len);
+		}
 	}
 }
@@ -1723,93 +1773,150 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
 	ecore_prs_init_pf(p_hwfn);
 }
 
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
-					   enum protocol_type type, u32 *p_cid)
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+					    enum protocol_type type,
+					    u32 *p_cid, u8 vfid)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_cid_acquired_map *p_map;
 	u32 rel_cid;
 
-	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+	if (type >= MAX_CONN_TYPES) {
 		DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
 		return ECORE_INVAL;
 	}
 
-	rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_mngr->acquired[type].cid_map,
-					   p_mngr->acquired[type].max_count);
+	if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+		DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
+		return ECORE_INVAL;
+	}
 
-	if (rel_cid >= p_mngr->acquired[type].max_count) {
+	/* Determine the right map to take this CID from */
+	if (vfid == ECORE_CXT_PF_CID)
+		p_map = &p_mngr->acquired[type];
+	else
+		p_map = &p_mngr->acquired_vf[type][vfid];
+
+	if (p_map->cid_map == OSAL_NULL) {
+		DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+		return ECORE_INVAL;
+	}
+
+	rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
+					   p_map->max_count);
+
+	if (rel_cid >= p_map->max_count) {
 		DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
 			  type);
 		return ECORE_NORESOURCES;
 	}
 
-	OSAL_SET_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+	OSAL_SET_BIT(rel_cid, p_map->cid_map);
 
-	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+	*p_cid = rel_cid + p_map->start_cid;
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+		   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+		   *p_cid, rel_cid, vfid, type);
 
 	return ECORE_SUCCESS;
 }
 
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+					   enum protocol_type type,
+					   u32 *p_cid)
+{
+	return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
+}
+
 static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
-					u32 cid, enum protocol_type *p_type)
+					u32 cid, u8 vfid,
+					enum protocol_type *p_type,
+					struct ecore_cid_acquired_map **pp_map)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	struct ecore_cid_acquired_map *p_map;
-	enum protocol_type p;
 	u32 rel_cid;
 
 	/* Iterate over protocols and find matching cid range */
-	for (p = 0; p < MAX_CONN_TYPES; p++) {
-		p_map = &p_mngr->acquired[p];
+	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+		if (vfid == ECORE_CXT_PF_CID)
+			*pp_map = &p_mngr->acquired[*p_type];
+		else
+			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];
 
-		if (!p_map->cid_map)
+		if (!((*pp_map)->cid_map))
 			continue;
-		if (cid >= p_map->start_cid &&
-		    cid < p_map->start_cid + p_map->max_count) {
+		if (cid >= (*pp_map)->start_cid &&
+		    cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
 			break;
 		}
 	}
-	*p_type = p;
 
-	if (p == MAX_CONN_TYPES) {
-		DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
-		return false;
-	}
-	rel_cid = cid - p_map->start_cid;
-	if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
-		DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
-		return false;
+	if (*p_type == MAX_CONN_TYPES) {
+		DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+		goto fail;
+	}
+
+	rel_cid = cid - (*pp_map)->start_cid;
+	if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+		DP_NOTICE(p_hwfn, true,
+			  "CID %d [vifd %02x] not acquired", cid, vfid);
+		goto fail;
 	}
 
 	return true;
+fail:
+	*p_type = MAX_CONN_TYPES;
+	*pp_map = OSAL_NULL;
+	return false;
 }
 
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
 {
-	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_cid_acquired_map *p_map = OSAL_NULL;
 	enum protocol_type type;
 	bool b_acquired;
 	u32 rel_cid;
 
+	if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) {
+		DP_NOTICE(p_hwfn, true,
+			  "Trying to return incorrect CID belonging to VF %02x\n",
+			  vfid);
+		return;
+	}
+
 	/* Test acquired and find matching per-protocol map */
-	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, &type);
+	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+						 &type, &p_map);
 
 	if (!b_acquired)
 		return;
 
-	rel_cid = cid - p_mngr->acquired[type].start_cid;
-	OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+	rel_cid = cid - p_map->start_cid;
+	OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+		   cid, rel_cid, vfid, type);
+}
+
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+	_ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
 }
 
 enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
 					    struct ecore_cxt_info *p_info)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct ecore_cid_acquired_map *p_map = OSAL_NULL;
 	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
 	enum protocol_type type;
 	bool b_acquired;
 
 	/* Test acquired and find matching per-protocol map */
-	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+						 ECORE_CXT_PF_CID,
+						 &type, &p_map);
 
 	if (!b_acquired)
 		return ECORE_INVAL;
@@ -1865,9 +1972,14 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 		struct ecore_eth_pf_params *p_params =
 		    &p_hwfn->pf_params.eth_pf_params;
 
+		/* TODO - we probably want to add VF number to the PF
+		 * params;
+		 * As of now, allocates 16 * 2 per-VF [to retain regular
+		 * functionality].
+		 */
 		ecore_cxt_set_proto_cid_count(p_hwfn,
 					      PROTOCOLID_ETH,
-					      p_params->num_cons, 1); /* FIXME VF count... */
+					      p_params->num_cons, 32);
 
 		break;
 	}
drivers/net/qede/base/ecore_cxt.h

@@ -130,14 +130,53 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
 				     struct ecore_ptt *p_ptt);
 
+#define ECORE_CXT_PF_CID (0xff)
+
 /**
- * @brief ecore_cxt_release - Release a cid
- *
- * @param p_hwfn
- * @param cid
- */
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
-			   u32 cid);
+ * @brief ecore_cxt_release - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief ecore_cxt_release - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ */
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
+			    u32 cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+					   enum protocol_type type,
+					   u32 *p_cid);
+
+/**
+ * @brief _ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ *                             for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+					    enum protocol_type type,
+					    u32 *p_cid, u8 vfid);
 
 /**
  * @brief ecore_cxt_get_tid_mem_info - function checks if the
drivers/net/qede/base/ecore_cxt_api.h

@@ -25,19 +25,6 @@ struct ecore_tid_mem {
 	u8 *blocks[MAX_TID_BLOCKS];	/* 4K */
 };
 
-/**
- * @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
- *
- * @param p_hwfn
- * @param type
- * @param p_cid
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
-					   enum protocol_type type,
-					   u32 *p_cid);
-
 /**
  * @brief ecoreo_cid_get_cxt_info - Returns the context info for a specific cid
  *
drivers/net/qede/base/ecore_dev.c

@@ -146,8 +146,11 @@ void ecore_resc_free(struct ecore_dev *p_dev)
 {
 	int i;
 
-	if (IS_VF(p_dev))
+	if (IS_VF(p_dev)) {
+		for_each_hwfn(p_dev, i)
+			ecore_l2_free(&p_dev->hwfns[i]);
 		return;
+	}
 
 	OSAL_FREE(p_dev, p_dev->fw_data);
 
@@ -163,6 +166,7 @@ void ecore_resc_free(struct ecore_dev *p_dev)
 		ecore_consq_free(p_hwfn);
 		ecore_int_free(p_hwfn);
 		ecore_iov_free(p_hwfn);
+		ecore_l2_free(p_hwfn);
 		ecore_dmae_info_free(p_hwfn);
 		ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
 		/* @@@TBD Flush work-queue ? */
@@ -839,8 +843,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 	enum _ecore_status_t rc = ECORE_SUCCESS;
 	int i;
 
-	if (IS_VF(p_dev))
+	if (IS_VF(p_dev)) {
+		for_each_hwfn(p_dev, i) {
+			rc = ecore_l2_alloc(&p_dev->hwfns[i]);
+			if (rc != ECORE_SUCCESS)
+				return rc;
+		}
 		return rc;
+	}
 
 	p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
 				     sizeof(*p_dev->fw_data));
@@ -961,6 +971,10 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
 		if (rc)
 			goto alloc_err;
 
+		rc = ecore_l2_alloc(p_hwfn);
+		if (rc != ECORE_SUCCESS)
+			goto alloc_err;
+
 		/* DMA info initialization */
 		rc = ecore_dmae_info_alloc(p_hwfn);
 		if (rc) {
@@ -999,8 +1013,11 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
 {
 	int i;
 
-	if (IS_VF(p_dev))
+	if (IS_VF(p_dev)) {
+		for_each_hwfn(p_dev, i)
+			ecore_l2_setup(&p_dev->hwfns[i]);
 		return;
+	}
 
 	for_each_hwfn(p_dev, i) {
 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -1018,6 +1035,7 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
 
 		ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
 
+		ecore_l2_setup(p_hwfn);
 		ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
 	}
 }
drivers/net/qede/base/ecore_l2.c

@@ -29,24 +29,172 @@
 #define ECORE_MAX_SGES_NUM 16
 #define CRC32_POLY 0x1edc6f41
 
+struct ecore_l2_info {
+	u32 queues;
+	unsigned long **pp_qid_usage;
+
+	/* The lock is meant to synchronize access to the qid usage */
+	osal_mutex_t lock;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_l2_info *p_l2_info;
+	unsigned long **pp_qids;
+	u32 i;
+
+	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+		return ECORE_SUCCESS;
+
+	p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
+	if (!p_l2_info)
+		return ECORE_NOMEM;
+	p_hwfn->p_l2_info = p_l2_info;
+
+	if (IS_PF(p_hwfn->p_dev)) {
+		p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+	} else {
+		u8 rx = 0, tx = 0;
+
+		ecore_vf_get_num_rxqs(p_hwfn, &rx);
+		ecore_vf_get_num_txqs(p_hwfn, &tx);
+
+		p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
+	}
+
+	pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
+			       sizeof(unsigned long *) *
+			       p_l2_info->queues);
+	if (pp_qids == OSAL_NULL)
+		return ECORE_NOMEM;
+	p_l2_info->pp_qid_usage = pp_qids;
+
+	for (i = 0; i < p_l2_info->queues; i++) {
+		pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
+					  MAX_QUEUES_PER_QZONE / 8);
+		if (pp_qids[i] == OSAL_NULL)
+			return ECORE_NOMEM;
+	}
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+#endif
+
+	return ECORE_SUCCESS;
+}
+
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
+{
+	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+		return;
+
+	OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_l2_free(struct ecore_hwfn *p_hwfn)
+{
+	u32 i;
+
+	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+		return;
+
+	if (p_hwfn->p_l2_info == OSAL_NULL)
+		return;
+
+	if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
+		goto out_l2_info;
+
+	/* Free until hit first uninitialized entry */
+	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+		if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
+			break;
+		OSAL_VFREE(p_hwfn->p_dev,
+			   p_hwfn->p_l2_info->pp_qid_usage[i]);
+	}
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	/* Lock is last to initialize, if everything else was */
+	if (i == p_hwfn->p_l2_info->queues)
+		OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
+#endif
+
+	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
+	p_hwfn->p_l2_info = OSAL_NULL;
+}
+
+/* TODO - we'll need locking around these... */
+static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
+					  struct ecore_queue_cid *p_cid)
+{
+	struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
+	u16 queue_id = p_cid->rel.queue_id;
+	bool b_rc = true;
+	u8 first;
+
+	OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);
+
+	if (queue_id > p_l2_info->queues) {
+		DP_NOTICE(p_hwfn, true,
+			  "Requested to increase usage for qzone %04x out of %08x\n",
+			  queue_id, p_l2_info->queues);
+		b_rc = false;
+		goto out;
+	}
+
+	first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
+					     MAX_QUEUES_PER_QZONE);
+	if (first >= MAX_QUEUES_PER_QZONE) {
+		b_rc = false;
+		goto out;
+	}
+
+	OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
+	p_cid->qid_usage_idx = first;
+
+out:
+	OSAL_MUTEX_RELEASE(&p_l2_info->lock);
+	return b_rc;
+}
+
+static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
+					  struct ecore_queue_cid *p_cid)
+{
+	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);
+
+	OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
+		       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+	OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
+}
+
 void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 				 struct ecore_queue_cid *p_cid)
 {
+	/* For VF-queues, stuff is a bit complicated as:
+	 * - They always maintain the qid_usage on their own.
+	 * - In legacy mode, they also maintain their CIDs.
+	 */
+
 	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
-	if (!p_cid->is_vf && IS_PF(p_hwfn->p_dev))
-		ecore_cxt_release_cid(p_hwfn, p_cid->cid);
+	if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+		_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+	if (!p_cid->b_legacy_vf)
+		ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
 	OSAL_VFREE(p_hwfn->p_dev, p_cid);
 }
 
-struct ecore_queue_cid *
+/* The internal is only meant to be directly called by PFs initializeing CIDs
+ * for their VFs.
+ */
+static struct ecore_queue_cid *
 _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
-			u16 opaque_fid, u32 cid, u8 vf_qid,
-			struct ecore_queue_start_common_params *p_params)
+			u16 opaque_fid, u32 cid,
+			struct ecore_queue_start_common_params *p_params,
+			struct ecore_queue_cid_vf_params *p_vf_params)
 {
 	bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
 	struct ecore_queue_cid *p_cid;
 	enum _ecore_status_t rc;
@@ -56,13 +204,22 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
 
 	p_cid->opaque_fid = opaque_fid;
 	p_cid->cid = cid;
-	p_cid->vf_qid = vf_qid;
 	p_cid->rel = *p_params;
 	p_cid->p_owner = p_hwfn;
 
+	/* Fill-in bits related to VFs' queues if information was provided */
+	if (p_vf_params != OSAL_NULL) {
+		p_cid->vfid = p_vf_params->vfid;
+		p_cid->vf_qid = p_vf_params->vf_qid;
+		p_cid->b_legacy_vf = p_vf_params->b_legacy;
+	} else {
+		p_cid->vfid = ECORE_QUEUE_CID_PF;
+	}
+
 	/* Don't try calculating the absolute indices for VFs */
 	if (IS_VF(p_hwfn->p_dev)) {
 		p_cid->abs = p_cid->rel;
 
 		goto out;
 	}
@@ -82,7 +239,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
 	/* In case of a PF configuring its VF's queues, the stats-id is already
 	 * absolute [since there's a single index that's suitable per-VF].
 	 */
-	if (b_is_same) {
+	if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
 		rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
 				    &p_cid->abs.stats_id);
 		if (rc != ECORE_SUCCESS)
@@ -95,17 +252,23 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
 	p_cid->abs.sb = p_cid->rel.sb;
 	p_cid->abs.sb_idx = p_cid->rel.sb_idx;
 
-	/* This is tricky - we're actually interested in whehter this is a PF
-	 * entry meant for the VF.
-	 */
-	if (!b_is_same)
-		p_cid->is_vf = true;
 out:
+	/* VF-images have provided the qid_usage_idx on their own.
+	 * Otherwise, we need to allocate a unique one.
+	 */
+	if (!p_vf_params) {
+		if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
+			goto fail;
+	} else {
+		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+	}
+
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
-		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
 		   p_cid->opaque_fid, p_cid->cid,
 		   p_cid->rel.vport_id, p_cid->abs.vport_id,
-		   p_cid->rel.queue_id, p_cid->abs.queue_id,
+		   p_cid->rel.queue_id, p_cid->qid_usage_idx,
+		   p_cid->abs.queue_id,
 		   p_cid->rel.stats_id, p_cid->abs.stats_id,
 		   p_cid->abs.sb, p_cid->abs.sb_idx);
@@ -116,33 +279,56 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
 	return OSAL_NULL;
 }
 
-static struct ecore_queue_cid *
-ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
-		       u16 opaque_fid,
-		       struct ecore_queue_start_common_params *p_params)
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+		       struct ecore_queue_start_common_params *p_params,
+		       struct ecore_queue_cid_vf_params *p_vf_params)
 {
 	struct ecore_queue_cid *p_cid;
+	u8 vfid = ECORE_CXT_PF_CID;
+	bool b_legacy_vf = false;
 	u32 cid = 0;
 
+	/* In case of legacy VFs, The CID can be derived from the additional
+	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
+	 * use the vf_qid for this purpose as well.
+	 */
+	if (p_vf_params) {
+		vfid = p_vf_params->vfid;
+
+		if (p_vf_params->b_legacy) {
+			b_legacy_vf = true;
+			cid = p_vf_params->vf_qid;
+		}
+	}
+
 	/* Get a unique firmware CID for this queue, in case it's a PF.
 	 * VF's don't need a CID as the queue configuration will be done
 	 * by PF.
 	 */
-	if (IS_PF(p_hwfn->p_dev)) {
-		if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
-					  &cid) != ECORE_SUCCESS) {
+	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
+		if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+					   &cid, vfid) != ECORE_SUCCESS) {
 			DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
 			return OSAL_NULL;
 		}
 	}
 
-	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
-	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev))
-		ecore_cxt_release_cid(p_hwfn, cid);
+	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+					p_params, p_vf_params);
+	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
+		_ecore_cxt_release_cid(p_hwfn, cid, vfid);
 
 	return p_cid;
 }
 
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+			  struct ecore_queue_start_common_params *p_params)
+{
+	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+}
+
 enum _ecore_status_t
 ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
 			 struct ecore_sp_vport_start_params *p_params)
@@ -741,7 +927,7 @@ ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
 	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-	if (p_cid->is_vf) {
+	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
 		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
 			   "Queue%s is meant for VF rxq[%02x]\n",
@@ -793,7 +979,7 @@ ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
 	enum _ecore_status_t rc;
 
 	/* Allocate a CID for the queue */
-	p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
 	if (p_cid == OSAL_NULL)
 		return ECORE_NOMEM;
 
@@ -905,9 +1091,11 @@ ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
 	/* Cleaning the queue requires the completion to arrive there.
 	 * In addition, VFs require the answer to come as eqe to PF.
 	 */
-	p_ramrod->complete_cqe_flg = (!p_cid->is_vf && !b_eq_completion_only) ||
+	p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
+				      !b_eq_completion_only) ||
 				     b_cqe_completion;
-	p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
+	p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
+				       b_eq_completion_only;
 
 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
@@ -1007,7 +1195,7 @@ ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
 	struct ecore_queue_cid *p_cid;
 	enum _ecore_status_t rc;
 
-	p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
 	if (p_cid == OSAL_NULL)
 		return ECORE_INVAL;
 
drivers/net/qede/base/ecore_l2.h

@@ -15,6 +15,34 @@
 #include "ecore_spq.h"
 #include "ecore_l2_api.h"
 
+#define MAX_QUEUES_PER_QZONE	(sizeof(unsigned long) * 8)
+#define ECORE_QUEUE_CID_PF	(0xff)
+
+/* Additional parameters required for initialization of the queue_cid
+ * and are relevant only for a PF initializing one for its VFs.
+ */
+struct ecore_queue_cid_vf_params {
+	/* Should match the VF's relative index */
+	u8 vfid;
+
+	/* 0-based queue index. Should reflect the relative qzone the
+	 * VF thinks is associated with it [in its range].
+	 */
+	u8 vf_qid;
+
+	/* Indicates a VF is legacy, making it differ in several things:
+	 *  - Producers would be placed in a different place.
+	 *  - Makes assumptions regarding the CIDs.
+	 */
+	bool b_legacy;
+
+	/* For VFs, this index arrives via TLV to diffrentiate between
+	 * different queues opened on the same qzone, and is passed
+	 * [where the PF would have allocated it internally for its own].
+	 */
+	u8 qid_usage_idx;
+};
+
 struct ecore_queue_cid {
 	/* 'Relative' is a relative term ;-). Usually the indices [not counting
 	 * SBs] would be PF-relative, but there are some cases where that isn't
@@ -31,22 +59,32 @@ struct ecore_queue_cid {
 	 * Notice this is relevant on the *PF* queue-cid of its VF's queues,
 	 * and not on the VF itself.
 	 */
-	bool is_vf;
+	u8 vfid;
 	u8 vf_qid;
 
+	/* We need an additional index to diffrentiate between queues opened
+	 * for same queue-zone, as VFs would have to communicate the info
+	 * to the PF [otherwise PF has no way to diffrentiate].
+	 */
+	u8 qid_usage_idx;
+
 	/* Legacy VFs might have Rx producer located elsewhere */
 	bool b_legacy_vf;
 
 	struct ecore_hwfn *p_owner;
 };
 
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn);
+void ecore_l2_free(struct ecore_hwfn *p_hwfn);
+
 void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
 				 struct ecore_queue_cid *p_cid);
 
 struct ecore_queue_cid *
-_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
-			u16 opaque_fid, u32 cid, u8 vf_qid,
-			struct ecore_queue_start_common_params *p_params);
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+		       struct ecore_queue_start_common_params *p_params,
+		       struct ecore_queue_cid_vf_params *p_vf_params);
 
 enum _ecore_status_t
 ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
drivers/net/qede/base/ecore_sriov.c

@@ -192,28 +192,90 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
 	return vf;
 }
 
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
+			      struct ecore_vf_info *p_vf,
+			      struct ecore_vf_queue *p_queue)
+{
+	int i;
+
+	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+		if (p_queue->cids[i].p_cid &&
+		    !p_queue->cids[i].b_is_tx)
+			return p_queue->cids[i].p_cid;
+	}
+
+	return OSAL_NULL;
+}
+
+enum ecore_iov_validate_q_mode {
+	ECORE_IOV_VALIDATE_Q_NA,
+	ECORE_IOV_VALIDATE_Q_ENABLE,
+	ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
+					  struct ecore_vf_info *p_vf,
+					  u16 qid,
+					  enum ecore_iov_validate_q_mode mode,
+					  bool b_is_tx)
+{
+	int i;
+
+	if (mode == ECORE_IOV_VALIDATE_Q_NA)
+		return true;
+
+	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+		struct ecore_vf_queue_cid *p_qcid;
+
+		p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+		if (p_qcid->p_cid == OSAL_NULL)
+			continue;
+
+		if (p_qcid->b_is_tx != b_is_tx)
+			continue;
+
+		/* Found. It's enabled. */
+		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+	}
+
+	/* In case we haven't found any valid cid, then its disabled */
+	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
 				   struct ecore_vf_info *p_vf,
-				   u16 rx_qid)
+				   u16 rx_qid,
+				   enum ecore_iov_validate_q_mode mode)
 {
-	if (rx_qid >= p_vf->num_rxqs)
+	if (rx_qid >= p_vf->num_rxqs) {
 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 			   "VF[0x%02x] - can't touch Rx queue[%04x];"
 			   " Only 0x%04x are allocated\n",
 			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
-	return rx_qid < p_vf->num_rxqs;
+		return false;
+	}
+
+	return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
+					     mode, false);
 }
 
 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
 				   struct ecore_vf_info *p_vf,
-				   u16 tx_qid)
+				   u16 tx_qid,
+				   enum ecore_iov_validate_q_mode mode)
 {
-	if (tx_qid >= p_vf->num_txqs)
+	if (tx_qid >= p_vf->num_txqs) {
 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 			   "VF[0x%02x] - can't touch Tx queue[%04x];"
 			   " Only 0x%04x are allocated\n",
 			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
-	return tx_qid < p_vf->num_txqs;
+		return false;
+	}
+
+	return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
+					     mode, true);
 }
 
 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -234,13 +296,16 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
 	return false;
 }
 
+/* Is there at least 1 queue open? */
 static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
 					  struct ecore_vf_info *p_vf)
 {
 	u8 i;
 
 	for (i = 0; i < p_vf->num_rxqs; i++)
-		if (p_vf->vf_queues[i].p_rx_cid)
+		if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+						  ECORE_IOV_VALIDATE_Q_ENABLE,
+						  false))
 			return true;
 
 	return false;
@@ -251,8 +316,10 @@ static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
 {
 	u8 i;
 
-	for (i = 0; i < p_vf->num_rxqs; i++)
-		if (p_vf->vf_queues[i].p_tx_cid)
+	for (i = 0; i < p_vf->num_txqs; i++)
+		if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+						  ECORE_IOV_VALIDATE_Q_ENABLE,
+						  true))
 			return true;
 
 	return false;
@@ -1095,19 +1162,15 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
 	vf->num_txqs = num_of_vf_available_chains;
 
 	for (i = 0; i < vf->num_rxqs; i++) {
-		struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];
+		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
 
 		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
 		p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 
-		/* CIDs are per-VF, so no problem having them 0-based. */
-		p_queue->fw_cid = i;
-
 		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
+			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
 			   vf->relative_vf_id, i, vf->igu_sbs[i],
-			   p_queue->fw_rx_qid, p_queue->fw_tx_qid,
-			   p_queue->fw_cid);
+			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
 	}
 
 	/* Update the link configuration in bulletin.
@@ -1443,7 +1506,7 @@ struct ecore_public_vf_info
 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 				 struct ecore_vf_info *p_vf)
 {
-	u32 i;
+	u32 i, j;
 	p_vf->vf_bulletin = 0;
 	p_vf->vport_instance = 0;
 	p_vf->configured_features = 0;
@@ -1455,18 +1518,15 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 	p_vf->num_active_rxqs = 0;
 
 	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-		struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];
+		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
 
-		if (p_queue->p_rx_cid) {
-			ecore_eth_queue_cid_release(p_hwfn,
-						    p_queue->p_rx_cid);
-			p_queue->p_rx_cid = OSAL_NULL;
-		}
+		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+			if (!p_queue->cids[j].p_cid)
+				continue;
 
-		if (p_queue->p_tx_cid) {
 			ecore_eth_queue_cid_release(p_hwfn,
-						    p_queue->p_tx_cid);
-			p_queue->p_tx_cid = OSAL_NULL;
+						    p_queue->cids[j].p_cid);
+			p_queue->cids[j].p_cid = OSAL_NULL;
 		}
 	}
 
@@ -1481,7 +1541,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
 					struct vf_pf_resc_request *p_req,
 					struct pf_vf_resc *p_resp)
 {
-	int i;
+	u8 i;
 
 	/* Queue related information */
 	p_resp->num_rxqs = p_vf->num_rxqs;
@@ -1502,7 +1562,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
 	for (i = 0; i < p_resp->num_rxqs; i++) {
 		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
 				  (u16 *)&p_resp->hw_qid[i]);
-		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+		p_resp->cid[i] = i;
 	}
 
 	/* Filter related information */
@@ -1905,9 +1965,12 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
 
 	/* Update all the Rx queues */
 	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-		struct ecore_queue_cid *p_cid;
+		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+		struct ecore_queue_cid *p_cid = OSAL_NULL;
 
-		p_cid = p_vf->vf_queues[i].p_rx_cid;
+		/* There can be at most 1 Rx queue on qzone. Find it */
+		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
+						      p_queue);
 		if (p_cid == OSAL_NULL)
 			continue;
 
@@ -2113,19 +2176,32 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 				       struct ecore_vf_info *vf)
 {
 	struct ecore_queue_start_common_params params;
+	struct ecore_queue_cid_vf_params vf_params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
-	struct ecore_vf_q_info *p_queue;
+	struct ecore_vf_queue *p_queue;
 	struct vfpf_start_rxq_tlv *req;
+	struct ecore_queue_cid *p_cid;
+	bool b_legacy_vf = false;
+	u8 qid_usage_idx;
 	enum _ecore_status_t rc;
 
 	req = &mbx->req_virt->start_rxq;
 
-	if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+	if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+				    ECORE_IOV_VALIDATE_Q_DISABLE) ||
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
+	/* Legacy VFs made assumptions on the CID their queues connected to,
+	 * assuming queue X used CID X.
+	 * TODO - need to validate that there was no official release post
+	 * the current legacy scheme that still made that assumption.
+	 */
+	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
+		b_legacy_vf = true;
+
+	/* Acquire a new queue-cid */
 	p_queue = &vf->vf_queues[req->rx_qid];
 
@@ -2136,39 +2212,42 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
-	p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
-						    vf->opaque_fid,
-						    p_queue->fw_cid,
-						    (u8)req->rx_qid,
-						    &params);
-	if (p_queue->p_rx_cid == OSAL_NULL)
+	/* TODO - set qid_usage_idx according to extended TLV. For now, use
+	 * '0' for Rx.
+	 */
+	qid_usage_idx = 0;
+
+	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+	vf_params.vfid = vf->relative_vf_id;
+	vf_params.vf_qid = (u8)req->rx_qid;
+	vf_params.b_legacy = b_legacy_vf;
+	vf_params.qid_usage_idx = qid_usage_idx;
+
+	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+				       &params, &vf_params);
+	if (p_cid == OSAL_NULL)
 		goto out;
 
 	/* Legacy VFs have their Producers in a different location, which they
 	 * calculate on their own and clean the producer prior to this.
 	 */
-	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
-	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
-		b_legacy_vf = true;
-	else
+	if (!b_legacy_vf)
 		REG_WR(p_hwfn,
 		       GTT_BAR0_MAP_REG_MSDM_RAM +
 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
 		       0);
-	p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
 
-	rc = ecore_eth_rxq_start_ramrod(p_hwfn,
-					p_queue->p_rx_cid,
+	rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
 					req->bd_max_bytes,
 					req->rxq_addr,
 					req->cqe_pbl_addr,
 					req->cqe_pbl_size);
 	if (rc != ECORE_SUCCESS) {
 		status = PFVF_STATUS_FAILURE;
-		ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
-		p_queue->p_rx_cid = OSAL_NULL;
+		ecore_eth_queue_cid_release(p_hwfn, p_cid);
 	} else {
+		p_queue->cids[qid_usage_idx].p_cid = p_cid;
+		p_queue->cids[qid_usage_idx].b_is_tx = false;
 		status = PFVF_STATUS_SUCCESS;
 		vf->num_active_rxqs++;
 	}
@@ -2331,6 +2410,7 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
 					    struct ecore_ptt *p_ptt,
 					    struct ecore_vf_info *p_vf,
+					    u32 cid,
 					    u8 status)
 {
 	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
@@ -2359,12 +2439,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
 		      sizeof(struct channel_list_end_tlv));
 
 	/* Update the TLV with the response */
-	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
-		u16 qid = mbx->req_virt->start_txq.tx_qid;
-
-		p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
-					   DQ_DEMS_LEGACY);
-	}
+	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+		p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
 
 	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
 }
@@ -2374,20 +2450,34 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
 				       struct ecore_vf_info *vf)
 {
 	struct ecore_queue_start_common_params params;
+	struct ecore_queue_cid_vf_params vf_params;
 	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
-	struct ecore_vf_q_info *p_queue;
+	struct ecore_vf_queue *p_queue;
 	struct vfpf_start_txq_tlv *req;
+	struct ecore_queue_cid *p_cid;
+	bool b_legacy_vf = false;
+	u8 qid_usage_idx;
+	u32 cid = 0;
 	enum _ecore_status_t rc;
 	u16 pq;
 
 	OSAL_MEMSET(&params, 0, sizeof(params));
 	req = &mbx->req_virt->start_txq;
 
-	if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+	if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+				    ECORE_IOV_VALIDATE_Q_NA) ||
 	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
+	/* In case this is a legacy VF - need to know to use the right cids.
+	 * TODO - need to validate that there was no official release post
+	 * the current legacy scheme that still made that assumption.
+	 */
+	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
+		b_legacy_vf = true;
+
+	/* Acquire a new queue-cid */
 	p_queue = &vf->vf_queues[req->tx_qid];
 
||||
@ -2397,29 +2487,42 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
|
||||
params.sb = req->hw_sb;
|
||||
params.sb_idx = req->sb_index;
|
||||
|
||||
p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
|
||||
vf->opaque_fid,
|
||||
p_queue->fw_cid,
|
||||
(u8)req->tx_qid,
|
||||
¶ms);
|
||||
if (p_queue->p_tx_cid == OSAL_NULL)
|
||||
/* TODO - set qid_usage_idx according to extended TLV. For now, use
|
||||
* '1' for Tx.
|
||||
*/
|
||||
qid_usage_idx = 1;
|
||||
|
||||
if (p_queue->cids[qid_usage_idx].p_cid)
|
||||
goto out;
|
||||
|
||||
OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
|
||||
vf_params.vfid = vf->relative_vf_id;
|
||||
vf_params.vf_qid = (u8)req->tx_qid;
|
||||
vf_params.b_legacy = b_legacy_vf;
|
||||
vf_params.qid_usage_idx = qid_usage_idx;
|
||||
|
||||
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
|
||||
¶ms, &vf_params);
|
||||
if (p_cid == OSAL_NULL)
|
||||
goto out;
|
||||
|
||||
pq = ecore_get_cm_pq_idx_vf(p_hwfn,
|
||||
vf->relative_vf_id);
|
||||
rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
|
||||
rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
|
||||
req->pbl_addr, req->pbl_size, pq);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
status = PFVF_STATUS_FAILURE;
|
||||
ecore_eth_queue_cid_release(p_hwfn,
|
||||
p_queue->p_tx_cid);
|
||||
p_queue->p_tx_cid = OSAL_NULL;
|
||||
ecore_eth_queue_cid_release(p_hwfn, p_cid);
|
||||
} else {
|
||||
status = PFVF_STATUS_SUCCESS;
|
||||
p_queue->cids[qid_usage_idx].p_cid = p_cid;
|
||||
p_queue->cids[qid_usage_idx].b_is_tx = true;
|
||||
cid = p_cid->cid;
|
||||
}
|
||||
|
||||
out:
|
||||
ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
|
||||
ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
|
||||
cid, status);
|
||||
}
|
||||
|
||||
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
|
||||
@ -2428,26 +2531,38 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
|
||||
u8 num_rxqs,
|
||||
bool cqe_completion)
|
||||
{
|
||||
struct ecore_vf_q_info *p_queue;
|
||||
enum _ecore_status_t rc = ECORE_SUCCESS;
|
||||
int qid;
|
||||
int qid, i;
|
||||
|
||||
/* TODO - improve validation [wrap around] */
|
||||
if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
|
||||
return ECORE_INVAL;
|
||||
|
||||
for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
|
||||
p_queue = &vf->vf_queues[qid];
|
||||
struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
|
||||
struct ecore_queue_cid **pp_cid = OSAL_NULL;
|
||||
|
||||
if (!p_queue->p_rx_cid)
|
||||
/* There can be at most a single Rx per qzone. Find it */
|
||||
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
|
||||
if (p_queue->cids[i].p_cid &&
|
||||
!p_queue->cids[i].b_is_tx) {
|
||||
pp_cid = &p_queue->cids[i].p_cid;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (pp_cid == OSAL_NULL) {
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
|
||||
"Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
|
||||
vf->relative_vf_id, qid);
|
||||
continue;
|
||||
}
|
||||
|
||||
rc = ecore_eth_rx_queue_stop(p_hwfn,
|
||||
p_queue->p_rx_cid,
|
||||
rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
|
||||
false, cqe_completion);
|
||||
if (rc != ECORE_SUCCESS)
|
||||
return rc;
|
||||
|
||||
vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
|
||||
*pp_cid = OSAL_NULL;
|
||||
vf->num_active_rxqs--;
|
||||
}
|
||||
|
||||
@@ -2459,24 +2574,33 @@ static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
 						   u16 txq_id, u8 num_txqs)
 {
 	enum _ecore_status_t rc = ECORE_SUCCESS;
-	struct ecore_vf_q_info *p_queue;
-	int qid;
+	struct ecore_vf_queue *p_queue;
+	int qid, j;
 
-	if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+	if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+				    ECORE_IOV_VALIDATE_Q_NA) ||
+	    !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
+				    ECORE_IOV_VALIDATE_Q_NA))
 		return ECORE_INVAL;
 
 	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
 		p_queue = &vf->vf_queues[qid];
-		if (!p_queue->p_tx_cid)
-			continue;
+		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+			if (p_queue->cids[j].p_cid == OSAL_NULL)
+				continue;
 
-		rc = ecore_eth_tx_queue_stop(p_hwfn,
-					     p_queue->p_tx_cid);
-		if (rc != ECORE_SUCCESS)
-			return rc;
+			if (!p_queue->cids[j].b_is_tx)
+				continue;
 
-		p_queue->p_tx_cid = OSAL_NULL;
+			rc = ecore_eth_tx_queue_stop(p_hwfn,
+						     p_queue->cids[j].p_cid);
+			if (rc != ECORE_SUCCESS)
+				return rc;
+
+			p_queue->cids[j].p_cid = OSAL_NULL;
+		}
 	}
 
 	return rc;
 }
@@ -2538,33 +2662,32 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
 	u8 status = PFVF_STATUS_FAILURE;
 	u8 complete_event_flg;
 	u8 complete_cqe_flg;
-	u16 qid;
 	enum _ecore_status_t rc;
-	u8 i;
+	u16 i;
 
 	req = &mbx->req_virt->update_rxq;
 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
-	/* Validaute inputs */
-	if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
-	    !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
-		DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
-			vf->relative_vf_id, req->rx_qid, req->num_rxqs);
-		goto out;
+	/* Validate inputs */
+	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+		if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
+			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+				   vf->relative_vf_id, req->rx_qid,
+				   req->num_rxqs);
+			goto out;
+		}
 	}
 
 	for (i = 0; i < req->num_rxqs; i++) {
-		qid = req->rx_qid + i;
+		struct ecore_vf_queue *p_queue;
+		u16 qid = req->rx_qid + i;
 
-		if (!vf->vf_queues[qid].p_rx_cid) {
-			DP_INFO(p_hwfn,
-				"VF[%d] rx_qid = %d isn`t active!\n",
-				vf->relative_vf_id, qid);
-			goto out;
-		}
-
-		handlers[i] = vf->vf_queues[qid].p_rx_cid;
+		p_queue = &vf->vf_queues[qid];
+		handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+							    p_queue);
 	}
 
 	rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
@@ -2796,8 +2919,11 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
 		  (1 << p_rss_tlv->rss_table_size_log));
 
 	for (i = 0; i < table_size; i++) {
+		struct ecore_queue_cid *p_cid;
+
 		q_idx = p_rss_tlv->rss_ind_table[i];
-		if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+		if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
 				   vf->relative_vf_id, q_idx);
@@ -2805,15 +2931,9 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
 			goto out;
 		}
 
-		if (!vf->vf_queues[q_idx].p_rx_cid) {
-			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-				   "VF[%d]: Omitting RSS due to inactive queue %08x\n",
-				   vf->relative_vf_id, q_idx);
-			b_reject = true;
-			goto out;
-		}
-
-		p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
+		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+						      &vf->vf_queues[q_idx]);
+		p_rss->rss_ind_table[i] = p_cid;
 	}
 
 	p_data->rss_params = p_rss;
@@ -3272,22 +3392,26 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 	u8 status = PFVF_STATUS_FAILURE;
 	struct ecore_queue_cid *p_cid;
 	u16 rx_coal, tx_coal;
-	u16 qid;
+	u16 qid;
+	int i;
 
 	req = &mbx->req_virt->update_coalesce;
 
 	rx_coal = req->rx_coal;
 	tx_coal = req->tx_coal;
 	qid = req->qid;
-	p_cid = vf->vf_queues[qid].p_rx_cid;
 
-	if (!ecore_iov_validate_rxq(p_hwfn, vf, qid)) {
+	if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
+	    rx_coal) {
 		DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
 		       vf->abs_vf_id, qid);
 		goto out;
 	}
 
-	if (!ecore_iov_validate_txq(p_hwfn, vf, qid)) {
+	if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
+	    tx_coal) {
 		DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
 		       vf->abs_vf_id, qid);
 		goto out;
@@ -3296,7 +3420,11 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
 		   vf->abs_vf_id, rx_coal, tx_coal, qid);
+
 	if (rx_coal) {
+		p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+						      &vf->vf_queues[qid]);
+
 		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
 		if (rc != ECORE_SUCCESS) {
 			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -3305,13 +3433,28 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
 			goto out;
 		}
 	}
 
+	/* TODO - in future, it might be possible to pass this in a per-cid
+	 * granularity. For now, do this for all Tx queues.
+	 */
 	if (tx_coal) {
-		rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
-		if (rc != ECORE_SUCCESS) {
-			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-				   "VF[%d]: Unable to set tx queue = %d coalesce\n",
-				   vf->abs_vf_id, vf->vf_queues[qid].fw_tx_qid);
-			goto out;
+		struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+			if (p_queue->cids[i].p_cid == OSAL_NULL)
+				continue;
+
+			if (!p_queue->cids[i].b_is_tx)
+				continue;
+
+			rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+						    p_queue->cids[i].p_cid);
+			if (rc != ECORE_SUCCESS) {
+				DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+					   "VF[%d]: Unable to set tx queue coalesce\n",
+					   vf->abs_vf_id);
+				goto out;
+			}
 		}
 	}
 
drivers/net/qede/base/ecore_sriov.h

@@ -13,6 +13,7 @@
 #include "ecore_vfpf_if.h"
 #include "ecore_iov_api.h"
 #include "ecore_hsi_common.h"
+#include "ecore_l2.h"
 
 #define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
 	(E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
@@ -62,12 +63,18 @@ struct ecore_iov_vf_mbx {
 	 */
 };
 
-struct ecore_vf_q_info {
+struct ecore_vf_queue_cid {
+	bool b_is_tx;
+	struct ecore_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct ecore_vf_queue {
+	/* Input from upper-layer, mapping relateive queue to queue-zone */
 	u16 fw_rx_qid;
-	struct ecore_queue_cid *p_rx_cid;
 	u16 fw_tx_qid;
-	struct ecore_queue_cid *p_tx_cid;
-	u8 fw_cid;
+
+	struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
 };
 
 enum vf_state {
@@ -127,7 +134,7 @@ struct ecore_vf_info {
 	u8 num_mac_filters;
 	u8 num_vlan_filters;
 
-	struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+	struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
 	u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
 
 	/* TODO - Only windows is using it - should be removed */
drivers/net/qede/base/ecore_vf.c

@@ -1582,6 +1582,12 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
 	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
 }
 
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+			   u8 *num_txqs)
+{
+	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
 void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
 {
 	OSAL_MEMCPY(port_mac,
drivers/net/qede/base/ecore_vf_api.h

@@ -60,6 +60,15 @@ void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
 void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
 			   u8 *num_rxqs);
 
+/**
+ * @brief Get number of Rx queues allocated for VF by ecore
+ *
+ *  @param p_hwfn
+ *  @param num_txqs - allocated RX queues
+ */
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+			   u8 *num_txqs);
+
 /**
  * @brief Get port mac address for VF
  *