net/bnxt: update RM to support HCAPI only
- For the EM module there is a need to allocate only the EM records in
  HCAPI RM, while storage control is requested to be outside of the RM DB.
- Add TF_RM_ELEM_CFG_HCAPI_BA.
- Return an error from tf_tcam_bind when the number of reserved entries
  for the WC TCAM is odd.
- Remove em_pool from the session.
- Use the RM-provided start offset and size.
- HCAPI returns an entry index instead of a row index for the WC TCAM.
- Move resource type conversion to the HWRM set/free TCAM functions.

Signed-off-by: Peter Spreadborough <peter.spreadborough@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
parent ced3cded44
commit e2a002d88c
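For context, the new element configuration type can be summarized with a short sketch. The array below is illustrative only (it is not part of the patch, and example_cfg is a made-up name); the field order follows the tf_rm_element_cfg initializers visible in the diff:

/*
 * Illustrative sketch, not part of the patch: mixing the two
 * HCAPI-controlled configuration types after this change.
 *
 * TF_RM_ELEM_CFG_HCAPI    - reserved through HCAPI RM, but RM keeps no
 *                           allocator; the owning module (e.g. EM via its
 *                           local stack-based em_pool) tracks entries.
 * TF_RM_ELEM_CFG_HCAPI_BA - reserved through HCAPI RM and RM also builds
 *                           a bit-allocator pool, so tf_rm_allocate() and
 *                           tf_rm_free() can hand out individual entries.
 */
struct tf_rm_element_cfg example_cfg[] = {
	/* RM owns per-entry allocation state */
	{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION },
	/* Module owns per-entry allocation state (EM records) */
	{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC },
};

As the tf_rm.c hunks below show, tf_rm_get_info() and tf_rm_get_hcapi_type() accept both types, while tf_rm_allocate(), tf_rm_free(), tf_rm_is_allocated() and tf_rm_get_inuse_count() now require TF_RM_ELEM_CFG_HCAPI_BA.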
@@ -68,6 +68,8 @@ tf_dev_p4_get_tcam_slice_info(struct tf *tfp __rte_unused,
*num_slices_per_row = CFA_P4_WC_TCAM_SLICES_PER_ROW;
if (key_sz > *num_slices_per_row * CFA_P4_WC_TCAM_SLICE_SIZE)
return -ENOTSUP;

*num_slices_per_row = 1;
} else { /* for other type of tcam */
*num_slices_per_row = 1;
}
@@ -12,19 +12,19 @@
#include "tf_rm.h"

struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = {
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_PROF_FUNC },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_PROF_ID },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID },
/* CFA_RESOURCE_TYPE_P4_L2_FUNC */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
};

struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_PROF_TCAM },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_WC_TCAM },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_TCAM },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM },
/* CFA_RESOURCE_TYPE_P4_CT_RULE_TCAM */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
/* CFA_RESOURCE_TYPE_P4_VEB_TCAM */
@@ -32,26 +32,26 @@ struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {
};

struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_FULL_ACTION },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_MCG },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_8B },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_16B },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B },
/* CFA_RESOURCE_TYPE_P4_ENCAP_32B */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_64B },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4 },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6 },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_COUNTER_64B },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_SPORT },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_DPORT },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_S_IPV4 },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_D_IPV4 },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_S_IPV6 },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_D_IPV6 },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_METER_PROF },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_METER },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_MIRROR },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4 },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6 },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_SPORT },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_DPORT },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_S_IPV4 },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_D_IPV4 },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_S_IPV6 },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_D_IPV6 },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR },
/* CFA_RESOURCE_TYPE_P4_UPAR */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
/* CFA_RESOURCE_TYPE_P4_EPOC */
@@ -79,7 +79,7 @@ struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = {
/* CFA_RESOURCE_TYPE_P4_EM_REC */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_TBL_SCOPE },
{ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE },
};

struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = {
@@ -23,20 +23,28 @@
*/
static void *em_db[TF_DIR_MAX];

#define TF_EM_DB_EM_REC 0

/**
* Init flag, set on bind and cleared on unbind
*/
static uint8_t init;

/**
* EM Pool
*/
static struct stack em_pool[TF_DIR_MAX];

/**
* Create EM Tbl pool of memory indexes.
*
* [in] session
* Pointer to session
* [in] dir
* direction
* [in] num_entries
* number of entries to write
* [in] start
* starting offset
*
* Return:
* 0 - Success, entry allocated - no search support
@@ -44,54 +52,66 @@ static uint8_t init;
* - Failure, entry not allocated, out of resources
*/
static int
tf_create_em_pool(struct tf_session *session,
enum tf_dir dir,
uint32_t num_entries)
tf_create_em_pool(enum tf_dir dir,
uint32_t num_entries,
uint32_t start)
{
struct tfp_calloc_parms parms;
uint32_t i, j;
int rc = 0;
struct stack *pool = &session->em_pool[dir];
struct stack *pool = &em_pool[dir];

parms.nitems = num_entries;
/* Assumes that num_entries has been checked before we get here */
parms.nitems = num_entries / TF_SESSION_EM_ENTRY_SIZE;
parms.size = sizeof(uint32_t);
parms.alignment = 0;

rc = tfp_calloc(&parms);

if (rc) {
TFP_DRV_LOG(ERR, "EM pool allocation failure %s\n",
TFP_DRV_LOG(ERR,
"%s, EM pool allocation failure %s\n",
tf_dir_2_str(dir),
strerror(-rc));
return rc;
}

/* Create empty stack
*/
rc = stack_init(num_entries, (uint32_t *)parms.mem_va, pool);
rc = stack_init(num_entries / TF_SESSION_EM_ENTRY_SIZE,
(uint32_t *)parms.mem_va,
pool);

if (rc) {
TFP_DRV_LOG(ERR, "EM pool stack init failure %s\n",
TFP_DRV_LOG(ERR,
"%s, EM pool stack init failure %s\n",
tf_dir_2_str(dir),
strerror(-rc));
goto cleanup;
}

/* Fill pool with indexes
*/
j = num_entries - 1;
j = start + num_entries - TF_SESSION_EM_ENTRY_SIZE;

for (i = 0; i < num_entries; i++) {
for (i = 0; i < (num_entries / TF_SESSION_EM_ENTRY_SIZE); i++) {
rc = stack_push(pool, j);
if (rc) {
TFP_DRV_LOG(ERR, "EM pool stack push failure %s\n",
TFP_DRV_LOG(ERR,
"%s, EM pool stack push failure %s\n",
tf_dir_2_str(dir),
strerror(-rc));
goto cleanup;
}
j--;

j -= TF_SESSION_EM_ENTRY_SIZE;
}

if (!stack_is_full(pool)) {
rc = -EINVAL;
TFP_DRV_LOG(ERR, "EM pool stack failure %s\n",
TFP_DRV_LOG(ERR,
"%s, EM pool stack failure %s\n",
tf_dir_2_str(dir),
strerror(-rc));
goto cleanup;
}
@@ -105,18 +125,15 @@ tf_create_em_pool(struct tf_session *session,
/**
* Create EM Tbl pool of memory indexes.
*
* [in] session
* Pointer to session
* [in] dir
* direction
*
* Return:
*/
static void
tf_free_em_pool(struct tf_session *session,
enum tf_dir dir)
tf_free_em_pool(enum tf_dir dir)
{
struct stack *pool = &session->em_pool[dir];
struct stack *pool = &em_pool[dir];
uint32_t *ptr;

ptr = stack_items(pool);
@@ -140,22 +157,19 @@ tf_em_insert_int_entry(struct tf *tfp,
uint16_t rptr_index = 0;
uint8_t rptr_entry = 0;
uint8_t num_of_entries = 0;
struct tf_session *session =
(struct tf_session *)(tfp->session->core_data);
struct stack *pool = &session->em_pool[parms->dir];
struct stack *pool = &em_pool[parms->dir];
uint32_t index;

rc = stack_pop(pool, &index);

if (rc) {
PMD_DRV_LOG
(ERR,
"dir:%d, EM entry index allocation failed\n",
parms->dir);
PMD_DRV_LOG(ERR,
"%s, EM entry index allocation failed\n",
tf_dir_2_str(parms->dir));
return rc;
}

rptr_index = index * TF_SESSION_EM_ENTRY_SIZE;
rptr_index = index;
rc = tf_msg_insert_em_internal_entry(tfp,
parms,
&rptr_index,
@@ -166,8 +180,9 @@ tf_em_insert_int_entry(struct tf *tfp,

PMD_DRV_LOG
(ERR,
"Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
index * TF_SESSION_EM_ENTRY_SIZE,
"%s, Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
tf_dir_2_str(parms->dir),
index,
rptr_index,
rptr_entry,
num_of_entries);
@@ -204,15 +219,13 @@ tf_em_delete_int_entry(struct tf *tfp,
struct tf_delete_em_entry_parms *parms)
{
int rc = 0;
struct tf_session *session =
(struct tf_session *)(tfp->session->core_data);
struct stack *pool = &session->em_pool[parms->dir];
struct stack *pool = &em_pool[parms->dir];

rc = tf_msg_delete_em_entry(tfp, parms);

/* Return resource to pool */
if (rc == 0)
stack_push(pool, parms->index / TF_SESSION_EM_ENTRY_SIZE);
stack_push(pool, parms->index);

return rc;
}
@@ -224,8 +237,9 @@ tf_em_int_bind(struct tf *tfp,
int rc;
int i;
struct tf_rm_create_db_parms db_cfg = { 0 };
struct tf_session *session;
uint8_t db_exists = 0;
struct tf_rm_get_alloc_info_parms iparms;
struct tf_rm_alloc_info info;

TF_CHECK_PARMS2(tfp, parms);

@@ -235,14 +249,6 @@ tf_em_int_bind(struct tf *tfp,
return -EINVAL;
}

session = (struct tf_session *)tfp->session->core_data;

for (i = 0; i < TF_DIR_MAX; i++) {
tf_create_em_pool(session,
i,
TF_SESSION_EM_POOL_SIZE);
}

db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
@@ -257,6 +263,18 @@ tf_em_int_bind(struct tf *tfp,
if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_EM_RECORD] == 0)
continue;

if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_EM_RECORD] %
TF_SESSION_EM_ENTRY_SIZE != 0) {
rc = -ENOMEM;
TFP_DRV_LOG(ERR,
"%s, EM Allocation must be in blocks of %d, failure %s\n",
tf_dir_2_str(i),
TF_SESSION_EM_ENTRY_SIZE,
strerror(-rc));

return rc;
}

db_cfg.rm_db = &em_db[i];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
@@ -272,6 +290,28 @@ tf_em_int_bind(struct tf *tfp,
if (db_exists)
init = 1;

for (i = 0; i < TF_DIR_MAX; i++) {
iparms.rm_db = em_db[i];
iparms.db_index = TF_EM_DB_EM_REC;
iparms.info = &info;

rc = tf_rm_get_info(&iparms);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: EM DB get info failed\n",
tf_dir_2_str(i));
return rc;
}

rc = tf_create_em_pool(i,
iparms.info->entry.stride,
iparms.info->entry.start);
/* Logging handled in tf_create_em_pool */
if (rc)
return rc;
}

return 0;
}
@@ -281,7 +321,6 @@ tf_em_int_unbind(struct tf *tfp)
int rc;
int i;
struct tf_rm_free_db_parms fparms = { 0 };
struct tf_session *session;

TF_CHECK_PARMS1(tfp);

@@ -292,10 +331,8 @@ tf_em_int_unbind(struct tf *tfp)
return 0;
}

session = (struct tf_session *)tfp->session->core_data;

for (i = 0; i < TF_DIR_MAX; i++)
tf_free_em_pool(session, i);
tf_free_em_pool(i);

for (i = 0; i < TF_DIR_MAX; i++) {
fparms.dir = i;
@@ -857,12 +857,12 @@ tf_msg_get_tbl_entry(struct tf *tfp,
return rc;

/* Verify that we got enough buffer to return the requested data */
if (resp.size < size)
if (tfp_le_to_cpu_32(resp.size) != size)
return -EINVAL;

tfp_memcpy(data,
&resp.data,
resp.size);
size);

return tfp_le_to_cpu_32(parms.tf_resp_code);
}
@@ -919,7 +919,7 @@ tf_msg_bulk_get_tbl_entry(struct tf *tfp,
return rc;

/* Verify that we got enough buffer to return the requested data */
if (resp.size < data_size)
if (tfp_le_to_cpu_32(resp.size) != data_size)
return -EINVAL;

return tfp_le_to_cpu_32(parms.tf_resp_code);
@@ -106,7 +106,8 @@ tf_rm_count_hcapi_reservations(enum tf_dir dir,
uint16_t cnt = 0;

for (i = 0; i < count; i++) {
if (cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI &&
if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
reservations[i] > 0)
cnt++;

@@ -467,7 +468,8 @@ tf_rm_create_db(struct tf *tfp,
/* Build the request */
for (i = 0, j = 0; i < parms->num_elements; i++) {
/* Skip any non HCAPI cfg elements */
if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI) {
if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
/* Only perform reservation for entries that
* has been requested
*/
@@ -529,7 +531,8 @@ tf_rm_create_db(struct tf *tfp,
/* Skip any non HCAPI types as we didn't include them
* in the reservation request.
*/
if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI)
if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI &&
parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
continue;

/* If the element didn't request an allocation no need
@@ -551,29 +554,32 @@ tf_rm_create_db(struct tf *tfp,
resv[j].start,
resv[j].stride);

/* Create pool */
pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
sizeof(struct bitalloc));
/* Alloc request, alignment already set */
cparms.nitems = pool_size;
cparms.size = sizeof(struct bitalloc);
rc = tfp_calloc(&cparms);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Pool alloc failed, type:%d\n",
tf_dir_2_str(parms->dir),
db[i].cfg_type);
goto fail;
}
db[i].pool = (struct bitalloc *)cparms.mem_va;
/* Only allocate BA pool if so requested */
if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
/* Create pool */
pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
sizeof(struct bitalloc));
/* Alloc request, alignment already set */
cparms.nitems = pool_size;
cparms.size = sizeof(struct bitalloc);
rc = tfp_calloc(&cparms);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Pool alloc failed, type:%d\n",
tf_dir_2_str(parms->dir),
db[i].cfg_type);
goto fail;
}
db[i].pool = (struct bitalloc *)cparms.mem_va;

rc = ba_init(db[i].pool, resv[j].stride);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Pool init failed, type:%d\n",
tf_dir_2_str(parms->dir),
db[i].cfg_type);
goto fail;
rc = ba_init(db[i].pool, resv[j].stride);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Pool init failed, type:%d\n",
tf_dir_2_str(parms->dir),
db[i].cfg_type);
goto fail;
}
}
j++;
} else {
@@ -682,6 +688,9 @@ tf_rm_free_db(struct tf *tfp,
tf_device_module_type_2_str(rm_db->type));
}

/* No need to check for configuration type, even if we do not
* have a BA pool we just delete on a null ptr, no harm
*/
for (i = 0; i < rm_db->num_entries; i++)
tfp_free((void *)rm_db->db[i].pool);

@@ -705,8 +714,7 @@ tf_rm_allocate(struct tf_rm_allocate_parms *parms)
cfg_type = rm_db->db[parms->db_index].cfg_type;

/* Bail out if not controlled by RM */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
cfg_type != TF_RM_ELEM_CFG_PRIVATE)
if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;

/* Bail out if the pool is not valid, should never happen */
@@ -770,8 +778,7 @@ tf_rm_free(struct tf_rm_free_parms *parms)
cfg_type = rm_db->db[parms->db_index].cfg_type;

/* Bail out if not controlled by RM */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
cfg_type != TF_RM_ELEM_CFG_PRIVATE)
if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;

/* Bail out if the pool is not valid, should never happen */
@@ -816,8 +823,7 @@ tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
cfg_type = rm_db->db[parms->db_index].cfg_type;

/* Bail out if not controlled by RM */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
cfg_type != TF_RM_ELEM_CFG_PRIVATE)
if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;

/* Bail out if the pool is not valid, should never happen */
@@ -857,9 +863,9 @@ tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
rm_db = (struct tf_rm_new_db *)parms->rm_db;
cfg_type = rm_db->db[parms->db_index].cfg_type;

/* Bail out if not controlled by RM */
/* Bail out if not controlled by HCAPI */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
cfg_type != TF_RM_ELEM_CFG_PRIVATE)
cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;

memcpy(parms->info,
@@ -880,9 +886,9 @@ tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
rm_db = (struct tf_rm_new_db *)parms->rm_db;
cfg_type = rm_db->db[parms->db_index].cfg_type;

/* Bail out if not controlled by RM */
/* Bail out if not controlled by HCAPI */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
cfg_type != TF_RM_ELEM_CFG_PRIVATE)
cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;

*parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
@@ -903,8 +909,7 @@ tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
cfg_type = rm_db->db[parms->db_index].cfg_type;

/* Bail out if not controlled by RM */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
cfg_type != TF_RM_ELEM_CFG_PRIVATE)
if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;

/* Bail silently (no logging), if the pool is not valid there
@@ -56,12 +56,18 @@ struct tf_rm_new_entry {
* ULP layer that is not controlled by HCAPI within the Firmware.
*/
enum tf_rm_elem_cfg_type {
/** No configuration */
/**
* No configuration
*/
TF_RM_ELEM_CFG_NULL,
/** HCAPI 'controlled', uses a Pool for internal storage */
/** HCAPI 'controlled', no RM storage thus the Device Module
* using the RM can chose to handle storage locally.
*/
TF_RM_ELEM_CFG_HCAPI,
/** Private thus not HCAPI 'controlled', creates a Pool for storage */
TF_RM_ELEM_CFG_PRIVATE,
/** HCAPI 'controlled', uses a Bit Allocator Pool for internal
* storage in the RM.
*/
TF_RM_ELEM_CFG_HCAPI_BA,
/**
* Shared element thus it belongs to a shared FW Session and
* is not controlled by the Host.
@@ -103,11 +103,6 @@ struct tf_session {

/** Table scope array */
struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];

/**
* EM Pools
*/
struct stack em_pool[TF_DIR_MAX];
};

/**

@@ -43,6 +43,7 @@ tf_tcam_bind(struct tf *tfp,
{
int rc;
int i;
struct tf_tcam_resources *tcam_cnt;
struct tf_rm_create_db_parms db_cfg = { 0 };

TF_CHECK_PARMS2(tfp, parms);

@@ -53,6 +54,14 @@ tf_tcam_bind(struct tf *tfp,
return -EINVAL;
}

tcam_cnt = parms->resources->tcam_cnt;
if ((tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] % 2) ||
(tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] % 2)) {
TFP_DRV_LOG(ERR,
"Number of WC TCAM entries cannot be odd num\n");
return -EINVAL;
}

db_cfg.type = TF_DEVICE_MODULE_TYPE_TCAM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;

@@ -168,6 +177,18 @@ tf_tcam_alloc(struct tf *tfp,
return rc;
}

if (parms->type == TF_TCAM_TBL_TYPE_WC_TCAM &&
(parms->idx % 2) != 0) {
rc = tf_rm_allocate(&aparms);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Failed tcam, type:%d\n",
tf_dir_2_str(parms->dir),
parms->type);
return rc;
}
}

parms->idx *= num_slice_per_row;

return 0;
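A closing note on the EM pool changes above: the pool now holds device record indexes taken directly from the RM-provided range, in blocks of TF_SESSION_EM_ENTRY_SIZE, rather than pool-local positions that had to be scaled on insert. A worked sketch with assumed numbers (these are illustrative values, not values from the patch):

/*
 * Assumed for illustration only: start = 2048, num_entries = 64,
 * TF_SESSION_EM_ENTRY_SIZE = 4, and a LIFO stack.
 *
 * tf_create_em_pool() pushes 64 / 4 = 16 block indexes:
 *   2108, 2104, ..., 2052, 2048
 * so the first stack_pop() returns 2048, which tf_em_insert_int_entry()
 * now uses as rptr_index unchanged (previously rptr_index was computed
 * as index * TF_SESSION_EM_ENTRY_SIZE from a 0-based pool index).
 */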