net/ice/base: introduce and use for each bit iterator

A number of code flows iterate over a block of memory to do something
for every bit set in that memory. Use existing bit operations in a new
iterator macro to make those code flows cleaner.

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Acked-by: Qiming Yang <qiming.yang@intel.com>
This commit is contained in:
Qi Zhang 2020-08-26 17:34:31 +08:00 committed by Ferruh Yigit
parent 4406ca307a
commit bd0438d9d4
5 changed files with 86 additions and 138 deletions

View File

@ -1028,9 +1028,8 @@ ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
entry_tcam = ICE_ACL_TBL_TCAM_IDX(scen->start);
idx = ICE_ACL_TBL_TCAM_ENTRY_IDX(scen->start + entry_idx);
i = ice_find_first_bit(scen->act_mem_bitmap,
ICE_AQC_MAX_ACTION_MEMORIES);
while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
ice_for_each_set_bit(i, scen->act_mem_bitmap,
ICE_AQC_MAX_ACTION_MEMORIES) {
struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
if (actx_idx >= acts_cnt)
@ -1057,9 +1056,6 @@ ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
}
actx_idx++;
}
i = ice_find_next_bit(scen->act_mem_bitmap,
ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
}
if (!status && actx_idx < acts_cnt)
@ -1111,9 +1107,9 @@ ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
}
ice_memset(&act_buf, 0, sizeof(act_buf), ICE_NONDMA_MEM);
i = ice_find_first_bit(scen->act_mem_bitmap,
ICE_AQC_MAX_ACTION_MEMORIES);
while (i < ICE_AQC_MAX_ACTION_MEMORIES) {
ice_for_each_set_bit(i, scen->act_mem_bitmap,
ICE_AQC_MAX_ACTION_MEMORIES) {
struct ice_acl_act_mem *mem = &hw->acl_tbl->act_mems[i];
if (mem->member_of_tcam >= entry_tcam &&
@ -1126,9 +1122,6 @@ ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
"program actpair failed.status: %d\n",
status);
}
i = ice_find_next_bit(scen->act_mem_bitmap,
ICE_AQC_MAX_ACTION_MEMORIES, i + 1);
}
ice_acl_scen_free_entry_idx(scen, entry_idx);

View File

@ -346,6 +346,11 @@ static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size)
return ice_find_next_bit(bitmap, size, 0);
}
/**
 * ice_for_each_set_bit - iterate over each set bit in a bitmap
 * @_bitpos: u16 loop variable; holds the index of the current set bit
 * @_addr: pointer to the ice_bitmap_t to scan
 * @_maxlen: number of valid bits in the bitmap
 *
 * On normal termination @_bitpos equals @_maxlen. NOTE: this is a macro,
 * so @_addr and @_maxlen are evaluated on every iteration and @_bitpos
 * more than once — pass side-effect-free expressions only.
 */
#define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \
for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \
(_bitpos) < (_maxlen); \
(_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1))
/**
* ice_is_any_bit_set - Return true if any bit in the bitmap is set
* @bitmap: the bitmap to check

View File

@ -4665,50 +4665,42 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
byte++;
continue;
}
/* Examine 8 bits per byte */
for (bit = 0; bit < 8; bit++) {
if (ptypes[byte] & BIT(bit)) {
u16 ptype;
u8 ptg;
u8 m;
ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
BITS_PER_BYTE) {
u16 ptype;
u8 ptg;
ptype = byte * BITS_PER_BYTE + bit;
ptype = byte * BITS_PER_BYTE + bit;
/* The package should place all ptypes in a
* non-zero PTG, so the following call should
* never fail.
/* The package should place all ptypes in a non-zero
* PTG, so the following call should never fail.
*/
if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
/* If PTG is already added, skip and continue */
if (ice_is_bit_set(ptgs_used, ptg))
continue;
ice_set_bit(ptg, ptgs_used);
/* Check to see if there are any attributes for this
* ptype, and add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype, attr,
attr_cnt);
if (status == ICE_ERR_MAX_LIMIT)
break;
if (status) {
/* This is simply a ptype/PTG with no
* attribute
*/
if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
prof->ptg[prof->ptg_cnt] = ptg;
prof->attr[prof->ptg_cnt].flags = 0;
prof->attr[prof->ptg_cnt].mask = 0;
/* If PTG is already added, skip and continue */
if (ice_is_bit_set(ptgs_used, ptg))
continue;
ice_set_bit(ptg, ptgs_used);
/* Check to see if there are any attributes for
* this ptype, and add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt);
if (status == ICE_ERR_MAX_LIMIT)
break;
if (status) {
/* This is simply a ptype/PTG with no
* attribute
*/
prof->ptg[prof->ptg_cnt] = ptg;
prof->attr[prof->ptg_cnt].flags = 0;
prof->attr[prof->ptg_cnt].mask = 0;
if (++prof->ptg_cnt >=
ICE_MAX_PTG_PER_PROFILE)
break;
}
/* nothing left in byte, then exit */
m = ~(u8)((1 << (bit + 1)) - 1);
if (!(ptypes[byte] & m))
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
}
}

View File

@ -1340,16 +1340,12 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
u64 match = params->prof->segs[i].match;
enum ice_flow_field j;
for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
const u64 bit = BIT_ULL(j);
if (match & bit) {
status = ice_flow_xtract_fld(hw, params, i, j,
match);
if (status)
return status;
match &= ~bit;
}
ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
ICE_FLOW_FIELD_IDX_MAX) {
status = ice_flow_xtract_fld(hw, params, i, j, match);
if (status)
return status;
ice_clear_bit(j, (ice_bitmap_t *)&match);
}
/* Process raw matching bytes */
@ -1406,17 +1402,12 @@ ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
for (i = 0; i < params->prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &params->prof->segs[i];
u64 match = seg->match;
u8 j;
for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
struct ice_flow_fld_info *fld;
const u64 bit = BIT_ULL(j);
ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
ICE_FLOW_FIELD_IDX_MAX) {
struct ice_flow_fld_info *fld = &seg->fields[j];
if (!(match & bit))
continue;
fld = &seg->fields[j];
fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
if (fld->type == ICE_FLOW_FLD_TYPE_RANGE) {
@ -1448,8 +1439,6 @@ ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
fld->entry.val = index;
index += fld->entry.last;
}
match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
@ -2028,25 +2017,18 @@ ice_flow_acl_set_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof *prof)
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
u64 match = seg->match;
u16 j;
for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
const u64 bit = BIT_ULL(j);
if (!(match & bit))
continue;
ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
ICE_FLOW_FIELD_IDX_MAX) {
info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
buf.word_selection[info->entry.val] =
info->xtrct.idx;
info->xtrct.idx;
else
ice_flow_acl_set_xtrct_seq_fld(&buf,
info);
match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
@ -2549,17 +2531,11 @@ ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
u64 match = seg->match;
u16 j;
u8 j;
for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
struct ice_flow_fld_info *info;
const u64 bit = BIT_ULL(j);
if (!(match & bit))
continue;
info = &seg->fields[j];
ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
ICE_FLOW_FIELD_IDX_MAX) {
struct ice_flow_fld_info *info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
ice_flow_acl_frmt_entry_range(j, info,
@ -2568,8 +2544,6 @@ ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
else
ice_flow_acl_frmt_entry_fld(j, info, buf,
dontcare, data);
match &= ~bit;
}
for (j = 0; j < seg->raws_cnt; j++) {
@ -3271,20 +3245,15 @@ static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
u64 val = hash_fields;
u64 val;
u8 i;
for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
u64 bit = BIT_ULL(i);
ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
ICE_FLOW_FIELD_IDX_MAX)
ice_flow_set_fld(segs, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
if (val & bit) {
ice_flow_set_fld(segs, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
val &= ~bit;
}
}
ICE_FLOW_SET_HDRS(segs, flow_hdr);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &

View File

@ -1374,9 +1374,8 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
continue;
ice_cp_bitmap(profile_to_recipe[i], r_bitmap,
ICE_MAX_NUM_RECIPES);
for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
if (ice_is_bit_set(r_bitmap, j))
ice_set_bit(i, recipe_to_profile[j]);
ice_for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
ice_set_bit(i, recipe_to_profile[j]);
}
}
@ -5946,26 +5945,21 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles,
* the set of recipes that our recipe may collide with. Also, determine
* what possible result indexes are usable given this set of profiles.
*/
bit = 0;
while (ICE_MAX_NUM_PROFILES >
(bit = ice_find_next_bit(profiles, ICE_MAX_NUM_PROFILES, bit))) {
ice_for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
ice_or_bitmap(recipes, recipes, profile_to_recipe[bit],
ICE_MAX_NUM_RECIPES);
ice_and_bitmap(possible_idx, possible_idx,
hw->switch_info->prof_res_bm[bit],
ICE_MAX_FV_WORDS);
bit++;
}
/* For each recipe that our new recipe may collide with, determine
* which indexes have been used.
*/
for (bit = 0; bit < ICE_MAX_NUM_RECIPES; bit++)
if (ice_is_bit_set(recipes, bit)) {
ice_or_bitmap(used_idx, used_idx,
hw->switch_info->recp_list[bit].res_idxs,
ICE_MAX_FV_WORDS);
}
ice_for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
ice_or_bitmap(used_idx, used_idx,
hw->switch_info->recp_list[bit].res_idxs,
ICE_MAX_FV_WORDS);
ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
@ -6650,18 +6644,17 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (LIST_EMPTY(&rm->fv_list)) {
u16 j;
for (j = 0; j < ICE_MAX_NUM_PROFILES; j++)
if (ice_is_bit_set(fv_bitmap, j)) {
struct ice_sw_fv_list_entry *fvl;
ice_for_each_set_bit(j, fv_bitmap, ICE_MAX_NUM_PROFILES) {
struct ice_sw_fv_list_entry *fvl;
fvl = (struct ice_sw_fv_list_entry *)
ice_malloc(hw, sizeof(*fvl));
if (!fvl)
goto err_unroll;
fvl->fv_ptr = NULL;
fvl->profile_id = j;
LIST_ADD(&fvl->list_entry, &rm->fv_list);
}
fvl = (struct ice_sw_fv_list_entry *)
ice_malloc(hw, sizeof(*fvl));
if (!fvl)
goto err_unroll;
fvl->fv_ptr = NULL;
fvl->profile_id = j;
LIST_ADD(&fvl->list_entry, &rm->fv_list);
}
}
/* get bitmap of all profiles the recipe will be associated with */
@ -6716,10 +6709,9 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ICE_MAX_NUM_RECIPES);
/* Update recipe to profile bitmap array */
for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
if (ice_is_bit_set(r_bitmap, j))
ice_set_bit((u16)fvit->profile_id,
recipe_to_profile[j]);
ice_for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
ice_set_bit((u16)fvit->profile_id,
recipe_to_profile[j]);
}
*rid = rm->root_rid;
@ -7909,6 +7901,7 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
list_entry) {
struct ice_fltr_list_entry f_entry;
u16 vsi_handle;
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
@ -7920,12 +7913,8 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
}
/* Add a filter per VSI separately */
while (1) {
u16 vsi_handle;
vsi_handle =
ice_find_first_bit(itr->vsi_list_info->vsi_map,
ICE_MAX_VSI);
ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
ICE_MAX_VSI) {
if (!ice_is_vsi_valid(hw, vsi_handle))
break;