net/cxgbe: validate flows offloaded to HASH region

Fetch the match items supported by the HASH region. Ensure that the mask
is fully set for all supported match items before offloading a flow to
the HASH region; otherwise, offload it to the LE-TCAM region.

Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
This commit is contained in:
Shagun Agrawal 2018-06-29 23:42:17 +05:30 committed by Ferruh Yigit
parent 3a381a4116
commit 78192b38a8
5 changed files with 71 additions and 0 deletions

View File

@ -156,6 +156,9 @@ struct tp_params {
int vnic_shift;
int port_shift;
int protocol_shift;
int ethertype_shift;
u64 hash_filter_mask;
};
struct vpd_params {

View File

@ -5032,6 +5032,8 @@ int t4_init_tp_params(struct adapter *adap)
adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
F_PROTOCOL);
adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
F_ETHERTYPE);
/*
* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
@ -5040,6 +5042,11 @@ int t4_init_tp_params(struct adapter *adap)
if ((adap->params.tp.ingress_config & F_VNIC) == 0)
adap->params.tp.vnic_shift = -1;
v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
adap->params.tp.hash_filter_mask = v;
v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
adap->params.tp.hash_filter_mask |= ((u64)v << 32);
return 0;
}

View File

@ -946,3 +946,6 @@
#define F_HASHEN V_HASHEN(1U)
#define A_LE_DB_TID_HASHBASE 0x19df8
/*
 * T6 LE hash-mask generation registers; together they hold the 64-bit
 * ntuple mask applied to HASH-region filters (LE_3 = low 32 bits,
 * LE_4 = high 32 bits — combined in t4_init_tp_params()).
 */
#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0

View File

@ -86,6 +86,7 @@ struct ch_filter_specification {
* matching that doesn't exist as a (value, mask) tuple.
*/
uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
uint32_t cap:1; /* 0 => LE-TCAM, 1 => Hash */
/*
* Packet dispatch information. Ingress packets which match the

View File

@ -48,6 +48,58 @@ cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
return 0;
}
/*
 * Decide which filter region the given filter spec can be offloaded to.
 *
 * Defaults to the LE-TCAM region (fs->cap = 0). Switches to the HASH
 * region (fs->cap = 1) only when the adapter supports hash filters and
 * the spec exactly matches the hardware's requirements: non-zero,
 * fully-masked local/foreign IPs and ports, and an ntuple mask that is
 * identical to the hash filter mask programmed in hardware.
 */
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple_mask = 0;

	/* LE-TCAM region unless every check below passes. */
	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		/* IPv6: both addresses must be non-zero and fully masked. */
		uint8_t all_ones[16];
		uint8_t all_zeros[16] = {0};

		memset(all_ones, 0xff, sizeof(all_ones));

		if (!memcmp(fs->val.lip, all_zeros, sizeof(all_zeros)) ||
		    !memcmp(fs->val.fip, all_zeros, sizeof(all_zeros)) ||
		    memcmp(fs->mask.lip, all_ones, sizeof(all_ones)) ||
		    memcmp(fs->mask.fip, all_ones, sizeof(all_ones)))
			return;
	} else {
		/* IPv4: same requirement on the 32-bit addresses. */
		uint32_t all_ones = 0xffffffff;
		uint32_t all_zeros = 0;

		if (!memcmp(fs->val.lip, &all_zeros, sizeof(all_zeros)) ||
		    !memcmp(fs->val.fip, &all_zeros, sizeof(all_zeros)) ||
		    memcmp(fs->mask.lip, &all_ones, sizeof(all_ones)) ||
		    memcmp(fs->mask.fip, &all_ones, sizeof(all_ones)))
			return;
	}

	/* Both ports must be non-zero and fully masked. */
	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	/*
	 * Build the ntuple mask from the fields the compressed filter
	 * tuple carries; it must equal the hardware hash filter mask.
	 */
	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;

	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

	if (ntuple_mask != tp->hash_filter_mask)
		return;

	fs->cap = 1; /* use hash region */
}
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
struct ch_filter_specification *fs,
@ -222,6 +274,8 @@ cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
if (flow->fs.cap)
return 0; /* Hash filters */
return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
cxgbe_validate_fidxonadd(&flow->fs,
ethdev2adap(flow->dev), fidx);
@ -329,6 +383,7 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
const struct rte_flow_item items[],
struct rte_flow_error *e)
{
struct adapter *adap = ethdev2adap(flow->dev);
const struct rte_flow_item *i;
char repeat[ARRAY_SIZE(parseitem)] = {0};
@ -369,6 +424,8 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
}
}
cxgbe_fill_filter_region(adap, &flow->fs);
return 0;
}