net/sfc: support counters in tunnel offload jump rules

Such a counter will only report the number of hits, which is actually
a sum of two contributions: the JUMP rule's own counter plus indirect
increments issued by counters of the associated GROUP rules.

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
This commit is contained in:
Ivan Malov 2021-10-13 16:15:12 +03:00 committed by Ferruh Yigit
parent 8cd7725169
commit 9df2d8f5cc
7 changed files with 125 additions and 11 deletions

View File

@ -2993,6 +2993,8 @@ sfc_flow_start(struct sfc_adapter *sa)
SFC_ASSERT(sfc_adapter_is_locked(sa));
sfc_flow_tunnel_reset_hit_counters(sa);
TAILQ_FOREACH(flow, &sa->flow_list, entries) {
rc = sfc_flow_insert(sa, flow, NULL);
if (rc != 0)

View File

@ -88,6 +88,8 @@ sfc_flow_tunnel_detect_jump_rule(struct sfc_adapter *sa,
}
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
break;
case RTE_FLOW_ACTION_TYPE_MARK:
if (action_mark == NULL) {
action_mark = actions->conf;
@ -460,3 +462,19 @@ sfc_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"tunnel offload: get_restore_info failed");
}
/*
 * Zero the tunnel-offload hit counters of every flow tunnel context.
 * Must be invoked with the adapter lock held and only while the
 * device is not started (asserted below), since the counters are
 * updated by the counter service while traffic flows.
 */
void
sfc_flow_tunnel_reset_hit_counters(struct sfc_adapter *sa)
{
	unsigned int idx;

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state != SFC_ETHDEV_STARTED);

	for (idx = 0; idx < RTE_DIM(sa->flow_tunnels); ++idx) {
		struct sfc_flow_tunnel *tunnel = &sa->flow_tunnels[idx];

		tunnel->group_hit_counter = 0;
		tunnel->reset_jump_hit_counter = 0;
	}
}

View File

@ -63,6 +63,9 @@ struct sfc_flow_tunnel {
struct rte_flow_item_mark item_mark_v;
struct rte_flow_item_mark item_mark_m;
struct rte_flow_item item;
uint64_t reset_jump_hit_counter;
uint64_t group_hit_counter;
};
struct sfc_adapter;
@ -106,6 +109,8 @@ int sfc_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
struct rte_flow_restore_info *info,
struct rte_flow_error *err);
void sfc_flow_tunnel_reset_hit_counters(struct sfc_adapter *sa);
#ifdef __cplusplus
}
#endif

View File

@ -737,6 +737,8 @@ sfc_mae_action_set_add(struct sfc_adapter *sa,
const struct rte_flow_action actions[],
efx_mae_actions_t *spec,
struct sfc_mae_encap_header *encap_header,
uint64_t *ft_group_hit_counter,
struct sfc_flow_tunnel *ft,
unsigned int n_counters,
struct sfc_mae_action_set **action_setp)
{
@ -763,6 +765,16 @@ sfc_mae_action_set_add(struct sfc_adapter *sa,
return ENOMEM;
}
for (i = 0; i < n_counters; ++i) {
action_set->counters[i].rte_id_valid = B_FALSE;
action_set->counters[i].mae_id.id =
EFX_MAE_RSRC_ID_INVALID;
action_set->counters[i].ft_group_hit_counter =
ft_group_hit_counter;
action_set->counters[i].ft = ft;
}
for (action = actions, i = 0;
action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
++action) {
@ -773,8 +785,7 @@ sfc_mae_action_set_add(struct sfc_adapter *sa,
conf = action->conf;
action_set->counters[i].mae_id.id =
EFX_MAE_RSRC_ID_INVALID;
action_set->counters[i].rte_id_valid = B_TRUE;
action_set->counters[i].rte_id = conf->id;
i++;
}
@ -3499,10 +3510,12 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
{
struct sfc_mae_encap_header *encap_header = NULL;
struct sfc_mae_actions_bundle bundle = {0};
struct sfc_flow_tunnel *counter_ft = NULL;
uint64_t *ft_group_hit_counter = NULL;
const struct rte_flow_action *action;
struct sfc_mae *mae = &sa->mae;
unsigned int n_count = 0;
efx_mae_actions_t *spec;
unsigned int n_count;
int rc;
rte_errno = 0;
@ -3517,11 +3530,31 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
if (rc != 0)
goto fail_action_set_spec_init;
for (action = actions;
action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
++n_count;
}
if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
/* JUMP rules don't decapsulate packets. GROUP rules do. */
rc = efx_mae_action_set_populate_decap(spec);
if (rc != 0)
goto fail_enforce_ft_decap;
if (n_count == 0 && sfc_mae_counter_stream_enabled(sa)) {
/*
* The user opted not to use action COUNT in this rule,
* but the counter should be enabled implicitly because
* packets hitting this rule contribute to the tunnel's
* total number of hits. See sfc_mae_counter_get().
*/
rc = efx_mae_action_set_populate_count(spec);
if (rc != 0)
goto fail_enforce_ft_count;
n_count = 1;
}
}
/* Cleanup after previous encap. header bounce buffer usage. */
@ -3547,7 +3580,6 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
if (rc != 0)
goto fail_process_encap_header;
n_count = efx_mae_action_set_get_nb_count(spec);
if (n_count > 1) {
rc = ENOTSUP;
sfc_err(sa, "too many count actions requested: %u", n_count);
@ -3562,6 +3594,8 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
if (rc != 0)
goto fail_workaround_jump_delivery;
counter_ft = spec_mae->ft;
break;
case SFC_FT_RULE_GROUP:
/*
@ -3571,6 +3605,8 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
* MARK above, so don't check the return value here.
*/
(void)efx_mae_action_set_populate_mark(spec, 0);
ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
break;
default:
SFC_ASSERT(B_FALSE);
@ -3584,7 +3620,8 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
return 0;
}
rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
ft_group_hit_counter, counter_ft, n_count,
&spec_mae->action_set);
if (rc != 0)
goto fail_action_set_add;
@ -3600,6 +3637,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
fail_rule_parse_action:
efx_mae_action_set_spec_fini(sa->nic, spec);
fail_enforce_ft_count:
fail_enforce_ft_decap:
fail_action_set_spec_init:
if (rc > 0 && rte_errno == 0) {
@ -3747,6 +3785,11 @@ sfc_mae_flow_insert(struct sfc_adapter *sa,
goto fail_outer_rule_enable;
}
if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
spec_mae->ft->reset_jump_hit_counter =
spec_mae->ft->group_hit_counter;
}
if (action_set == NULL) {
sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
return 0;
@ -3846,7 +3889,8 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
for (i = 0; i < action_set->n_counters; i++) {
/*
* Get the first available counter of the flow rule if
* counter ID is not specified.
* counter ID is not specified, provided that this
* counter is not an automatic (implicit) one.
*/
if (conf != NULL && action_set->counters[i].rte_id != conf->id)
continue;
@ -3864,7 +3908,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
return rte_flow_error_set(error, ENOENT,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"No such flow rule action count ID");
"no such flow rule action or such count ID");
}
int

View File

@ -62,6 +62,13 @@ struct sfc_mae_counter_id {
efx_counter_t mae_id;
/* ID of a counter in RTE */
uint32_t rte_id;
/* RTE counter ID validity status */
bool rte_id_valid;
/* Flow Tunnel (FT) GROUP hit counter (or NULL) */
uint64_t *ft_group_hit_counter;
/* Flow Tunnel (FT) context (for JUMP rules; otherwise, NULL) */
struct sfc_flow_tunnel *ft;
};
/** Action set registry entry */
@ -101,6 +108,8 @@ struct sfc_mae_counter {
uint32_t generation_count;
union sfc_pkts_bytes value;
union sfc_pkts_bytes reset;
uint64_t *ft_group_hit_counter;
};
struct sfc_mae_counters_xstats {

View File

@ -99,6 +99,8 @@ sfc_mae_counter_enable(struct sfc_adapter *sa,
&p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
p->generation_count = generation_count;
p->ft_group_hit_counter = counterp->ft_group_hit_counter;
/*
* The flag is set at the very end of add operation and reset
* at the beginning of delete operation. Release ordering is
@ -210,6 +212,14 @@ sfc_mae_counter_increment(struct sfc_adapter *sa,
__atomic_store(&p->value.pkts_bytes,
&cnt_val.pkts_bytes, __ATOMIC_RELAXED);
if (p->ft_group_hit_counter != NULL) {
uint64_t ft_group_hit_counter;
ft_group_hit_counter = *p->ft_group_hit_counter + pkts;
__atomic_store_n(p->ft_group_hit_counter, ft_group_hit_counter,
__ATOMIC_RELAXED);
}
sfc_info(sa, "update MAE counter #%u: pkts+%" PRIu64 "=%" PRIu64
", bytes+%" PRIu64 "=%" PRIu64, mae_counter_id,
pkts, cnt_val.pkts, bytes, cnt_val.bytes);
@ -799,6 +809,8 @@ sfc_mae_counter_get(struct sfc_mae_counters *counters,
const struct sfc_mae_counter_id *counter,
struct rte_flow_query_count *data)
{
struct sfc_flow_tunnel *ft = counter->ft;
uint64_t non_reset_jump_hit_counter;
struct sfc_mae_counter *p;
union sfc_pkts_bytes value;
@ -814,14 +826,35 @@ sfc_mae_counter_get(struct sfc_mae_counters *counters,
__ATOMIC_RELAXED);
data->hits_set = 1;
data->bytes_set = 1;
data->hits = value.pkts - p->reset.pkts;
data->bytes = value.bytes - p->reset.bytes;
if (ft != NULL) {
data->hits += ft->group_hit_counter;
non_reset_jump_hit_counter = data->hits;
data->hits -= ft->reset_jump_hit_counter;
} else {
data->bytes_set = 1;
data->bytes = value.bytes - p->reset.bytes;
}
if (data->reset != 0) {
p->reset.pkts = value.pkts;
p->reset.bytes = value.bytes;
if (ft != NULL) {
ft->reset_jump_hit_counter = non_reset_jump_hit_counter;
} else {
p->reset.pkts = value.pkts;
p->reset.bytes = value.bytes;
}
}
return 0;
}
/*
 * Report whether the MAE counter stream prerequisites are met:
 * the counter Rx queue has been initialised and a service lcore
 * is available to poll it.
 */
bool
sfc_mae_counter_stream_enabled(struct sfc_adapter *sa)
{
	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0)
		return B_FALSE;

	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
		return B_FALSE;

	return B_TRUE;
}

View File

@ -52,6 +52,9 @@ int sfc_mae_counter_get(struct sfc_mae_counters *counters,
int sfc_mae_counter_start(struct sfc_adapter *sa);
void sfc_mae_counter_stop(struct sfc_adapter *sa);
/* Check whether MAE Counter-on-Queue (CoQ) prerequisites are satisfied */
bool sfc_mae_counter_stream_enabled(struct sfc_adapter *sa);
#ifdef __cplusplus
}
#endif