flow_classify: remove void pointer cast

Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Author: Zhiyong Yang, 2018-02-26 16:10:59 +08:00
Committer: Ferruh Yigit
commit ee6c1f770b
parent a371fd7903
2 changed files with 13 additions and 15 deletions
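Background on why the casts can simply be dropped: in C, a value of type void * or const void * converts implicitly to any other object pointer type, so casting item->mask, item->spec, act->conf, or stats->stats before assigning them to typed pointers adds noise without changing behavior. A minimal standalone sketch of the same pattern (the struct and field names below are illustrative stand-ins, not the real rte_flow definitions):

#include <stdio.h>

/* Illustrative stand-in for an rte_flow-style item that carries a generic pointer. */
struct ipv4_spec_example {
	unsigned int src_addr;
	unsigned int dst_addr;
};

struct flow_item_example {
	const void *spec;	/* generic payload, like item->spec */
};

int main(void)
{
	struct ipv4_spec_example hdr = { 0x0a000001u, 0x0a000002u };
	struct flow_item_example item = { &hdr };

	/* No cast needed: const void * converts implicitly to a typed pointer in C. */
	const struct ipv4_spec_example *spec = item.spec;

	printf("src=0x%x dst=0x%x\n", spec->src_addr, spec->dst_addr);
	return 0;
}

The same assignment compiled as C++ would require the cast again, which is why this cleanup applies only to C sources.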


@@ -635,9 +635,7 @@ action_apply(struct rte_flow_classifier *cls,
 	}
 	if (count) {
 		ret = 0;
-		ntuple_stats =
-			(struct rte_flow_classify_ipv4_5tuple_stats *)
-			stats->stats;
+		ntuple_stats = stats->stats;
 		ntuple_stats->counter1 = count;
 		ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
 	}


@@ -279,7 +279,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
-	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+	ipv4_mask = item->mask;
 	/**
 	 * Only support src & dst addresses, protocol,
 	 * others should be masked.
@@ -301,7 +301,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
 	filter->proto_mask = ipv4_mask->hdr.next_proto_id;
-	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+	ipv4_spec = item->spec;
 	filter->dst_ip = ipv4_spec->hdr.dst_addr;
 	filter->src_ip = ipv4_spec->hdr.src_addr;
 	filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -339,7 +339,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	}
 	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
-		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+		tcp_mask = item->mask;
 		/**
 		 * Only support src & dst ports, tcp flags,
@@ -373,12 +373,12 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			return -EINVAL;
 		}
-		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+		tcp_spec = item->spec;
 		filter->dst_port = tcp_spec->hdr.dst_port;
 		filter->src_port = tcp_spec->hdr.src_port;
 		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
 	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
-		udp_mask = (const struct rte_flow_item_udp *)item->mask;
+		udp_mask = item->mask;
 		/**
 		 * Only support src & dst ports,
@@ -397,11 +397,11 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		filter->dst_port_mask = udp_mask->hdr.dst_port;
 		filter->src_port_mask = udp_mask->hdr.src_port;
-		udp_spec = (const struct rte_flow_item_udp *)item->spec;
+		udp_spec = item->spec;
 		filter->dst_port = udp_spec->hdr.dst_port;
 		filter->src_port = udp_spec->hdr.src_port;
 	} else {
-		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+		sctp_mask = item->mask;
 		/**
 		 * Only support src & dst ports,
@@ -420,7 +420,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		filter->dst_port_mask = sctp_mask->hdr.dst_port;
 		filter->src_port_mask = sctp_mask->hdr.src_port;
-		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+		sctp_spec = item->spec;
 		filter->dst_port = sctp_spec->hdr.dst_port;
 		filter->src_port = sctp_spec->hdr.src_port;
 	}
@@ -480,12 +480,12 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
-		count = (const struct rte_flow_action_count *)act->conf;
+		count = act->conf;
 		memcpy(&action.act.counter, count, sizeof(action.act.counter));
 		break;
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
-		mark_spec = (const struct rte_flow_action_mark *)act->conf;
+		mark_spec = act->conf;
 		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
 		break;
 	default:
@@ -502,12 +502,12 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
-		count = (const struct rte_flow_action_count *)act->conf;
+		count = act->conf;
 		memcpy(&action.act.counter, count, sizeof(action.act.counter));
 		break;
 	case RTE_FLOW_ACTION_TYPE_MARK:
 		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
-		mark_spec = (const struct rte_flow_action_mark *)act->conf;
+		mark_spec = act->conf;
 		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
 		break;
 	case RTE_FLOW_ACTION_TYPE_END: