net/i40e: support tunnel filter to VF

Previously, only tunnel filters to the PF were supported.
This patch adds the i40e_dev_consistent_tunnel_filter_set
function to the consistent filter API to support tunnel
filters to VFs.
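
With this change a tunnel rule can be directed to a VF through the
rte_flow API, for example with testpmd's flow syntax (illustrative
values; the VNI, VF id and queue index are arbitrary):

  flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 8 /
    eth dst is 00:11:22:33:44:55 / end actions vf id 1 /
    queue index 2 / end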

Signed-off-by: Yong Liu <yong.liu@intel.com>
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
commit c50474f31e (parent 6e9de780e2)
Author:    Beilei Xing <beilei.xing@intel.com>
Date:      2017-03-28 17:28:43 +08:00
Committer: Ferruh Yigit

3 files changed, 203 insertions(+), 15 deletions(-)
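
For reference, a minimal sketch of calling the new entry point
directly; the values below are hypothetical, a valid struct i40e_pf
*pf is assumed and error handling is omitted, but the fields come
from the i40e_tunnel_filter_conf definition added in this patch:

	struct i40e_tunnel_filter_conf conf;
	struct ether_addr imac = {
		.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} };
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
	conf.ip_type = RTE_TUNNEL_IPTYPE_IPV4;
	conf.inner_mac = imac;  /* inner destination MAC to match */
	conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID;
	conf.tenant_id = 8;     /* VNI */
	conf.is_to_vf = 1;      /* 0 - to PF, 1 - to VF */
	conf.vf_id = 1;         /* must be < pf->vf_num */
	conf.queue_id = 2;      /* queue receiving matched traffic */

	ret = i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1 /* add */);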

--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c

@@ -6937,6 +6937,142 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 	return ret;
 }
 
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+		      struct i40e_tunnel_filter_conf *tunnel_filter,
+		      uint8_t add)
+{
+	uint16_t ip_type;
+	uint32_t ipv4_addr;
+	uint8_t i, tun_type = 0;
+	/* internal variable to convert ipv6 byte order */
+	uint32_t convert_ipv6[4];
+	int val, ret = 0;
+	struct i40e_pf_vf *vf = NULL;
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_vsi *vsi;
+	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+	struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	struct i40e_tunnel_filter *tunnel, *node;
+	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+
+	cld_filter = rte_zmalloc("tunnel_filter",
+			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+			 0);
+
+	if (cld_filter == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	pfilter = cld_filter;
+
+	ether_addr_copy(&tunnel_filter->outer_mac,
+			(struct ether_addr *)&pfilter->element.outer_mac);
+	ether_addr_copy(&tunnel_filter->inner_mac,
+			(struct ether_addr *)&pfilter->element.inner_mac);
+
+	pfilter->element.inner_vlan =
+		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+		rte_memcpy(&pfilter->element.ipaddr.v4.data,
+				&rte_cpu_to_le_32(ipv4_addr),
+				sizeof(pfilter->element.ipaddr.v4.data));
+	} else {
+		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+		for (i = 0; i < 4; i++) {
+			convert_ipv6[i] =
+			rte_cpu_to_le_32(rte_be_to_cpu_32(
+					tunnel_filter->ip_addr.ipv6_addr[i]));
+		}
+		rte_memcpy(&pfilter->element.ipaddr.v6.data,
+			   &convert_ipv6,
+			   sizeof(pfilter->element.ipaddr.v6.data));
+	}
+
+	/* check tunneled type */
+	switch (tunnel_filter->tunnel_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+		break;
+	case RTE_TUNNEL_TYPE_NVGRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+		break;
+	case RTE_TUNNEL_TYPE_IP_IN_GRE:
+		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+		break;
+	default:
+		/* Other tunnel types are not supported. */
+		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+				       &pfilter->element.flags);
+	if (val < 0) {
+		rte_free(cld_filter);
+		return -EINVAL;
+	}
+
+	pfilter->element.flags |= rte_cpu_to_le_16(
+		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+	pfilter->element.queue_number =
+		rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+	if (!tunnel_filter->is_to_vf)
+		vsi = pf->main_vsi;
+	else {
+		if (tunnel_filter->vf_id >= pf->vf_num) {
+			PMD_DRV_LOG(ERR, "Invalid argument.");
+			return -EINVAL;
+		}
+		vf = &pf->vfs[tunnel_filter->vf_id];
+		vsi = vf->vsi;
+	}
+
+	/* Check if there is the filter in SW list */
+	memset(&check_filter, 0, sizeof(check_filter));
+	i40e_tunnel_filter_convert(cld_filter, &check_filter);
+	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+	if (add && node) {
+		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+		return -EINVAL;
+	}
+
+	if (!add && !node) {
+		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+		return -EINVAL;
+	}
+
+	if (add) {
+		ret = i40e_aq_add_cloud_filters(hw,
+					vsi->seid, &cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+			return -ENOTSUP;
+		}
+		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+	} else {
+		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+						   &cld_filter->element, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+			return -ENOTSUP;
+		}
+		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+	}
+
+	rte_free(cld_filter);
+	return ret;
+}
+
 static int
 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
 {

--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h

@@ -515,6 +515,7 @@ struct i40e_tunnel_filter_input {
 	uint16_t flags;              /* Filter type flag */
 	uint32_t tenant_id;          /* Tenant id to match */
 	uint16_t general_fields[32]; /* Big buffer */
+	uint16_t vf_id;              /* VF id for tunnel filtering. */
 };
 
 struct i40e_tunnel_filter {
@@ -531,6 +532,33 @@ struct i40e_tunnel_rule {
 	struct rte_hash *hash_table;
 };
 
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct i40e_tunnel_filter_conf {
+	struct ether_addr outer_mac;    /**< Outer MAC address to match. */
+	struct ether_addr inner_mac;    /**< Inner MAC address to match. */
+	uint16_t inner_vlan;            /**< Inner VLAN to match. */
+	uint32_t outer_vlan;            /**< Outer VLAN to match */
+	enum rte_tunnel_iptype ip_type; /**< IP address type. */
+	/**
+	 * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP
+	 * is set in filter_type, or inner destination IP address to match
+	 * if ETH_TUNNEL_FILTER_IIP is set in filter_type.
+	 */
+	union {
+		uint32_t ipv4_addr;     /**< IPv4 address in big endian. */
+		uint32_t ipv6_addr[4];  /**< IPv6 address in big endian. */
+	} ip_addr;
+	/** Flags from ETH_TUNNEL_FILTER_XX - see above. */
+	uint16_t filter_type;
+	enum rte_eth_tunnel_type tunnel_type; /**< Tunnel Type. */
+	uint32_t tenant_id;     /**< Tenant ID to match. VNI, GRE key... */
+	uint16_t queue_id;      /**< Queue assigned to if match. */
+	uint8_t is_to_vf;       /**< 0 - to PF, 1 - to VF */
+	uint16_t vf_id;         /**< VF id for tunnel filter insertion. */
+};
+
 #define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
 #define I40E_MAX_MIRROR_RULES 64
 /*
@@ -719,6 +747,7 @@ union i40e_filter_t {
 	struct rte_eth_ethertype_filter ethertype_filter;
 	struct rte_eth_fdir_filter fdir_filter;
 	struct rte_eth_tunnel_filter_conf tunnel_filter;
+	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
 };
 
 typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -807,6 +836,9 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			       struct rte_eth_tunnel_filter_conf *tunnel_filter,
 			       uint8_t add);
+int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+				  struct i40e_tunnel_filter_conf *tunnel_filter,
+				  uint8_t add);
 int i40e_fdir_flush(struct rte_eth_dev *dev);
 
 #define I40E_DEV_TO_PCI(eth_dev) \

--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c

@@ -93,7 +93,7 @@ static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 				  const struct rte_flow_action *actions,
 				  struct rte_flow_error *error,
-				  struct rte_eth_tunnel_filter_conf *filter);
+				  struct i40e_tunnel_filter_conf *filter);
 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
 				struct rte_flow_error *error);
 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
@@ -1127,34 +1127,54 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
 }
 
 /* Parse to get the action info of a tunnel filter
- * Tunnel action only supports QUEUE.
+ * Tunnel action only supports PF, VF and QUEUE.
  */
 static int
 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_action *actions,
 			      struct rte_flow_error *error,
-			      struct rte_eth_tunnel_filter_conf *filter)
+			      struct i40e_tunnel_filter_conf *filter)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	const struct rte_flow_action *act;
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_vf *act_vf;
 	uint32_t index = 0;
 
-	/* Check if the first non-void action is QUEUE. */
+	/* Check if the first non-void action is PF or VF. */
 	NEXT_ITEM_OF_ACTION(act, actions, index);
-	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
+	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				   act, "Not supported action.");
 		return -rte_errno;
 	}
 
-	act_q = (const struct rte_flow_action_queue *)act->conf;
-	filter->queue_id = act_q->index;
-	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ACTION,
-				   act, "Invalid queue ID for tunnel filter");
-		return -rte_errno;
+	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+		act_vf = (const struct rte_flow_action_vf *)act->conf;
+		filter->vf_id = act_vf->id;
+		filter->is_to_vf = 1;
+		if (filter->vf_id >= pf->vf_num) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Invalid VF ID for tunnel filter");
+			return -rte_errno;
+		}
+	}
+
+	/* Check if the next non-void item is QUEUE */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->queue_id = act_q->index;
+		if (!filter->is_to_vf)
+			if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					act, "Invalid queue ID for tunnel filter");
+				return -rte_errno;
+			}
 	}
 
 	/* Check if the next non-void item is END */
@@ -1204,7 +1224,7 @@ static int
 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 			      const struct rte_flow_item *pattern,
 			      struct rte_flow_error *error,
-			      struct rte_eth_tunnel_filter_conf *filter)
+			      struct i40e_tunnel_filter_conf *filter)
 {
 	const struct rte_flow_item *item = pattern;
 	const struct rte_flow_item_eth *eth_spec;
@@ -1473,8 +1493,8 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
 			     struct rte_flow_error *error,
 			     union i40e_filter_t *filter)
 {
-	struct rte_eth_tunnel_filter_conf *tunnel_filter =
-		&filter->tunnel_filter;
+	struct i40e_tunnel_filter_conf *tunnel_filter =
+		&filter->consistent_tunnel_filter;
 	int ret;
 
 	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
@@ -1605,8 +1625,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
 					i40e_fdir_filter_list);
 		break;
 	case RTE_ETH_FILTER_TUNNEL:
-		ret = i40e_dev_tunnel_filter_set(pf,
-			&cons_filter.tunnel_filter, 1);
+		ret = i40e_dev_consistent_tunnel_filter_set(pf,
+			&cons_filter.consistent_tunnel_filter, 1);
 		if (ret)
 			goto free_flow;
 		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,