net/qede: support ntuple and flow director filter
Add limited support for ntuple filter and flow director configuration. The filtering is based on the 4-tuple: src-ip, dst-ip, src-port and dst-port. The mask fields, tcp_flags, flex masks, priority fields, Rx queue drop, etc. are not supported.

Signed-off-by: Harish Patil <harish.patil@qlogic.com>
commit 622075356e
parent 5343700290
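As background (not part of this commit), here is a minimal sketch of how an application might exercise the new flow director path through the legacy filter_ctrl API that this commit hooks into. It assumes the port was configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT (the PMD also force-enables perfect mode on the first add); port_id, the queue index, addresses and ports are made-up values, and add_udp4_fdir_filter() is a hypothetical helper.

#include <string.h>          /* memset */
#include <netinet/in.h>      /* IPPROTO_UDP */
#include <rte_ethdev.h>      /* rte_eth_dev_filter_ctrl(), struct rte_eth_fdir_filter */
#include <rte_byteorder.h>

/* Hypothetical helper: steer UDP 10.0.0.1:320 -> 10.0.0.2:321 to a given Rx queue. */
static int
add_udp4_fdir_filter(uint8_t port_id, uint16_t rx_queue)
{
    struct rte_eth_fdir_filter f;

    /* RTE_ETH_FILTER_NOP query; maps to the 100G-mode check in qede_fdir_filter_conf() */
    if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR) < 0)
        return -ENOTSUP;

    memset(&f, 0, sizeof(f));
    f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
    f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(0x0a000001); /* 10.0.0.1 */
    f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(0x0a000002); /* 10.0.0.2 */
    f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(320);
    f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(321);
    f.action.behavior = RTE_ETH_FDIR_ACCEPT; /* Rx queue drop is not supported */
    f.action.rx_queue = rx_queue;

    return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                   RTE_ETH_FILTER_ADD, &f);
}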
@@ -34,3 +34,5 @@ Multiprocess aware = Y
Linux UIO = Y
x86-64 = Y
Usage doc = Y
N-tuple filter = Y
Flow director = Y
@@ -60,6 +60,7 @@ Supported Features
- Multiprocess aware
- Scatter-Gather
- VXLAN tunneling offload
- N-tuple filter and flow director (limited support)

Non-supported Features
----------------------
@@ -99,5 +99,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c

include $(RTE_SDK)/mk/rte.lib.mk
@@ -602,6 +602,9 @@ struct ecore_hwfn {

    /* L2-related */
    struct ecore_l2_info *p_l2_info;

    /* @DPDK */
    struct ecore_ptt *p_arfs_ptt;
};

#ifndef __EXTRACT__LINUX__
@@ -924,6 +924,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        return -EINVAL;
    }

    /* Flow director mode check */
    rc = qede_check_fdir_support(eth_dev);
    if (rc) {
        qdev->ops->vport_stop(edev, 0);
        qede_dealloc_fp_resc(eth_dev);
        return -EINVAL;
    }
    SLIST_INIT(&qdev->fdir_info.fdir_list_head);

    SLIST_INIT(&qdev->vlan_list_head);

    /* Add primary mac for PF */
@@ -1124,6 +1133,8 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)

    PMD_INIT_FUNC_TRACE(edev);

    qede_fdir_dealloc_resc(eth_dev);

    /* dev_stop() shall cleanup fp resources in hw but without releasing
     * dma memories and sw structures so that dev_start() can be called
     * by the app without reconfiguration. However, in dev_close() we
@@ -1962,11 +1973,13 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
        }
        break;
    case RTE_ETH_FILTER_FDIR:
        return qede_fdir_filter_conf(eth_dev, filter_op, arg);
    case RTE_ETH_FILTER_NTUPLE:
        return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
    case RTE_ETH_FILTER_MACVLAN:
    case RTE_ETH_FILTER_ETHERTYPE:
    case RTE_ETH_FILTER_FLEXIBLE:
    case RTE_ETH_FILTER_SYN:
    case RTE_ETH_FILTER_NTUPLE:
    case RTE_ETH_FILTER_HASH:
    case RTE_ETH_FILTER_L2_TUNNEL:
    case RTE_ETH_FILTER_MAX:
@@ -2057,6 +2070,7 @@ static void qede_update_pf_params(struct ecore_dev *edev)

    memset(&pf_params, 0, sizeof(struct ecore_pf_params));
    pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
    pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
    qed_ops->common->update_pf_params(edev, &pf_params);
}

@@ -34,6 +34,8 @@
#include "base/nvm_cfg.h"
#include "base/ecore_iov_api.h"
#include "base/ecore_sp_commands.h"
#include "base/ecore_l2.h"
#include "base/ecore_dev_api.h"

#include "qede_logs.h"
#include "qede_if.h"
@@ -131,6 +133,9 @@ extern char fw_file[];
/* Number of PF connections - 32 RX + 32 TX */
#define QEDE_PF_NUM_CONNS (64)

/* Maximum number of flowdir filters */
#define QEDE_RFS_MAX_FLTR (256)

/* Port/function states */
enum qede_dev_state {
    QEDE_DEV_INIT, /* Init the chip and Slowpath */
@@ -156,6 +161,21 @@ struct qede_ucast_entry {
    SLIST_ENTRY(qede_ucast_entry) list;
};

struct qede_fdir_entry {
    uint32_t soft_id; /* unused for now */
    uint16_t pkt_len; /* actual packet length to match */
    uint16_t rx_queue; /* queue to be steered to */
    const struct rte_memzone *mz; /* mz used to hold L2 frame */
    SLIST_ENTRY(qede_fdir_entry) list;
};

struct qede_fdir_info {
    struct ecore_arfs_config_params arfs;
    uint16_t filter_count;
    SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
};


/*
 * Structure to store private data for each port.
 */
@@ -190,6 +210,7 @@ struct qede_dev {
    bool handle_hw_err;
    uint16_t num_tunn_filters;
    uint16_t vxlan_filter_type;
    struct qede_fdir_info fdir_info;
    char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
};

@@ -208,6 +229,11 @@ static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf);

static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags);

static uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
                                        struct rte_eth_fdir_filter *fdir,
                                        void *buff,
                                        struct ecore_arfs_config_params *param);

/* Non-static functions */
void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf);

@@ -215,4 +241,17 @@ int qed_fill_eth_dev_info(struct ecore_dev *edev,
                          struct qed_dev_eth_info *info);
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);

int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
                         enum rte_filter_op op, void *arg);

int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
                          enum rte_filter_op filter_op, void *arg);

int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
                            enum rte_filter_op filter_op, void *arg);

int qede_check_fdir_support(struct rte_eth_dev *eth_dev);

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);

#endif /* _QEDE_ETHDEV_H_ */
drivers/net/qede/qede_fdir.c (new file, 487 lines)
@@ -0,0 +1,487 @@
/*
 * Copyright (c) 2017 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_errno.h>

#include "qede_ethdev.h"

#define IP_VERSION (0x40)
#define IP_HDRLEN (0x5)
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL (64)

/* Sum of length of header types of L2, L3, L4.
 * L2 : ether_hdr + vlan_hdr + vxlan_hdr
 * L3 : ipv6_hdr
 * L4 : tcp_hdr
 */
#define QEDE_MAX_FDIR_PKT_LEN (86)

#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN (16)
#endif

#define QEDE_VALID_FLOW(flow_type) \
    ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
     (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
     (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
     (flow_type) == RTE_ETH_FLOW_FRAG_IPV6 || \
     (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
     (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)

/* Note: Flowdir support is only partial.
 * For ex: drop_queue, FDIR masks, flex_conf are not supported.
 * Parameters like pballoc/status fields are irrelevant here.
 */
int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
{
    struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
    struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;

    /* check FDIR modes */
    switch (fdir->mode) {
    case RTE_FDIR_MODE_NONE:
        qdev->fdir_info.arfs.arfs_enable = false;
        DP_INFO(edev, "flowdir is disabled\n");
        break;
    case RTE_FDIR_MODE_PERFECT:
        if (edev->num_hwfns > 1) {
            DP_ERR(edev, "flowdir is not supported in 100G mode\n");
            qdev->fdir_info.arfs.arfs_enable = false;
            return -ENOTSUP;
        }
        qdev->fdir_info.arfs.arfs_enable = true;
        DP_INFO(edev, "flowdir is enabled\n");
        break;
    case RTE_FDIR_MODE_PERFECT_TUNNEL:
    case RTE_FDIR_MODE_SIGNATURE:
    case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
        DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
        return -ENOTSUP;
    }

    return 0;
}

void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
{
    struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
    struct qede_fdir_entry *tmp = NULL;
    struct qede_fdir_entry *fdir;

    SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
        if (tmp) {
            if (tmp->mz)
                rte_memzone_free(tmp->mz);
            SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
                         qede_fdir_entry, list);
            rte_free(tmp);
        }
    }
}

static int
qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
                            struct rte_eth_fdir_filter *fdir_filter,
                            bool add)
{
    struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
    char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
    struct qede_fdir_entry *tmp = NULL;
    struct qede_fdir_entry *fdir;
    const struct rte_memzone *mz;
    struct ecore_hwfn *p_hwfn;
    enum _ecore_status_t rc;
    uint16_t pkt_len;
    uint16_t len;
    void *pkt;

    if (add) {
        if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
            DP_ERR(edev, "Reached max flowdir filter limit\n");
            return -EINVAL;
        }
        fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
                          RTE_CACHE_LINE_SIZE);
        if (!fdir) {
            DP_ERR(edev, "Did not allocate memory for fdir\n");
            return -ENOMEM;
        }
    }
    /* soft_id could have been used as memzone string, but soft_id is
     * not currently used so it has no significance.
     */
    snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
             (unsigned long)rte_get_timer_cycles());
    mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
                                     SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
    if (!mz) {
        DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
               rte_strerror(rte_errno));
        rc = -rte_errno;
        goto err1;
    }

    pkt = mz->addr;
    memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
    pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
                                      &qdev->fdir_info.arfs);
    if (pkt_len == 0) {
        rc = -EINVAL;
        goto err2;
    }
    DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
    if (add) {
        SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
            if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
                DP_ERR(edev, "flowdir filter exist\n");
                rc = -EEXIST;
                goto err2;
            }
        }
    } else {
        SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
            if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
                break;
        }
        if (!tmp) {
            DP_ERR(edev, "flowdir filter does not exist\n");
            rc = -EEXIST;
            goto err2;
        }
    }
    p_hwfn = ECORE_LEADING_HWFN(edev);
    if (add) {
        if (!qdev->fdir_info.arfs.arfs_enable) {
            /* Force update */
            eth_dev->data->dev_conf.fdir_conf.mode =
                RTE_FDIR_MODE_PERFECT;
            qdev->fdir_info.arfs.arfs_enable = true;
            DP_INFO(edev, "Force enable flowdir in perfect mode\n");
        }
        /* Enable ARFS searcher with updated flow_types */
        ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                  &qdev->fdir_info.arfs);
    }
    /* configure filter with ECORE_SPQ_MODE_EBLOCK */
    rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
                                           (dma_addr_t)mz->phys_addr,
                                           pkt_len,
                                           fdir_filter->action.rx_queue,
                                           0, add);
    if (rc == ECORE_SUCCESS) {
        if (add) {
            fdir->rx_queue = fdir_filter->action.rx_queue;
            fdir->pkt_len = pkt_len;
            fdir->mz = mz;
            SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
                              fdir, list);
            qdev->fdir_info.filter_count++;
            DP_INFO(edev, "flowdir filter added, count = %d\n",
                    qdev->fdir_info.filter_count);
        } else {
            rte_memzone_free(tmp->mz);
            SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
                         qede_fdir_entry, list);
            rte_free(tmp); /* the node deleted */
            rte_memzone_free(mz); /* temp node allocated */
            qdev->fdir_info.filter_count--;
            DP_INFO(edev, "Fdir filter deleted, count = %d\n",
                    qdev->fdir_info.filter_count);
        }
    } else {
        DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
               rc, qdev->fdir_info.filter_count);
    }

    /* Disable ARFS searcher if there are no more filters */
    if (qdev->fdir_info.filter_count == 0) {
        memset(&qdev->fdir_info.arfs, 0,
               sizeof(struct ecore_arfs_config_params));
        DP_INFO(edev, "Disabling flowdir\n");
        qdev->fdir_info.arfs.arfs_enable = false;
        ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                  &qdev->fdir_info.arfs);
    }
    return 0;

err2:
    rte_memzone_free(mz);
err1:
    if (add)
        rte_free(fdir);
    return rc;
}

static int
qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
                     struct rte_eth_fdir_filter *fdir,
                     bool add)
{
    struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

    if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
        DP_ERR(edev, "invalid flow_type input\n");
        return -EINVAL;
    }

    if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
        DP_ERR(edev, "invalid queue number %u\n",
               fdir->action.rx_queue);
        return -EINVAL;
    }

    if (fdir->input.flow_ext.is_vf) {
        DP_ERR(edev, "flowdir is not supported over VF\n");
        return -EINVAL;
    }

    return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
}

/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
static uint16_t
qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
                        struct rte_eth_fdir_filter *fdir,
                        void *buff,
                        struct ecore_arfs_config_params *params)

{
    struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
    uint16_t *ether_type;
    uint8_t *raw_pkt;
    struct rte_eth_fdir_input *input;
    static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
    struct ipv4_hdr *ip;
    struct ipv6_hdr *ip6;
    struct udp_hdr *udp;
    struct tcp_hdr *tcp;
    struct sctp_hdr *sctp;
    uint8_t size, dst = 0;
    uint16_t len;
    static const uint8_t next_proto[] = {
        [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
        [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
        [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
        [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
        [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
        [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
    };
    raw_pkt = (uint8_t *)buff;
    input = &fdir->input;
    DP_INFO(edev, "flow_type %d\n", input->flow_type);

    len = 2 * sizeof(struct ether_addr);
    raw_pkt += 2 * sizeof(struct ether_addr);
    if (input->flow_ext.vlan_tci) {
        DP_INFO(edev, "adding VLAN header\n");
        rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
        rte_memcpy(raw_pkt + sizeof(uint16_t),
                   &input->flow_ext.vlan_tci,
                   sizeof(uint16_t));
        raw_pkt += sizeof(vlan_frame);
        len += sizeof(vlan_frame);
    }
    ether_type = (uint16_t *)raw_pkt;
    raw_pkt += sizeof(uint16_t);
    len += sizeof(uint16_t);

    /* fill the common ip header */
    switch (input->flow_type) {
    case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
    case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
    case RTE_ETH_FLOW_FRAG_IPV4:
        ip = (struct ipv4_hdr *)raw_pkt;
        *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
        ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
        ip->total_length = sizeof(struct ipv4_hdr);
        ip->next_proto_id = input->flow.ip4_flow.proto ?
                            input->flow.ip4_flow.proto :
                            next_proto[input->flow_type];
        ip->time_to_live = input->flow.ip4_flow.ttl ?
                           input->flow.ip4_flow.ttl :
                           QEDE_FDIR_IPV4_DEF_TTL;
        ip->type_of_service = input->flow.ip4_flow.tos;
        ip->dst_addr = input->flow.ip4_flow.dst_ip;
        ip->src_addr = input->flow.ip4_flow.src_ip;
        len += sizeof(struct ipv4_hdr);
        params->ipv4 = true;
        break;
    case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
    case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
    case RTE_ETH_FLOW_FRAG_IPV6:
        ip6 = (struct ipv6_hdr *)raw_pkt;
        *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
        ip6->proto = input->flow.ipv6_flow.proto ?
                     input->flow.ipv6_flow.proto :
                     next_proto[input->flow_type];
        rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
                   IPV6_ADDR_LEN);
        rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
                   IPV6_ADDR_LEN);
        len += sizeof(struct ipv6_hdr);
        break;
    default:
        DP_ERR(edev, "Unsupported flow_type %u\n",
               input->flow_type);
        return 0;
    }

    /* fill the L4 header */
    raw_pkt = (uint8_t *)buff;
    switch (input->flow_type) {
    case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
        udp = (struct udp_hdr *)(raw_pkt + len);
        udp->dst_port = input->flow.udp4_flow.dst_port;
        udp->src_port = input->flow.udp4_flow.src_port;
        udp->dgram_len = sizeof(struct udp_hdr);
        len += sizeof(struct udp_hdr);
        /* adjust ip total_length */
        ip->total_length += sizeof(struct udp_hdr);
        params->udp = true;
        break;
    case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
        tcp = (struct tcp_hdr *)(raw_pkt + len);
        tcp->src_port = input->flow.tcp4_flow.src_port;
        tcp->dst_port = input->flow.tcp4_flow.dst_port;
        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
        len += sizeof(struct tcp_hdr);
        /* adjust ip total_length */
        ip->total_length += sizeof(struct tcp_hdr);
        params->tcp = true;
        break;
    case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
        tcp = (struct tcp_hdr *)(raw_pkt + len);
        tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
        tcp->src_port = input->flow.udp6_flow.src_port;
        tcp->dst_port = input->flow.udp6_flow.dst_port;
        /* adjust ip total_length */
        len += sizeof(struct tcp_hdr);
        params->tcp = true;
        break;
    case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
        udp = (struct udp_hdr *)(raw_pkt + len);
        udp->src_port = input->flow.udp6_flow.dst_port;
        udp->dst_port = input->flow.udp6_flow.src_port;
        /* adjust ip total_length */
        len += sizeof(struct udp_hdr);
        params->udp = true;
        break;
    default:
        DP_ERR(edev, "Unsupported flow_type %d\n", input->flow_type);
        return 0;
    }
    return len;
}

int
qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
                      enum rte_filter_op filter_op,
                      void *arg)
{
    struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
    struct rte_eth_fdir_filter *fdir;
    int ret;

    fdir = (struct rte_eth_fdir_filter *)arg;
    switch (filter_op) {
    case RTE_ETH_FILTER_NOP:
        /* Typically used to query flowdir support */
        if (edev->num_hwfns > 1) {
            DP_ERR(edev, "flowdir is not supported in 100G mode\n");
            return -ENOTSUP;
        }
        return 0; /* means supported */
    case RTE_ETH_FILTER_ADD:
        ret = qede_fdir_filter_add(eth_dev, fdir, 1);
        break;
    case RTE_ETH_FILTER_DELETE:
        ret = qede_fdir_filter_add(eth_dev, fdir, 0);
        break;
    case RTE_ETH_FILTER_FLUSH:
    case RTE_ETH_FILTER_UPDATE:
    case RTE_ETH_FILTER_INFO:
        return -ENOTSUP;
        break;
    default:
        DP_ERR(edev, "unknown operation %u", filter_op);
        ret = -EINVAL;
    }

    return ret;
}

int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
                            enum rte_filter_op filter_op,
                            void *arg)
{
    struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
    struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
    struct rte_eth_ntuple_filter *ntuple;
    struct rte_eth_fdir_filter fdir_entry;
    struct rte_eth_tcpv4_flow *tcpv4_flow;
    struct rte_eth_udpv4_flow *udpv4_flow;
    struct ecore_hwfn *p_hwfn;
    bool add;

    switch (filter_op) {
    case RTE_ETH_FILTER_NOP:
        /* Typically used to query fdir support */
        if (edev->num_hwfns > 1) {
            DP_ERR(edev, "flowdir is not supported in 100G mode\n");
            return -ENOTSUP;
        }
        return 0; /* means supported */
    case RTE_ETH_FILTER_ADD:
        add = true;
        break;
    case RTE_ETH_FILTER_DELETE:
        add = false;
        break;
    case RTE_ETH_FILTER_INFO:
    case RTE_ETH_FILTER_GET:
    case RTE_ETH_FILTER_UPDATE:
    case RTE_ETH_FILTER_FLUSH:
    case RTE_ETH_FILTER_SET:
    case RTE_ETH_FILTER_STATS:
    case RTE_ETH_FILTER_OP_MAX:
        DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
        return -ENOTSUP;
    }
    ntuple = (struct rte_eth_ntuple_filter *)arg;
    /* Internally convert ntuple to fdir entry */
    memset(&fdir_entry, 0, sizeof(fdir_entry));
    if (ntuple->proto == IPPROTO_TCP) {
        fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
        tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
        tcpv4_flow->ip.src_ip = ntuple->src_ip;
        tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
        tcpv4_flow->ip.proto = IPPROTO_TCP;
        tcpv4_flow->src_port = ntuple->src_port;
        tcpv4_flow->dst_port = ntuple->dst_port;
    } else {
        fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        udpv4_flow = &fdir_entry.input.flow.udp4_flow;
        udpv4_flow->ip.src_ip = ntuple->src_ip;
        udpv4_flow->ip.dst_ip = ntuple->dst_ip;
        udpv4_flow->ip.proto = IPPROTO_TCP;
        udpv4_flow->src_port = ntuple->src_port;
        udpv4_flow->dst_port = ntuple->dst_port;
    }
    return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
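(Illustration, not part of the diff.) The ntuple path above accepts the same 4-tuple through struct rte_eth_ntuple_filter and converts it to an internal fdir entry. A hedged usage sketch follows; port_id, the addresses and ports are made-up values, and add_tcp4_ntuple_filter() is a hypothetical helper. Mask fields, tcp_flags and priority are ignored, per the commit message.

#include <string.h>          /* memset */
#include <netinet/in.h>      /* IPPROTO_TCP */
#include <rte_ethdev.h>      /* rte_eth_dev_filter_ctrl(), struct rte_eth_ntuple_filter */
#include <rte_byteorder.h>

/* Hypothetical helper: match a TCP 4-tuple via the ntuple filter API. */
static int
add_tcp4_ntuple_filter(uint8_t port_id, uint16_t rx_queue)
{
    struct rte_eth_ntuple_filter n;

    memset(&n, 0, sizeof(n));
    n.flags = RTE_5TUPLE_FLAGS;              /* mask fields are ignored by qede */
    n.proto = IPPROTO_TCP;                   /* any other proto is treated as UDP above */
    n.src_ip = rte_cpu_to_be_32(0x0a000001); /* 10.0.0.1 */
    n.dst_ip = rte_cpu_to_be_32(0x0a000002); /* 10.0.0.2 */
    n.src_port = rte_cpu_to_be_16(6000);
    n.dst_port = rte_cpu_to_be_16(6001);
    n.queue = rx_queue;                      /* requested Rx queue */

    return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
                                   RTE_ETH_FILTER_ADD, &n);
}

Note that the conversion in qede_ntuple_filter_conf() above does not appear to copy ntuple->queue into fdir_entry.action.rx_queue, so the zeroed default is what reaches qede_config_cmn_fdir_filter().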
@@ -12,8 +12,6 @@

#include "qede_ethdev.h"

static uint8_t npar_tx_switching = 1;

/* Alarm timeout. */
#define QEDE_ALARM_TIMEOUT_US 100000

@@ -224,23 +222,34 @@ static void qed_stop_iov_task(struct ecore_dev *edev)
static int qed_slowpath_start(struct ecore_dev *edev,
                              struct qed_slowpath_params *params)
{
    bool allow_npar_tx_switching;
    const uint8_t *data = NULL;
    struct ecore_hwfn *hwfn;
    struct ecore_mcp_drv_version drv_version;
    struct ecore_hw_init_params hw_init_params;
    struct qede_dev *qdev = (struct qede_dev *)edev;
    struct ecore_ptt *p_ptt;
    int rc;

#ifdef CONFIG_ECORE_BINARY_FW
    if (IS_PF(edev)) {
#ifdef CONFIG_ECORE_BINARY_FW
        rc = qed_load_firmware_data(edev);
        if (rc) {
            DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
            goto err;
        }
    }
#endif
        hwfn = ECORE_LEADING_HWFN(edev);
        if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */
            p_ptt = ecore_ptt_acquire(hwfn);
            if (p_ptt) {
                ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt;
            } else {
                DP_ERR(edev, "Failed to acquire PTT for flowdir\n");
                rc = -ENOMEM;
                goto err;
            }
        }
    }

    rc = qed_nic_setup(edev);
    if (rc)
@@ -268,13 +277,11 @@ static int qed_slowpath_start(struct ecore_dev *edev,
    data = (const uint8_t *)edev->firmware + sizeof(u32);
#endif

    allow_npar_tx_switching = npar_tx_switching ? true : false;

    /* Start the slowpath */
    memset(&hw_init_params, 0, sizeof(hw_init_params));
    hw_init_params.b_hw_start = true;
    hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
    hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching;
    hw_init_params.allow_npar_tx_switch = true;
    hw_init_params.bin_fw_data = data;
    hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
    hw_init_params.avoid_eng_reset = false;