net/qede: add support for GENEVE tunneling offload

This patch refactors the existing VXLAN tunneling offload code and enables
the following features for GENEVE:
 - destination UDP port configuration
 - checksum offloads
 - filter configuration

Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>
Acked-by: Rasesh Mody <rasesh.mody@cavium.com>
This commit is contained in:
Shahed Shaikh 2017-12-13 22:36:03 -08:00 committed by Ferruh Yigit
parent e60644c442
commit d378cefab8
4 changed files with 357 additions and 193 deletions

View File

@ -15,7 +15,7 @@ static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;
/* VXLAN tunnel classification mapping */
const struct _qede_vxlan_tunn_types {
const struct _qede_udp_tunn_types {
uint16_t rte_filter_type;
enum ecore_filter_ucast_type qede_type;
enum ecore_tunn_clss qede_tunn_clss;
@ -611,49 +611,119 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
ECORE_SPQ_MODE_CB, NULL);
}
static int
qede_tunnel_update(struct qede_dev *qdev,
struct ecore_tunnel_info *tunn_info)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
enum _ecore_status_t rc = ECORE_INVAL;
struct ecore_hwfn *p_hwfn;
struct ecore_ptt *p_ptt;
int i;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
p_ptt = IS_PF(edev) ? ecore_ptt_acquire(p_hwfn) : NULL;
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
tunn_info, ECORE_SPQ_MODE_CB, NULL);
if (IS_PF(edev))
ecore_ptt_release(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
break;
}
return rc;
}
static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
bool enable, bool mask)
bool enable)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
enum _ecore_status_t rc = ECORE_INVAL;
struct ecore_ptt *p_ptt;
struct ecore_tunnel_info tunn;
struct ecore_hwfn *p_hwfn;
int i;
if (qdev->vxlan.enable == enable)
return ECORE_SUCCESS;
memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
tunn.vxlan.b_update_mode = enable;
tunn.vxlan.b_mode_enabled = mask;
tunn.vxlan.b_update_mode = true;
tunn.vxlan.b_mode_enabled = enable;
tunn.b_update_rx_cls = true;
tunn.b_update_tx_cls = true;
tunn.vxlan.tun_cls = clss;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
if (IS_PF(edev)) {
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
} else {
p_ptt = NULL;
}
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
&tunn, ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Failed to update tunn_clss %u\n",
tunn.vxlan.tun_cls);
if (IS_PF(edev))
ecore_ptt_release(p_hwfn, p_ptt);
break;
}
}
tunn.vxlan_port.b_update_port = true;
tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
rc = qede_tunnel_update(qdev, &tunn);
if (rc == ECORE_SUCCESS) {
qdev->vxlan.enable = enable;
qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
} else {
DP_ERR(edev, "Failed to update tunn_clss %u\n",
tunn.vxlan.tun_cls);
}
return rc;
}
static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
bool enable)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
enum _ecore_status_t rc = ECORE_INVAL;
struct ecore_tunnel_info tunn;
memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
tunn.l2_geneve.b_update_mode = true;
tunn.l2_geneve.b_mode_enabled = enable;
tunn.ip_geneve.b_update_mode = true;
tunn.ip_geneve.b_mode_enabled = enable;
tunn.l2_geneve.tun_cls = clss;
tunn.ip_geneve.tun_cls = clss;
tunn.b_update_rx_cls = true;
tunn.b_update_tx_cls = true;
tunn.geneve_port.b_update_port = true;
tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
rc = qede_tunnel_update(qdev, &tunn);
if (rc == ECORE_SUCCESS) {
qdev->geneve.enable = enable;
qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
enable ? "enabled" : "disabled", qdev->geneve.udp_port);
} else {
DP_ERR(edev, "Failed to update tunn_clss %u\n",
clss);
}
return rc;
}
static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
enum rte_eth_tunnel_type tunn_type, bool enable)
{
int rc = -EINVAL;
switch (tunn_type) {
case RTE_TUNNEL_TYPE_VXLAN:
rc = qede_vxlan_enable(eth_dev, clss, enable);
break;
case RTE_TUNNEL_TYPE_GENEVE:
rc = qede_geneve_enable(eth_dev, clss, enable);
break;
default:
rc = -EINVAL;
break;
}
return rc;
@ -1367,7 +1437,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
@ -1873,6 +1944,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_TUNNEL_VXLAN,
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_TUNNEL_GENEVE,
/* Inner */
RTE_PTYPE_INNER_L2_ETHER,
RTE_PTYPE_INNER_L2_ETHER_VLAN,
@ -2221,74 +2293,36 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
static int
qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *tunnel_udp,
bool add)
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *tunnel_udp)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_tunnel_info tunn; /* @DPDK */
struct ecore_hwfn *p_hwfn;
struct ecore_ptt *p_ptt;
uint16_t udp_port;
int rc, i;
int rc;
PMD_INIT_FUNC_TRACE(edev);
memset(&tunn, 0, sizeof(tunn));
if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
/* Enable VxLAN tunnel if needed before UDP port update using
* default MAC/VLAN classification.
*/
if (add) {
if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
"UDP port %u was already configured\n",
tunnel_udp->udp_port);
return ECORE_SUCCESS;
}
/* Enable VXLAN if it was not enabled while adding
* VXLAN filter.
*/
if (!qdev->vxlan.enable) {
rc = qede_vxlan_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, true, true);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Failed to enable VXLAN "
"prior to updating UDP port\n");
return rc;
}
}
udp_port = tunnel_udp->udp_port;
} else {
if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
tunnel_udp->udp_port);
return ECORE_INVAL;
}
udp_port = 0;
switch (tunnel_udp->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
tunnel_udp->udp_port);
return ECORE_INVAL;
}
udp_port = 0;
tunn.vxlan_port.b_update_port = true;
tunn.vxlan_port.port = udp_port;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
if (IS_PF(edev)) {
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EAGAIN;
} else {
p_ptt = NULL;
}
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
tunn.vxlan_port.port);
if (IS_PF(edev))
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
rc = qede_tunnel_update(qdev, &tunn);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
tunn.vxlan_port.port);
return rc;
}
qdev->vxlan.udp_port = udp_port;
@ -2296,26 +2330,145 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
* VXLAN filters have reached 0 then VxLAN offload can be be
* disabled.
*/
if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
return qede_vxlan_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, false, true);
ECORE_TUNN_CLSS_MAC_VLAN, false);
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
tunnel_udp->udp_port);
return ECORE_INVAL;
}
udp_port = 0;
tunn.geneve_port.b_update_port = true;
tunn.geneve_port.port = udp_port;
rc = qede_tunnel_update(qdev, &tunn);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
tunn.vxlan_port.port);
return rc;
}
qdev->vxlan.udp_port = udp_port;
/* If the request is to delete UDP port and if the number of
* GENEVE filters have reached 0 then GENEVE offload can be be
* disabled.
*/
if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
return qede_geneve_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, false);
break;
default:
return ECORE_INVAL;
}
return 0;
}
static int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *tunnel_udp)
{
return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
}
static int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *tunnel_udp)
{
return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_tunnel_info tunn; /* @DPDK */
uint16_t udp_port;
int rc;
PMD_INIT_FUNC_TRACE(edev);
memset(&tunn, 0, sizeof(tunn));
switch (tunnel_udp->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
"UDP port %u for VXLAN was already configured\n",
tunnel_udp->udp_port);
return ECORE_SUCCESS;
}
/* Enable VxLAN tunnel with default MAC/VLAN classification if
* it was not enabled while adding VXLAN filter before UDP port
* update.
*/
if (!qdev->vxlan.enable) {
rc = qede_vxlan_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, true);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Failed to enable VXLAN "
"prior to updating UDP port\n");
return rc;
}
}
udp_port = tunnel_udp->udp_port;
tunn.vxlan_port.b_update_port = true;
tunn.vxlan_port.port = udp_port;
rc = qede_tunnel_update(qdev, &tunn);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
udp_port);
return rc;
}
DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
qdev->vxlan.udp_port = udp_port;
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
"UDP port %u for GENEVE was already configured\n",
tunnel_udp->udp_port);
return ECORE_SUCCESS;
}
/* Enable GENEVE tunnel with default MAC/VLAN classification if
* it was not enabled while adding GENEVE filter before UDP port
* update.
*/
if (!qdev->geneve.enable) {
rc = qede_geneve_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, true);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Failed to enable GENEVE "
"prior to updating UDP port\n");
return rc;
}
}
udp_port = tunnel_udp->udp_port;
tunn.geneve_port.b_update_port = true;
tunn.geneve_port.port = udp_port;
rc = qede_tunnel_update(qdev, &tunn);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
udp_port);
return rc;
}
DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
qdev->geneve.udp_port = udp_port;
break;
default:
return ECORE_INVAL;
}
return 0;
}
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
@ -2382,115 +2535,118 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
return ECORE_SUCCESS;
}
static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op,
const struct rte_eth_tunnel_filter_conf *conf)
static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
const struct rte_eth_tunnel_filter_conf *conf,
__attribute__((unused)) enum rte_filter_op filter_op,
enum ecore_tunn_clss *clss,
bool add)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
enum ecore_filter_ucast_type type;
enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
struct ecore_filter_ucast ucast = {0};
char str[80];
enum ecore_filter_ucast_type type;
uint16_t filter_type = 0;
char str[80];
int rc;
filter_type = conf->filter_type;
/* Determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, clss, str);
if (*clss == MAX_ECORE_TUNN_CLSS) {
DP_ERR(edev, "Unsupported filter type\n");
return -EINVAL;
}
/* Init tunnel ucast params */
rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
conf->filter_type);
return rc;
}
DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
str, filter_op, ucast.type);
ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
/* Skip MAC/VLAN if filter is based on VNI */
if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
rc = qede_mac_int_ops(eth_dev, &ucast, add);
if ((rc == 0) && add) {
/* Enable accept anyvlan */
qede_config_accept_any_vlan(qdev, true);
}
} else {
rc = qede_ucast_filter(eth_dev, &ucast, add);
if (rc == 0)
rc = ecore_filter_ucast_cmd(edev, &ucast,
ECORE_SPQ_MODE_CB, NULL);
}
return rc;
}
static int
qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op,
const struct rte_eth_tunnel_filter_conf *conf)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
bool add;
int rc;
PMD_INIT_FUNC_TRACE(edev);
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
if (IS_VF(edev))
return qede_vxlan_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, true, true);
filter_type = conf->filter_type;
/* Determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
if (clss == MAX_ECORE_TUNN_CLSS) {
DP_ERR(edev, "Unsupported filter type\n");
return -EINVAL;
}
/* Init tunnel ucast params */
rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
conf->filter_type);
return rc;
}
DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
str, filter_op, ucast.type);
ucast.opcode = ECORE_FILTER_ADD;
/* Skip MAC/VLAN if filter is based on VNI */
if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
rc = qede_mac_int_ops(eth_dev, &ucast, 1);
if (rc == 0) {
/* Enable accept anyvlan */
qede_config_accept_any_vlan(qdev, true);
}
} else {
rc = qede_ucast_filter(eth_dev, &ucast, 1);
if (rc == 0)
rc = ecore_filter_ucast_cmd(edev, &ucast,
ECORE_SPQ_MODE_CB, NULL);
}
if (rc != ECORE_SUCCESS)
return rc;
qdev->vxlan.num_filters++;
qdev->vxlan.filter_type = filter_type;
if (!qdev->vxlan.enable)
return qede_vxlan_enable(eth_dev, clss, true, true);
break;
add = true;
break;
case RTE_ETH_FILTER_DELETE:
if (IS_VF(edev))
return qede_vxlan_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, false, true);
filter_type = conf->filter_type;
/* Determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
if (clss == MAX_ECORE_TUNN_CLSS) {
DP_ERR(edev, "Unsupported filter type\n");
return -EINVAL;
}
/* Init tunnel ucast params */
rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
conf->filter_type);
return rc;
}
DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
str, filter_op, ucast.type);
ucast.opcode = ECORE_FILTER_REMOVE;
if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
rc = qede_mac_int_ops(eth_dev, &ucast, 0);
} else {
rc = qede_ucast_filter(eth_dev, &ucast, 0);
if (rc == 0)
rc = ecore_filter_ucast_cmd(edev, &ucast,
ECORE_SPQ_MODE_CB, NULL);
}
if (rc != ECORE_SUCCESS)
return rc;
qdev->vxlan.num_filters--;
/* Disable VXLAN if VXLAN filters become 0 */
if (qdev->vxlan.num_filters == 0)
return qede_vxlan_enable(eth_dev, clss, false, true);
break;
add = false;
break;
default:
DP_ERR(edev, "Unsupported operation %d\n", filter_op);
return -EINVAL;
}
if (IS_VF(edev))
return qede_tunn_enable(eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN,
conf->tunnel_type, add);
rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
if (rc != ECORE_SUCCESS)
return rc;
if (add) {
if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
qdev->vxlan.num_filters++;
qdev->vxlan.filter_type = conf->filter_type;
} else { /* GENEVE */
qdev->geneve.num_filters++;
qdev->geneve.filter_type = conf->filter_type;
}
if (!qdev->vxlan.enable || !qdev->geneve.enable)
return qede_tunn_enable(eth_dev, clss,
conf->tunnel_type,
true);
} else {
if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
qdev->vxlan.num_filters--;
else /*GENEVE*/
qdev->geneve.num_filters--;
/* Disable VXLAN if VXLAN filters become 0 */
if ((qdev->vxlan.num_filters == 0) ||
(qdev->geneve.num_filters == 0))
return qede_tunn_enable(eth_dev, clss,
conf->tunnel_type,
false);
}
return 0;
}
@ -2508,13 +2664,13 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
case RTE_ETH_FILTER_TUNNEL:
switch (filter_conf->tunnel_type) {
case RTE_TUNNEL_TYPE_VXLAN:
case RTE_TUNNEL_TYPE_GENEVE:
DP_INFO(edev,
"Packet steering to the specified Rx queue"
" is not supported with VXLAN tunneling");
return(qede_vxlan_tunn_config(eth_dev, filter_op,
" is not supported with UDP tunneling");
return(qede_tunn_filter_config(eth_dev, filter_op,
filter_conf));
/* Place holders for future tunneling support */
case RTE_TUNNEL_TYPE_GENEVE:
case RTE_TUNNEL_TYPE_TEREDO:
case RTE_TUNNEL_TYPE_NVGRE:
case RTE_TUNNEL_TYPE_IP_IN_GRE:

View File

@ -166,11 +166,14 @@ struct qede_fdir_info {
SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
};
struct qede_vxlan_tunn {
/* IANA assigned default UDP ports for encapsulation protocols */
#define QEDE_VXLAN_DEF_PORT (4789)
#define QEDE_GENEVE_DEF_PORT (6081)
struct qede_udp_tunn {
bool enable;
uint16_t num_filters;
uint16_t filter_type;
#define QEDE_VXLAN_DEF_PORT (4789)
uint16_t udp_port;
};
@ -202,7 +205,8 @@ struct qede_dev {
SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
uint16_t num_uc_addr;
bool handle_hw_err;
struct qede_vxlan_tunn vxlan;
struct qede_udp_tunn vxlan;
struct qede_udp_tunn geneve;
struct qede_fdir_info fdir_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];

View File

@ -1792,7 +1792,9 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
PKT_TX_TUNNEL_VXLAN) ||
((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
PKT_TX_TUNNEL_MPLSINUDP)) {
PKT_TX_TUNNEL_MPLSINUDP) ||
((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
PKT_TX_TUNNEL_GENEVE)) {
/* Check against max which is Tunnel IPv6 + ext */
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))

View File

@ -73,7 +73,8 @@
ETH_RSS_IPV6 |\
ETH_RSS_NONFRAG_IPV6_TCP |\
ETH_RSS_NONFRAG_IPV6_UDP |\
ETH_RSS_VXLAN)
ETH_RSS_VXLAN |\
ETH_RSS_GENEVE)
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
@ -151,6 +152,7 @@
PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_VXLAN | \
PKT_TX_TUNNEL_GENEVE | \
PKT_TX_TUNNEL_MPLSINUDP)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \