numam-dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h
Venkat Duvvuru 675e31d877 net/bnxt: support VXLAN decap offload
VXLAN decap offload can happen in stages. The offload request may
not come as a single flow request; rather, it may come as two flow
offload requests F1 & F2. This patch adds support for this two-stage
offload design. The match criteria for F1 is O_DMAC, O_SMAC,
O_DST_IP, O_UDP_DPORT and actions are COUNT, MARK, JUMP. The match
criteria for F2 is O_SRC_IP, O_DST_IP, VNI and inner header fields.
F1 and F2 flow offload requests can come in any order. If F2 flow
offload request comes first then F2 can’t be offloaded as there is
no O_DMAC information in F2. In this case, F2 will be deferred until
F1 flow offload request arrives. When F1 flow offload request is
received it will have O_DMAC information. Using F1’s O_DMAC, driver
creates an L2 context entry in the hardware as part of offloading F1.
F2 will now use F1’s O_DMAC to get the L2 context id associated with
this O_DMAC and other flow fields that are cached already at the time
of deferring F2 for offloading. F2s that arrive after F1 is offloaded
will be directly programmed and not cached.

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
2020-11-03 23:35:03 +01:00

75 lines
1.9 KiB
C

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2014-2019 Broadcom
* All rights reserved.
*/
#ifndef _BNXT_TF_COMMON_H_
#define _BNXT_TF_COMMON_H_
#include <inttypes.h>
#include "bnxt_ulp.h"
#include "ulp_template_db_enum.h"
/* Debug/log wrapper for the TruFlow ULP layer; forwards to the PMD logger. */
#define BNXT_TF_DBG(lvl, fmt, args...) PMD_DRV_LOG(lvl, fmt, ## args)

/* Number of exact-match (EM) flows -- presumably the internal EM table
 * capacity; TODO confirm against table allocation code.
 */
#define BNXT_ULP_EM_FLOWS 8192
/* 1M-flow scale factor used to size/partition EEM global flow IDs. */
#define BNXT_ULP_1M_FLOWS 1000000
/* NOTE(review): 1000000 is not a power of two, so (1M - 1) = 0xF423F is
 * not a contiguous bit mask despite the _MASK name -- confirm this is a
 * range bound rather than a true bit mask.
 */
#define BNXT_EEM_RX_GLOBAL_ID_MASK (BNXT_ULP_1M_FLOWS - 1)
#define BNXT_EEM_TX_GLOBAL_ID_MASK (BNXT_ULP_1M_FLOWS - 1)
/* Flag bit (bit 27) marking that the second EEM hash key is in use. */
#define BNXT_EEM_HASH_KEY2_USED 0x8000000
/* RX global IDs at or above this value select the HW hash key 2 space. */
#define BNXT_EEM_RX_HW_HASH_KEY2_BIT BNXT_ULP_1M_FLOWS

/* Default RX-direction EEM table sizing parameters. */
#define BNXT_ULP_DFLT_RX_MAX_KEY 512
#define BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY 256
#define BNXT_ULP_DFLT_RX_MEM 0
#define BNXT_ULP_RX_NUM_FLOWS 32
#define BNXT_ULP_RX_TBL_IF_ID 0
/* Default TX-direction EEM table sizing parameters (mirror of RX). */
#define BNXT_ULP_DFLT_TX_MAX_KEY 512
#define BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY 256
#define BNXT_ULP_DFLT_TX_MEM 0
#define BNXT_ULP_TX_NUM_FLOWS 32
#define BNXT_ULP_TX_TBL_IF_ID 0
/* Return codes used throughout the TruFlow ULP layer.
 * Negative values are errors; non-negative values are success variants.
 */
enum bnxt_tf_rc {
	BNXT_TF_RC_PARSE_ERR	= -2,	/* flow pattern/action parse failure */
	BNXT_TF_RC_ERROR	= -1,	/* generic failure */
	BNXT_TF_RC_SUCCESS	= 0,	/* operation completed */
	BNXT_TF_RC_NORMAL	= 1,	/* success, normal processing path */
	BNXT_TF_RC_FID		= 2,	/* success, flow-ID specific path */
};
/* Ethernet-encapsulated IP version selector (covers both IPv4 and IPv6,
 * despite the historical "IPv4" naming of this comment in older revisions).
 */
enum bnxt_ulp_eth_ip_type {
	BNXT_ULP_ETH_IPV4 = 4,	/* IP version field value for IPv4 */
	BNXT_ULP_ETH_IPV6 = 5,	/* IP version field value for IPv6 */
	/* NOTE(review): MAX deliberately set to 0, not one past IPV6 --
	 * presumably a sentinel rather than a count; confirm before using
	 * it as an array bound.
	 */
	BNXT_ULP_MAX_ETH_IP_TYPE = 0
};
/* Direction of a ULP flow relative to the port. */
enum bnxt_ulp_direction_type {
	BNXT_ULP_DIR_INVALID = 0,	/* direction not yet resolved */
	BNXT_ULP_DIR_INGRESS = 1,	/* flow matches received traffic */
	BNXT_ULP_DIR_EGRESS  = 2,	/* flow matches transmitted traffic */
};
/* Kinds of interfaces the ULP layer can attach flows to. */
enum bnxt_ulp_intf_type {
	BNXT_ULP_INTF_TYPE_INVALID    = 0,	/* unknown/unset interface */
	BNXT_ULP_INTF_TYPE_PF         = 1,	/* physical function */
	BNXT_ULP_INTF_TYPE_TRUSTED_VF = 2,	/* trusted virtual function */
	BNXT_ULP_INTF_TYPE_VF         = 3,	/* virtual function */
	BNXT_ULP_INTF_TYPE_PF_REP     = 4,	/* PF representor port */
	BNXT_ULP_INTF_TYPE_VF_REP     = 5,	/* VF representor port */
	BNXT_ULP_INTF_TYPE_PHY_PORT   = 6,	/* physical port */
	BNXT_ULP_INTF_TYPE_LAST       = 7	/* count / end sentinel */
};
/**
 * Retrieve the mark database pointer stored in the ULP context.
 *
 * @param ulp_ctx  ULP context to query.
 * @return pointer to the mark table, or presumably NULL when unset --
 *         TODO confirm against the definition.
 */
struct bnxt_ulp_mark_tbl *
bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx);

/**
 * Store the mark database pointer in the ULP context.
 *
 * @param ulp_ctx   ULP context to update.
 * @param mark_tbl  mark table to attach (ownership semantics defined at
 *                  the implementation -- not visible from this header).
 * @return 0 on success, negative value on failure (bnxt_tf_rc style).
 */
int32_t
bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_mark_tbl *mark_tbl);
#endif /* _BNXT_TF_COMMON_H_ */