net/iavf: support IPsec inline crypto
Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.

Implement support for rte_security packet metadata.

Add definitions for IPsec descriptors, and extend the offload support
in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgment
from the physical function driver confirming receipt of the request,
and then an asynchronous response reporting success or failure of the
request, including any response data.

Add enhanced descriptor debugging.

Refactor the scalar Tx burst function to support integration of the
offload.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
parent 8410842505
commit 6bc987ecb8
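
At the API level, an application drives this offload through the standard rte_security interface rather than anything iavf-specific. A minimal sketch of creating an inline-crypto ESP session on an iavf port (the key, SPI and transform values are illustrative placeholders, not taken from this patch; the 8-byte GCM IV matches the capability table added later in this commit):

#include <rte_crypto.h>
#include <rte_ethdev.h>
#include <rte_security.h>

/* Sketch: create an inline-crypto ESP transport session on port_id.
 * All key/SPI values are placeholders. */
static struct rte_security_session *
create_inline_esp_session(uint16_t port_id, struct rte_mempool *mp,
                          struct rte_mempool *priv_mp)
{
        struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
        static uint8_t key[16]; /* placeholder AES-GCM key */

        struct rte_crypto_sym_xform aead = {
                .type = RTE_CRYPTO_SYM_XFORM_AEAD,
                .aead = {
                        .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
                        .algo = RTE_CRYPTO_AEAD_AES_GCM,
                        .key = { .data = key, .length = sizeof(key) },
                        .iv = { .offset = 0, .length = 8 },
                        .digest_length = 16,
                },
        };
        struct rte_security_session_conf conf = {
                .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .spi = 0x1000,  /* placeholder SPI */
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                },
                .crypto_xform = &aead,
        };

        if (sec_ctx == NULL)
                return NULL;    /* port has no security context */
        return rte_security_session_create(sec_ctx, &conf, mp, priv_mp);
}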
@@ -27,6 +27,7 @@ L4 checksum offload = P
 Packet type parsing = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto = Y
 Basic stats = Y
 Multiprocess aware = Y
 FreeBSD = Y
@@ -65,3 +66,4 @@ mark = Y
 passthru = Y
 queue = Y
 rss = Y
+security = Y
@@ -633,3 +633,13 @@ Windows Support
 
 * To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
   <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+* IAVF PMD supports inline crypto processing depending on the underlying
+  hardware crypto capabilities. IPsec Security Gateway Sample Application
+  supports inline IPsec processing for IAVF PMD. For more details see the
+  IPsec Security Gateway Sample Application and Security library
+  documentation.
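
Before any of this is usable the application has to request the security offload at configure time, and only when the device reports it. A hedged sketch against the standard ethdev API (names are illustrative):

#include <rte_ethdev.h>

/* Sketch: request Rx/Tx security offloads only if the PMD reports them,
 * as iavf_dev_info_get() now does when inline crypto is supported. */
static int
configure_port_with_security(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_dev_info info;
        struct rte_eth_conf conf = {0};
        int ret = rte_eth_dev_info_get(port_id, &info);

        if (ret != 0)
                return ret;

        if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY)
                conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
        if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SECURITY)
                conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}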
@@ -163,6 +163,7 @@ New Features
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added PPPoL2TPv2oUDP RSS hash based on inner IP address and TCP/UDP port.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -424,5 +431,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1718,6 +1734,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp", IAVF_PROTO_XTR_TCP },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1726,8 +1743,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+		    "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2375,6 +2392,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
@@ -1766,6 +1766,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1840,6 +1841,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2149,6 +2153,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
 	if (*engine)
 		return 0;
 
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2195,6 +2206,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
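
The new IPSEC_CRYPTO parser list gives inbound SAs their own classification stage. From the application side it is reached through a plain rte_flow rule whose fate action is SECURITY, as in this hedged sketch (pattern items are illustrative; ipsec-secgw builds an equivalent rule):

#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_security.h>

/* Sketch: bind an inbound IPv4/ESP flow (matched on SPI) to an
 * inline-crypto session via the SECURITY action. */
static struct rte_flow *
create_inbound_esp_flow(uint16_t port_id, struct rte_security_session *sess,
                        uint32_t spi, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_esp esp_spec = {
                .hdr = { .spi = rte_cpu_to_be_32(spi) },
        };
        struct rte_flow_item_esp esp_mask = {
                .hdr = { .spi = RTE_BE32(0xffffffff) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_ESP,
                  .spec = &esp_spec, .mask = &esp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}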
@@ -464,6 +464,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -477,6 +478,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
 1894  drivers/net/iavf/iavf_ipsec_crypto.c  (new file; diff suppressed because it is too large)
  160  drivers/net/iavf/iavf_ipsec_crypto.h  (new file)
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata data structure used to hold parameters required by the iAVF
+ * transmit data path. Parameters set for session by calling
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;		/* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extender headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;			/* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context resources
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
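
The iavf_ipsec_crypto_pkt_metadata structure above is the contract between the session code and the Tx burst path: the application never fills it directly, it only calls rte_security_set_pkt_metadata(), and the PMD writes the structure into the mbuf dynamic field registered at the offset returned by iavf_security_get_pkt_md_offset(). A hedged application-side sketch:

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Sketch: attach an inline-crypto session to an outbound mbuf so the
 * PMD can build the IPsec Tx descriptor from the dynfield metadata. */
static int
send_with_inline_crypto(uint16_t port_id, uint16_t queue_id,
                        struct rte_security_session *sess, struct rte_mbuf *m)
{
        struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

        if (sec_ctx == NULL)
                return -1;      /* port has no security context */

        m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
        if (rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL) != 0)
                return -1;

        return rte_eth_tx_burst(port_id, queue_id, &m, 1) == 1 ? 0 : -1;
}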
  383  drivers/net/iavf/iavf_ipsec_crypto_capabilities.h  (new file)
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
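
The table above is exported through the generic capability query path, so an application can discover inline AES-GCM support without touching PMD internals. A hedged sketch of that lookup (iteration convention follows the rte_security examples, with the list terminated by an RTE_SECURITY_ACTION_TYPE_NONE entry):

#include <rte_cryptodev.h>
#include <rte_security.h>

/* Sketch: check whether a security context advertises AES-GCM for
 * inline-crypto IPsec. */
static int
supports_inline_aes_gcm(struct rte_security_ctx *sec_ctx)
{
        const struct rte_security_capability *caps =
                rte_security_capabilities_get(sec_ctx);
        unsigned int i;

        for (i = 0; caps != NULL &&
             caps[i].action != RTE_SECURITY_ACTION_TYPE_NONE; i++) {
                const struct rte_cryptodev_capabilities *c;

                if (caps[i].action != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
                    caps[i].protocol != RTE_SECURITY_PROTOCOL_IPSEC)
                        continue;

                for (c = caps[i].crypto_capabilities;
                     c->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; c++)
                        if (c->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
                            c->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                            c->sym.aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
                                return 1;
        }
        return 0;
}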
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1084,6 +1100,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
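
On the application side the status decoding above surfaces as two mbuf flags only. A short sketch of the consumer-side check:

#include <rte_mbuf.h>

/* Sketch: classify an Rx packet according to the flags set by
 * iavf_flex_rxd_to_ipsec_crypto_status(). Returns 1 when hardware
 * decrypted and authenticated the packet, -1 on offload failure,
 * 0 when the packet was not security-processed at all. */
static inline int
rx_inline_ipsec_result(const struct rte_mbuf *m)
{
        if (!(m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD))
                return 0;
        if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)
                return -1;      /* SAD miss, ICV failure, length error... */
        return 1;
}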
@@ -1402,6 +1482,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1544,6 +1626,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1782,6 +1866,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2094,6 +2180,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2127,15 +2225,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2164,7 +2266,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2176,8 +2279,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2191,6 +2299,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+			((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+			((uint64_t)md->esp_trailer_len <<
+					IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+			((uint64_t)md->next_proto <<
+					IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+			((uint64_t)(md->len_iv & 0x3) <<
+					IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+			((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+					1ULL : 0ULL) <<
+					IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+			(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
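
To make the quadword packing above concrete, here is a small self-contained worked example (values are illustrative) that packs QW0 the same way iavf_fill_ipsec_desc() does and re-extracts the fields with the masks from iavf_ipsec_crypto.h:

#include <assert.h>
#include <stdint.h>

/* Worked example of the IPsec Tx descriptor QW0 layout:
 * bits 13:0 L4 payload length, bits 47:16 ESN, bits 53:48 trailer length. */
static uint64_t
pack_ipsec_qw0(uint16_t l4_payload_len, uint32_t esn, uint8_t trailer_len)
{
        return ((uint64_t)l4_payload_len << 0) |
               ((uint64_t)esn << 16) |
               ((uint64_t)trailer_len << 48);
}

int main(void)
{
        /* 1280-byte L4 payload, ESN 7, 6-byte ESP trailer. */
        uint64_t qw0 = pack_ipsec_qw0(1280, 7, 6);

        assert((qw0 & 0x3FFF) == 1280);          /* L4PAYLEN mask */
        assert(((qw0 >> 16) & 0xFFFFFFFF) == 7); /* IPSECESN mask */
        assert(((qw0 >> 48) & 0x3F) == 6);       /* TRAILERLEN mask */
        return 0;
}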
@@ -2296,6 +2436,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2324,7 +2475,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2334,17 +2487,24 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
 					RTE_MBUF_F_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2395,7 +2555,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe->mbuf = NULL;
 		}
 
-		iavf_fill_context_desc(ctx_desc, mb, &tlen);
+		iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 		IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 		txe->last_id = desc_idx_last;
@@ -2403,7 +2563,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 		}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
+
 		mb_seg = mb;
 
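
The reworked burst loop budgets ring slots per packet before writing anything: one data descriptor per mbuf segment, plus an optional context descriptor for TSO/tunnelling, plus an optional IPsec descriptor when RTE_MBUF_F_TX_SEC_OFFLOAD is set. A trivial sketch of that accounting (helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

static inline uint16_t
tx_descs_needed(uint16_t nb_segs, bool needs_ctx, bool needs_ipsec)
{
        return nb_segs + (needs_ctx ? 1 : 0) + (needs_ipsec ? 1 : 0);
}

/* e.g. a 2-segment TSO packet with inline IPsec consumes 4 ring slots:
 * tx_descs_needed(2, true, true) == 4 */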
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (			\
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
-		RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |		\
+		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (			\
 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
@@ -47,23 +48,26 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
 
-#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |	\
+#define IAVF_TX_CKSUM_OFFLOAD_MASK (		\
+		RTE_MBUF_F_TX_IP_CKSUM |	\
 		RTE_MBUF_F_TX_L4_MASK |		\
 		RTE_MBUF_F_TX_TCP_SEG)
 
-#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 |	\
+#define IAVF_TX_OFFLOAD_MASK (			\
+		RTE_MBUF_F_TX_OUTER_IPV6 |	\
 		RTE_MBUF_F_TX_OUTER_IPV4 |	\
 		RTE_MBUF_F_TX_IPV6 |		\
 		RTE_MBUF_F_TX_IPV4 |		\
 		RTE_MBUF_F_TX_VLAN |		\
 		RTE_MBUF_F_TX_IP_CKSUM |	\
 		RTE_MBUF_F_TX_L4_MASK |		\
-		RTE_MBUF_F_TX_TCP_SEG)
+		RTE_MBUF_F_TX_TCP_SEG |		\
+		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -161,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -209,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -243,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD, for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -345,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -364,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP = 21,
 	IAVF_RXDID_COMMS_OVS_1 = 22,
 	IAVF_RXDID_COMMS_OVS_2 = 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO = 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET = 25,
 	IAVF_RXDID_LAST = 63,
 };
@@ -391,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -403,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -670,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
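
When a queue is bound to RXDID profile 24, the SA index lands in the first 32-bit word of the mbuf's dynfield1 area, mirroring iavf_flex_rxd_to_ipsec_crypto_said_get() above. A hedged reader-side sketch (whether dynfield1[0] is the stable application-facing location is an assumption drawn from this diff; the exported dynflag mask is the documented part):

#include <rte_mbuf.h>
#include <rte_pmd_iavf.h>

/* Sketch: fetch the extracted inline IPsec SAID for a packet received
 * on a queue started with devargs proto_xtr=ipsec_crypto_said. */
static inline int
rx_pkt_said(const struct rte_mbuf *m, uint32_t *said)
{
        if (!(m->ol_flags &
              rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask))
                return -1;      /* no SAID extracted for this packet */

        *said = m->dynfield1[0];        /* 20-bit SA index */
        return 0;
}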
@@ -1776,3 +1776,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
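
The helper above is deliberately generic: the session layer serializes a request, the PF acks receipt on the mailbox, and the eventual response payload is copied back into the caller's buffer. A heavily hedged sketch of a caller; the request buffer and opcode below are stand-ins for the shared virtchnl inline-ipsec definitions, not their real layout:

#include <stdint.h>
#include <string.h>

/* Hypothetical wrapper: query PF inline IPsec capabilities through the
 * VIRTCHNL_OP_INLINE_IPSEC_CRYPTO mailbox round trip. */
static int
query_ipsec_caps(struct iavf_adapter *adapter, struct virtchnl_ipsec_cap *cap)
{
        uint8_t req[64];        /* placeholder request message */

        memset(req, 0, sizeof(req));
        /* ...fill in the GET_CAP opcode per the virtchnl definitions... */

        return iavf_ipsec_crypto_request(adapter, req, sizeof(req),
                                         (uint8_t *)cap, sizeof(*cap));
}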
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };