net/txgbe: add IPsec context creation

Initialize security context, and add support to get
security capabilities.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
This commit is contained in:
Jiawen Wu 2020-12-18 17:36:58 +08:00 committed by Ferruh Yigit
parent 1784c5f947
commit f437d97c3d
5 changed files with 194 additions and 1 deletions

View File

@ -28,6 +28,7 @@ VLAN filter = Y
Flow control = Y
Flow API = Y
Rate limitation = Y
Inline crypto = Y
CRC offload = P
VLAN offload = P
QinQ offload = P

View File

@ -8,13 +8,14 @@ sources = files(
'txgbe_ethdev.c',
'txgbe_fdir.c',
'txgbe_flow.c',
'txgbe_ipsec.c',
'txgbe_ptypes.c',
'txgbe_pf.c',
'txgbe_rxtx.c',
'txgbe_tm.c',
)
deps += ['hash']
deps += ['hash', 'security']
includes += include_directories('base')

View File

@ -547,6 +547,12 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* Unlock any pending hardware semaphore */
txgbe_swfw_lock_reset(hw);
#ifdef RTE_LIB_SECURITY
/* Initialize security_ctx only for primary process*/
if (txgbe_ipsec_ctx_create(eth_dev))
return -ENOMEM;
#endif
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
txgbe_dcb_init(hw, dcb_config);
@ -1963,6 +1969,10 @@ txgbe_dev_close(struct rte_eth_dev *dev)
/* Remove all Traffic Manager configuration */
txgbe_tm_conf_uninit(dev);
#ifdef RTE_LIB_SECURITY
rte_free(dev->security_ctx);
#endif
return ret;
}

View File

@ -632,6 +632,10 @@ txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
return idx;
}
#ifdef RTE_LIB_SECURITY
int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
#endif
/* High threshold controlling when to start sending XOFF frames. */
#define TXGBE_FC_XOFF_HITH 128 /*KB*/
/* Low threshold controlling when to start sending XON frames. */

View File

@ -0,0 +1,177 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2015-2020
*/
#include <rte_ethdev_pci.h>
#include <rte_security_driver.h>
#include <rte_cryptodev.h>
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
/*
 * rte_security .capabilities_get callback.
 *
 * Returns the static table of security capabilities advertised by the
 * device: ESP inline crypto in both transport and tunnel mode, for both
 * ingress and egress, backed by 128-bit AES-GCM (AEAD) and 128-bit
 * AES-GMAC (auth-only).  The crypto list is terminated by an
 * RTE_CRYPTO_OP_TYPE_UNDEFINED entry and the security list by an
 * RTE_SECURITY_ACTION_TYPE_NONE entry, per the rte_security convention.
 */
static const struct rte_security_capability *
txgbe_crypto_capabilities_get(void *device __rte_unused)
{
/* Crypto transforms usable with the IPsec capabilities below. */
static const struct rte_cryptodev_capabilities
aes_gcm_gmac_crypto_capabilities[] = {
{ /* AES GMAC (128-bit) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_AES_GMAC,
.block_size = 16,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
}
}, }
}, }
},
{ /* AES GCM (128-bit) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
{.aead = {
.algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
.aad_size = {
.min = 0,
.max = 65535,
.increment = 1
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
}
}, }
}, }
},
{ /* End-of-list marker. */
.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
}, }
},
};
/* Supported IPsec offload combinations (mode x direction). */
static const struct rte_security_capability
txgbe_security_capabilities[] = {
{ /* IPsec Inline Crypto ESP Transport Egress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
{.ipsec = {
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
.options = { 0 }
} },
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
/* Tx path needs per-packet metadata to locate the SA. */
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
},
{ /* IPsec Inline Crypto ESP Transport Ingress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
{.ipsec = {
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
.options = { 0 }
} },
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
.ol_flags = 0
},
{ /* IPsec Inline Crypto ESP Tunnel Egress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
{.ipsec = {
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
.options = { 0 }
} },
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
/* Tx path needs per-packet metadata to locate the SA. */
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
},
{ /* IPsec Inline Crypto ESP Tunnel Ingress */
.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
{.ipsec = {
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
.options = { 0 }
} },
.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
.ol_flags = 0
},
{ /* End-of-list marker. */
.action = RTE_SECURITY_ACTION_TYPE_NONE
}
};
return txgbe_security_capabilities;
}
/* rte_security driver ops registered with the security context; only the
 * capability query is implemented at this stage.
 */
static struct rte_security_ops txgbe_security_ops = {
.capabilities_get = txgbe_crypto_capabilities_get
};
/*
 * Probe whether the NIC's Rx crypto engine is usable.
 *
 * Attempts to clear the ODSA bit in SECRXCTL and reads it back: if the
 * bit does not clear, the security engine cannot be enabled.  The
 * register's initial contents are restored before returning.
 *
 * @param dev  ethdev whose hardware is probed
 * @return 1 when inline crypto is usable, 0 otherwise
 */
static int
txgbe_crypto_capable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t saved;
	uint32_t probed;
	int capable;

	/* Save the register, try to clear ODSA, then check if it stuck. */
	saved = rd32(hw, TXGBE_SECRXCTL);
	wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
	probed = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
	capable = (probed == 0);

	/* Write back the initial value so the probe is side-effect free. */
	wr32(hw, TXGBE_SECRXCTL, saved);
	return capable;
}
/*
 * Create and attach the rte_security context for an ethdev.
 *
 * The context is allocated only when the hardware probe reports inline
 * crypto support; the mbuf security dynamic field is registered in
 * either case.  The context is released in the device close path.
 *
 * @param dev  ethdev to attach the security context to
 * @return 0 on success, -ENOMEM on allocation failure, or -rte_errno
 *         when dynamic field registration fails
 */
int
txgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
{
	if (txgbe_crypto_capable(dev)) {
		struct rte_security_ctx *ctx;

		ctx = rte_malloc("rte_security_instances_ops",
				 sizeof(*ctx), 0);
		if (ctx == NULL)
			return -ENOMEM;

		ctx->device = (void *)dev;
		ctx->ops = &txgbe_security_ops;
		ctx->sess_cnt = 0;
		dev->security_ctx = ctx;
	}

	if (rte_security_dynfield_register() < 0)
		return -rte_errno;
	return 0;
}