common/cnxk: add IPsec common code

Add code that can be leveraged across
lookaside and inline IPsec on cn10k.

Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
This commit is contained in:
Srujana Challa 2021-06-25 11:06:48 +05:30 committed by Akhil Goyal
parent 80ab347db4
commit 78d03027f2
4 changed files with 530 additions and 1 deletions

View File

@ -0,0 +1,468 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#include "cnxk_security.h"
/* Fill SA word2 and key material shared by inbound and outbound SAs.
 *
 * Programs direction, protocol (ESP/AH), mode (transport/tunnel), SPI,
 * and the AES-GCM key/salt into the caller-supplied word2 and buffers.
 * Returns 0 on success, -EINVAL for unrecognized xform values or key
 * lengths, -ENOTSUP for algorithms not handled here (only AEAD AES-GCM
 * is supported by this helper).
 */
static int
ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2,
			      uint8_t *cipher_key, uint8_t *salt_key,
			      struct rte_security_ipsec_xform *ipsec_xfrm,
			      struct rte_crypto_sym_xform *crypto_xfrm)
{
	const uint8_t *key = NULL;
	uint64_t *key_words;
	uint32_t *salt_word;
	int key_len = 0;
	size_t w;

	/* Direction: inbound vs outbound */
	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		w2->s.dir = ROC_IE_OT_SA_DIR_INBOUND;
	else if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		w2->s.dir = ROC_IE_OT_SA_DIR_OUTBOUND;
	else
		return -EINVAL;

	/* Protocol: ESP vs AH */
	if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		w2->s.protocol = ROC_IE_OT_SA_PROTOCOL_ESP;
	else if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		w2->s.protocol = ROC_IE_OT_SA_PROTOCOL_AH;
	else
		return -EINVAL;

	/* Mode: transport vs tunnel */
	if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
		w2->s.mode = ROC_IE_OT_SA_MODE_TRANSPORT;
	else if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		w2->s.mode = ROC_IE_OT_SA_MODE_TUNNEL;
	else
		return -EINVAL;

	/* Encryption algorithm: only AEAD AES-GCM is handled here */
	if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AEAD)
		return -ENOTSUP;
	if (crypto_xfrm->aead.algo != RTE_CRYPTO_AEAD_AES_GCM)
		return -ENOTSUP;

	key = crypto_xfrm->aead.key.data;
	key_len = crypto_xfrm->aead.key.length;
	w2->s.enc_type = ROC_IE_OT_SA_ENC_AES_GCM;
	w2->s.auth_type = ROC_IE_OT_SA_AUTH_NULL;

	/* Salt is stored byte-swapped for the microcode */
	memcpy(salt_key, &ipsec_xfrm->salt, 4);
	salt_word = (uint32_t *)salt_key;
	*salt_word = rte_be_to_cpu_32(*salt_word);

	w2->s.spi = ipsec_xfrm->spi;

	/* Copy the key, then convert every 64-bit word of the (fixed-size)
	 * key buffer to CPU endianness as the microcode expects.
	 */
	memcpy(cipher_key, key, key_len);
	key_words = (uint64_t *)cipher_key;
	for (w = 0; w < ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t); w++)
		key_words[w] = rte_be_to_cpu_64(key_words[w]);

	/* Encode AES key size into word2 */
	if (key_len == ROC_CPT_AES128_KEY_LEN)
		w2->s.aes_key_len = ROC_IE_OT_SA_AES_KEY_LEN_128;
	else if (key_len == ROC_CPT_AES192_KEY_LEN)
		w2->s.aes_key_len = ROC_IE_OT_SA_AES_KEY_LEN_192;
	else if (key_len == ROC_CPT_AES256_KEY_LEN)
		w2->s.aes_key_len = ROC_IE_OT_SA_AES_KEY_LEN_256;
	else
		return -EINVAL;

	return 0;
}
/* Size in bytes of the inbound SA context.
 *
 * The fixed part spans up to the anti-replay window bits; when a replay
 * window is configured (w0.s.ar_win != 0), each increment of ar_win
 * doubles the number of 64-bit winbit words appended.
 */
static size_t
ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa)
{
	size_t sz = offsetof(struct roc_ot_ipsec_inb_sa, ctx) +
		    offsetof(struct roc_ot_ipsec_inb_ctx_update_reg, ar_winbits);

	if (sa->w0.s.ar_win != 0)
		sz += sizeof(uint64_t) << (sa->w0.s.ar_win - 1);

	return sz;
}
/* Fill an inbound (ingress) SA for CN10K IPsec.
 *
 * Populates the common word2 fields via ot_ipsec_sa_common_param_fill(),
 * then programs the anti-replay window, packet format/output defaults,
 * ESN enable and the context sizing fields consumed by CPT hardware.
 * Returns 0 on success, a negative errno from the common fill helper,
 * or -ENOTSUP for unsupported replay window sizes.
 */
int
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
struct rte_crypto_sym_xform *crypto_xfrm)
{
union roc_ot_ipsec_sa_word2 w2;
uint32_t replay_win_sz;
size_t offset;
int rc;
w2.u64 = 0;
rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
ipsec_xfrm, crypto_xfrm);
if (rc)
return rc;
/* Update word2 with the common fields filled above */
sa->w2.u64 = w2.u64;
/* Only power-of-two replay window sizes are supported */
replay_win_sz = ipsec_xfrm->replay_win_sz;
if (replay_win_sz) {
if (!rte_is_power_of_2(replay_win_sz) ||
replay_win_sz > ROC_AR_WIN_SIZE_MAX)
return -ENOTSUP;
/* HW encodes the window as log2(size) - 5.
* NOTE(review): a 32-entry window yields ar_win == 0 — confirm HW
* treats that as enabled rather than as "no anti-replay".
*/
sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
}
/* Default options for pkt_out and pkt_fmt are with
* second pass meta and no defrag.
* NOTE(review): comment says "no defrag" but pkt_output selects
* HW_BASED_DEFRAG below — confirm which default is intended.
*/
sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
sa->w0.s.pkind = ROC_OT_CPT_META_PKIND;
/* Enable Extended Sequence Numbers when requested via xform options */
sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
/* Word offset for HW managed SA field */
sa->w0.s.hw_ctx_off = offset / 8;
/* Context push size for inbound spans up to hw_ctx including
* ar_base field, in 8b units
*/
sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
/* Entire context size in 128B units */
sa->w0.s.ctx_size =
(PLT_ALIGN_CEIL(ot_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) /
ROC_CTX_UNIT_128B) -
1;
/* There are two words of CPT_CTX_HW_S for ucode to skip */
sa->w0.s.ctx_hdr_size = 1;
sa->w0.s.aop_valid = 1;
/* Ensure all SA fields are visible before HW can observe the valid bit */
rte_wmb();
/* Enable SA */
sa->w2.s.valid = 1;
return 0;
}
int
cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
struct rte_crypto_sym_xform *crypto_xfrm)
{
struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
union roc_ot_ipsec_sa_word2 w2;
size_t offset;
int rc;
w2.u64 = 0;
rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt,
ipsec_xfrm, crypto_xfrm);
if (rc)
return rc;
/* Update common word2 data */
sa->w2.u64 = w2.u64;
if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
goto skip_tunnel_info;
/* Tunnel header info */
switch (tunnel->type) {
case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
sa->w2.s.outer_ip_ver = ROC_IE_OT_SA_IP_VERSION_4;
memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
sizeof(struct in_addr));
memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
sizeof(struct in_addr));
/* IP Source and Dest seems to be in LE/CPU endian */
sa->outer_hdr.ipv4.src_addr =
rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
sa->outer_hdr.ipv4.dst_addr =
rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
/* Outer header DF bit source */
if (!ipsec_xfrm->options.copy_df) {
sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
ROC_IE_OT_SA_COPY_FROM_SA;
sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;
} else {
sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
}
/* Outer header DSCP source */
if (!ipsec_xfrm->options.copy_dscp) {
sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
sa->w10.s.dscp = tunnel->ipv4.dscp;
} else {
sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
}
break;
case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
sa->w2.s.outer_ip_ver = ROC_IE_OT_SA_IP_VERSION_6;
memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
sizeof(struct in6_addr));
memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
sizeof(struct in6_addr));
/* Outer header flow label source */
if (!ipsec_xfrm->options.copy_flabel) {
sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
ROC_IE_OT_SA_COPY_FROM_SA;
sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;
} else {
sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
}
/* Outer header DSCP source */
if (!ipsec_xfrm->options.copy_dscp) {
sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
sa->w10.s.dscp = tunnel->ipv6.dscp;
} else {
sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
}
break;
default:
return -EINVAL;
}
/* Default options of DSCP and Flow label/DF */
sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;
skip_tunnel_info:
/* ESN */
sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;
offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
/* Word offset for HW managed SA field */
sa->w0.s.hw_ctx_off = offset / 8;
/* Context push size is up to hmac_opad_ipad */
sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off;
/* Entire context size in 128B units */
offset = sizeof(struct roc_ot_ipsec_outb_sa);
sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) /
ROC_CTX_UNIT_128B) -
1;
/* IPID gen */
sa->w2.s.ipid_gen = 1;
/* There are two words of CPT_CTX_HW_S for ucode to skip */
sa->w0.s.ctx_hdr_size = 1;
sa->w0.s.aop_valid = 1;
rte_wmb();
/* Enable SA */
sa->w2.s.valid = 1;
return 0;
}
/* True when the inbound SA's word2 valid bit is set (SA enabled for HW). */
bool
cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa)
{
	return sa->w2.s.valid != 0;
}
/* True when the outbound SA's word2 valid bit is set (SA enabled for HW). */
bool
cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
{
	return sa->w2.s.valid != 0;
}
/* Per-packet IV length in bytes for the given algorithm combination.
 *
 * Precedence mirrors the sequential overrides of the original logic:
 * AES-GMAC auth wins over any cipher choice, a recognized cipher wins
 * over the AEAD default, and AES-GCM AEAD alone gives 8. Unrecognized
 * combinations yield 0.
 */
uint8_t
cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
		     enum rte_crypto_auth_algorithm a_algo,
		     enum rte_crypto_aead_algorithm aead_algo)
{
	uint8_t iv_bytes = 0;

	if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
		iv_bytes = 8;

	if (c_algo == RTE_CRYPTO_CIPHER_AES_CTR)
		iv_bytes = 8;
	else if (c_algo == RTE_CRYPTO_CIPHER_3DES_CBC)
		iv_bytes = ROC_CPT_DES_BLOCK_LENGTH;
	else if (c_algo == RTE_CRYPTO_CIPHER_AES_CBC)
		iv_bytes = ROC_CPT_AES_BLOCK_LENGTH;

	if (a_algo == RTE_CRYPTO_AUTH_AES_GMAC)
		iv_bytes = 8;

	return iv_bytes;
}
/* ICV (integrity check value) length in bytes for the given algorithms.
 *
 * AES-GCM AEAD always carries a 16-byte ICV and takes precedence over
 * the auth algorithm (matching the original's last-write-wins order).
 * The cipher algorithm does not influence the ICV length.
 */
uint8_t
cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
		      enum rte_crypto_auth_algorithm a_algo,
		      enum rte_crypto_aead_algorithm aead_algo)
{
	(void)c_algo;

	if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
		return 16;

	switch (a_algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		return 12;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
	case RTE_CRYPTO_AUTH_AES_GMAC:
		return 16;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		return 24;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		return 32;
	case RTE_CRYPTO_AUTH_NULL:
	default:
		return 0;
	}
}
/* ESP payload padding granularity (in bytes) for outbound packets.
 *
 * Block ciphers pad to their block size; counter-mode, NULL cipher,
 * AES-GCM AEAD and anything unrecognized pad to the ESP minimum of 4.
 */
uint8_t
cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
			     enum rte_crypto_aead_algorithm aead_algo)
{
	if (aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
		return 4;

	switch (c_algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		return 16;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		return 8;
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_NULL:
	default:
		return 4;
	}
}
/* Compute outbound packet length-expansion data for an SA.
 *
 * Derives the cipher/auth/AEAD algorithms from the (possibly chained)
 * crypto transforms, then fills rlens with the fixed header overhead
 * (ESP/AH header + tunnel header + IV + ICV), the ESP trailer length,
 * the cipher padding unit and the worst-case total growth. Always
 * returns 0.
 */
int
cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
			  struct rte_security_ipsec_xform *ipsec_xfrm,
			  struct rte_crypto_sym_xform *crypto_xfrm)
{
	enum rte_crypto_cipher_algorithm c_algo = RTE_CRYPTO_CIPHER_NULL;
	enum rte_crypto_auth_algorithm a_algo = RTE_CRYPTO_AUTH_NULL;
	enum rte_crypto_aead_algorithm aead_algo = 0;
	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
	uint16_t hdr_len = 0;
	uint8_t pad_unit;
	int8_t trl_len = 0;

	memset(rlens, 0, sizeof(*rlens));

	/* Pick algorithms from the crypto transform chain */
	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_algo = crypto_xfrm->aead.algo;
	} else {
		if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			c_algo = crypto_xfrm->cipher.algo;
		else
			a_algo = crypto_xfrm->auth.algo;

		if (crypto_xfrm->next != NULL) {
			if (crypto_xfrm->next->type ==
			    RTE_CRYPTO_SYM_XFORM_CIPHER)
				c_algo = crypto_xfrm->next->cipher.algo;
			else
				a_algo = crypto_xfrm->next->auth.algo;
		}
	}

	/* Protocol header; only ESP carries a trailer */
	if (ipsec_xfrm->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		hdr_len = ROC_CPT_ESP_HDR_LEN;
		trl_len = ROC_CPT_ESP_TRL_LEN;
	} else {
		hdr_len = ROC_CPT_AH_HDR_LEN;
	}

	/* Tunnel mode adds the outer IP header */
	if (ipsec_xfrm->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		hdr_len += (tunnel->type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
				   ROC_CPT_TUNNEL_IPV4_HDR_LEN :
				   ROC_CPT_TUNNEL_IPV6_HDR_LEN;

	hdr_len += cnxk_ipsec_ivlen_get(c_algo, a_algo, aead_algo);
	hdr_len += cnxk_ipsec_icvlen_get(c_algo, a_algo, aead_algo);
	pad_unit = cnxk_ipsec_outb_roundup_byte(c_algo, aead_algo);

	rlens->partial_len = hdr_len;
	rlens->roundup_len = trl_len;
	rlens->roundup_byte = pad_unit;
	rlens->max_extended_len = hdr_len + trl_len + pad_unit;

	return 0;
}

View File

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#ifndef _CNXK_SECURITY_H__
#define _CNXK_SECURITY_H__
/* NOTE(review): guard name starts with underscore + uppercase, which is
 * reserved for the implementation; kept as-is to match project convention.
 */
#include <rte_crypto.h>
#include <rte_security.h>
#include "roc_api.h"
/* Outbound packet length-expansion data computed from an IPsec SA's
 * algorithms: fixed overhead, padding granularity, trailer length and
 * the worst-case total growth per packet.
 */
struct cnxk_ipsec_outb_rlens {
uint16_t partial_len; /* fixed hdr + IV + ICV overhead in bytes */
uint8_t roundup_byte; /* padding granularity of the cipher */
int8_t roundup_len; /* ESP trailer bytes (0 for AH) */
uint16_t max_extended_len; /* partial_len + roundup_len + roundup_byte */
};
/* Fill rlens from the IPsec/crypto transforms. Always returns 0. */
int __roc_api
cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
struct rte_security_ipsec_xform *ipsec_xfrm,
struct rte_crypto_sym_xform *crypto_xfrm);
/* Per-packet IV length in bytes; 0 for unrecognized combinations. */
uint8_t __roc_api
cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
enum rte_crypto_auth_algorithm a_algo,
enum rte_crypto_aead_algorithm aead_algo);
/* ICV length in bytes; c_algo is ignored. */
uint8_t __roc_api
cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
enum rte_crypto_auth_algorithm a_algo,
enum rte_crypto_aead_algorithm aead_algo);
/* ESP payload padding unit in bytes (minimum 4). */
uint8_t __roc_api
cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
enum rte_crypto_aead_algorithm aead_algo);
/* [CN10K, .) */
/* Fill inbound/outbound SA HW contexts from the security/crypto xforms.
 * Return 0 on success or a negative errno (-EINVAL, -ENOTSUP).
 */
int __roc_api
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
struct rte_crypto_sym_xform *crypto_xfrm);
int __roc_api
cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
struct rte_crypto_sym_xform *crypto_xfrm);
/* True when the SA's valid bit has been set (SA enabled for HW use). */
bool __roc_api cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa);
bool __roc_api cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa);
#endif /* _CNXK_SECURITY_H__ */

View File

@ -9,7 +9,7 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
endif
config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
# 'security' is needed for the rte_security IPsec xform definitions
# used by cnxk_security.c. (Removed stale duplicate 'deps' assignment
# left over from the diff; the first assignment was dead.)
deps = ['eal', 'pci', 'bus_pci', 'mbuf', 'security']
sources = files(
'roc_ae.c',
'roc_ae_fpm_tables.c',
@ -55,4 +55,8 @@ sources = files(
'roc_tim_irq.c',
'roc_utils.c',
)
# Security common code
sources += files('cnxk_security.c')
includes += include_directories('../../bus/pci')

View File

@ -1,6 +1,10 @@
INTERNAL {
global:
cnxk_ipsec_icvlen_get;
cnxk_ipsec_ivlen_get;
cnxk_ipsec_outb_rlens_get;
cnxk_ipsec_outb_roundup_byte;
cnxk_logtype_base;
cnxk_logtype_cpt;
cnxk_logtype_mbox;
@ -10,6 +14,10 @@ INTERNAL {
cnxk_logtype_sso;
cnxk_logtype_tim;
cnxk_logtype_tm;
cnxk_ot_ipsec_inb_sa_fill;
cnxk_ot_ipsec_inb_sa_valid;
cnxk_ot_ipsec_outb_sa_fill;
cnxk_ot_ipsec_outb_sa_valid;
roc_ae_ec_grp_get;
roc_ae_ec_grp_put;
roc_ae_fpm_get;