crypto/octeontx2: support AES-CBC SHA1-HMAC

Support for the AES-CBC SHA1-HMAC cipher/auth combination is added in
lookaside protocol mode. The functionality is verified using the
ipsec-secgw application.

Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Author: Ankur Dwivedi, 2020-12-19 12:24:55 +05:30
Committed by: Akhil Goyal
Commit: 8f685ec2d5 (parent: bab97a3ffb)
5 changed files with 92 additions and 45 deletions
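Before the per-file changes, a minimal sketch of what this enables on the application side: chaining an AES-CBC cipher xform with a SHA1-HMAC auth xform for an egress lookaside-protocol session. The structures are the standard rte_cryptodev ones; the helper name, key lengths, digest length and IV offset below are illustrative and not taken from this patch.

#include <stdint.h>
#include <rte_crypto.h>

/*
 * Illustration only, not from this commit: build the cipher/auth xform
 * chain an ipsec-secgw-style application passes when creating an egress
 * lookaside-protocol IPsec session for AES-CBC with SHA1-HMAC.
 */
static void
fill_aes_cbc_sha1_xforms(struct rte_crypto_sym_xform *cipher,
			 struct rte_crypto_sym_xform *auth,
			 uint8_t *cbc_key, uint8_t *hmac_key)
{
	/* Egress order: cipher xform first, chained to the auth xform. */
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->next = auth;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.key.data = cbc_key;
	cipher->cipher.key.length = 16;	/* AES-128; 24/32-byte keys also listed */
	cipher->cipher.iv.offset = sizeof(struct rte_crypto_op) +
				   sizeof(struct rte_crypto_sym_op);
	cipher->cipher.iv.length = 16;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->next = NULL;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.key.data = hmac_key;
	auth->auth.key.length = 20;	/* HMAC-SHA1 key */
	auth->auth.digest_length = 12;	/* truncated ESP ICV */
}

A session configuration built around this chain is then handed to the rte_security API, as sketched after the release-notes hunk below.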


@@ -182,3 +182,4 @@ Features supported
* ESN
* Anti-replay
* AES-128/192/256-GCM
* AES-128/192/256-CBC-SHA1-HMAC


@@ -81,6 +81,8 @@ New Features
* Updated the OCTEON TX2 crypto PMD lookaside protocol offload for IPsec with
ESN and anti-replay support.
* Updated the OCTEON TX2 crypto PMD with CN98xx support.
* Added support for aes-cbc sha1-hmac cipher combination in OCTEON TX2 crypto
PMD lookaside protocol offload for IPsec.
Removed Items

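To go with the release-note entry above, a hedged sketch of the session configuration an application fills for this combination: only the crypto_xform differs from the existing AES-GCM case. The helper name, SPI and tunnel type are placeholders, not part of this patch.

#include <string.h>
#include <rte_security.h>

/*
 * Illustration only: lookaside-protocol IPsec session config using the
 * AES-CBC/SHA1-HMAC xform chain from the previous sketch.
 */
static void
fill_outb_lksd_proto_conf(struct rte_security_session_conf *conf,
			  struct rte_crypto_sym_xform *cipher_auth_chain)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	conf->ipsec.spi = 0x1000;	/* placeholder SPI */
	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	conf->crypto_xform = cipher_auth_chain;
	/*
	 * Tunnel endpoint addresses (conf->ipsec.tunnel.ipv4.*) and the
	 * rte_security_session_create() call are omitted here, since the
	 * create signature differs between DPDK releases.
	 */
}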

@@ -206,11 +206,11 @@ crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
struct otx2_ipsec_po_sa_ctl *ctl;
int cipher_key_len, auth_key_len;
struct otx2_ipsec_po_out_sa *sa;
struct rte_ipv6_hdr *ip6 = NULL;
struct rte_ipv4_hdr *ip = NULL;
struct otx2_sec_session *sess;
struct otx2_cpt_inst_s inst;
struct rte_ipv6_hdr *ip6;
struct rte_ipv4_hdr *ip;
int ret;
int ret, ctx_len;
sess = get_sec_session_private_data(sec_sess);
sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
@@ -239,19 +239,36 @@ crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
if (ret)
return ret;
memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
if (ipsec->options.udp_encap) {
sa->udp_src = 4500;
sa->udp_dst = 4500;
}
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
/* Start ip id from 1 */
lp->ip_id = 1;
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
ip = &sa->template.ipv4_hdr;
if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
if (ipsec->options.udp_encap) {
sa->aes_gcm.template.ip4.udp_src = 4500;
sa->aes_gcm.template.ip4.udp_dst = 4500;
}
ip = &sa->aes_gcm.template.ip4.ipv4_hdr;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
aes_gcm.template) + sizeof(
sa->aes_gcm.template.ip4);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else if (ctl->auth_type ==
OTX2_IPSEC_PO_SA_AUTH_SHA1) {
if (ipsec->options.udp_encap) {
sa->sha1.template.ip4.udp_src = 4500;
sa->sha1.template.ip4.udp_dst = 4500;
}
ip = &sa->sha1.template.ip4.ipv4_hdr;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
sha1.template) + sizeof(
sa->sha1.template.ip4);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
}
ip->version_ihl = RTE_IPV4_VHL_DEF;
ip->next_proto_id = IPPROTO_ESP;
ip->time_to_live = ipsec->tunnel.ipv4.ttl;
@@ -264,7 +281,32 @@ crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
sizeof(struct in_addr));
} else if (ipsec->tunnel.type ==
RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
ip6 = &sa->template.ipv6_hdr;
if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
if (ipsec->options.udp_encap) {
sa->aes_gcm.template.ip6.udp_src = 4500;
sa->aes_gcm.template.ip6.udp_dst = 4500;
}
ip6 = &sa->aes_gcm.template.ip6.ipv6_hdr;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
aes_gcm.template) + sizeof(
sa->aes_gcm.template.ip6);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else if (ctl->auth_type ==
OTX2_IPSEC_PO_SA_AUTH_SHA1) {
if (ipsec->options.udp_encap) {
sa->sha1.template.ip6.udp_src = 4500;
sa->sha1.template.ip6.udp_dst = 4500;
}
ip6 = &sa->sha1.template.ip6.ipv6_hdr;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
sha1.template) + sizeof(
sa->sha1.template.ip6);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
}
ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
((ipsec->tunnel.ipv6.dscp <<
RTE_IPV6_HDR_TC_SHIFT) &
@@ -294,21 +336,18 @@ crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
auth_key_len = 0;
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
cipher_key = crypto_xform->aead.key.data;
cipher_key_len = crypto_xform->aead.key.length;
lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa);
lp->ctx_len >>= 3;
RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_OUTB_CTX_LEN);
} else {
cipher_key = cipher_xform->cipher.key.data;
cipher_key_len = cipher_xform->cipher.key.length;
auth_key = auth_xform->auth.key.data;
auth_key_len = auth_xform->auth.key.length;
/* TODO: check the ctx len for supporting ALGO */
lp->ctx_len = sizeof(struct otx2_ipsec_po_out_sa) >> 3;
RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_OUTB_CTX_LEN);
if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
memcpy(sa->sha1.hmac_key, auth_key, auth_key_len);
}
if (cipher_key_len != 0)
@@ -316,10 +355,6 @@ crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
else
return -EINVAL;
/* Use OPAD & IPAD */
RTE_SET_USED(auth_key);
RTE_SET_USED(auth_key_len);
inst.u64[7] = 0;
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);
@@ -342,9 +377,9 @@ crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
struct rte_security_session *sec_sess)
{
struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
const uint8_t *cipher_key, *auth_key;
struct otx2_sec_session_ipsec_lp *lp;
struct otx2_ipsec_po_sa_ctl *ctl;
const uint8_t *cipher_key, *auth_key;
int cipher_key_len, auth_key_len;
struct otx2_ipsec_po_in_sa *sa;
struct otx2_sec_session *sess;
@@ -392,9 +427,11 @@ crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
auth_key = auth_xform->auth.key.data;
auth_key_len = auth_xform->auth.key.length;
/* TODO: check the ctx len for supporting ALGO */
lp->ctx_len = sizeof(struct otx2_ipsec_po_in_sa) >> 2;
RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_MAX_INB_CTX_LEN);
if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
memcpy(sa->aes_gcm.hmac_key, auth_key, auth_key_len);
lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
aes_gcm.selector) >> 3;
}
if (cipher_key_len != 0)
@@ -402,10 +439,6 @@ crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
else
return -EINVAL;
/* Use OPAD & IPAD */
RTE_SET_USED(auth_key);
RTE_SET_USED(auth_key_len);
inst.u64[7] = 0;
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);


@@ -10,10 +10,6 @@
#include <rte_security.h>
#define OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN 0x09
#define OTX2_IPSEC_PO_AES_GCM_OUTB_CTX_LEN 0x28
#define OTX2_IPSEC_PO_MAX_INB_CTX_LEN 0x22
#define OTX2_IPSEC_PO_MAX_OUTB_CTX_LEN 0x38
#define OTX2_IPSEC_PO_PER_PKT_IV BIT(11)
@@ -171,9 +167,16 @@ struct otx2_ipsec_po_in_sa {
struct otx2_ipsec_po_ip_template {
RTE_STD_C11
union {
uint8_t raw[252];
struct rte_ipv4_hdr ipv4_hdr;
struct rte_ipv6_hdr ipv6_hdr;
struct {
struct rte_ipv4_hdr ipv4_hdr;
uint16_t udp_src;
uint16_t udp_dst;
} ip4;
struct {
struct rte_ipv6_hdr ipv6_hdr;
uint16_t udp_src;
uint16_t udp_dst;
} ip6;
};
};
@@ -191,10 +194,18 @@ struct otx2_ipsec_po_out_sa {
uint32_t esn_hi;
uint32_t esn_low;
/* w8-w39 */
struct otx2_ipsec_po_ip_template template;
uint16_t udp_src;
uint16_t udp_dst;
/* w8-w55 */
union {
uint8_t raw[384];
struct {
struct otx2_ipsec_po_ip_template template;
} aes_gcm;
struct {
uint8_t hmac_key[24];
uint8_t unused[24];
struct otx2_ipsec_po_ip_template template;
} sha1;
};
};
static inline int
@@ -348,8 +359,8 @@ ipsec_po_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
return -ENOTSUP;
}
} else if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
ctl->enc_type = OTX2_IPSEC_PO_SA_ENC_AES_CCM;
aes_key_len = xform->cipher.key.length;
ctl->enc_type = OTX2_IPSEC_PO_SA_ENC_AES_CBC;
aes_key_len = cipher_xform->cipher.key.length;
} else {
return -ENOTSUP;
}

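Taken together, the header changes above mean the outbound context length is no longer the fixed OTX2_IPSEC_PO_MAX_OUTB_CTX_LEN but is derived from where the selected per-algorithm template ends. A minimal sketch of that arithmetic, mirroring the offsetof/RTE_ALIGN_CEIL logic in the session-create hunk; the function name is illustrative only.

#include <stddef.h>
#include <rte_common.h>

/*
 * Illustration only: ctx_len derivation for the SHA1-HMAC outbound SA
 * with an IPv4 template. The template sits after the 24-byte HMAC key
 * and 24 reserved bytes, so the covered length grows accordingly.
 */
static inline uint16_t
outb_sha1_ip4_ctx_len(const struct otx2_ipsec_po_out_sa *sa)
{
	int len = offsetof(struct otx2_ipsec_po_out_sa, sha1.template) +
		  sizeof(sa->sha1.template.ip4);

	return RTE_ALIGN_CEIL(len, 8) >> 3;	/* ctx_len is in 8-byte units */
}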

@@ -111,7 +111,7 @@ process_outb_sa(struct rte_crypto_op *cop,
memcpy(&hdr->iv[0], &sa->iv.gcm.nonce, 4);
memcpy(&hdr->iv[4], rte_crypto_op_ctod_offset(cop, uint8_t *,
sess->iv_offset), sess->iv_length);
} else if (ctl_wrd->auth_type == OTX2_IPSEC_FP_SA_ENC_AES_CBC) {
} else if (ctl_wrd->auth_type == OTX2_IPSEC_PO_SA_AUTH_SHA1) {
memcpy(&hdr->iv[0], rte_crypto_op_ctod_offset(cop, uint8_t *,
sess->iv_offset), sess->iv_length);
}