ipsec: support more AEAD algorithms

Added support for AES_CCM, CHACHA20_POLY1305 and AES_GMAC.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Radu Nicolau 2021-10-14 17:03:22 +01:00 committed by Akhil Goyal
parent 199fcba1bd
commit c99d26197c
7 changed files with 329 additions and 12 deletions
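For context before the per-file diffs: the sketch below is not part of the commit; it shows how an application might describe the new CHACHA20_POLY1305 transform when creating an SA through the librte_ipsec API. The helper name, the key material and the IV_OFFSET layout are illustrative assumptions; the rte_crypto_sym_xform fields are the real cryptodev API.

#include <string.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* assumed layout: per-op IV stored right after the symmetric op */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static const uint8_t key[32]; /* 256-bit ChaCha20 key (placeholder) */

/* hypothetical helper, not part of the patch */
static void
chacha20_poly1305_xform_fill(struct rte_crypto_sym_xform *xf)
{
	memset(xf, 0, sizeof(*xf));
	xf->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xf->next = NULL;                       /* AEAD must be the only xform */
	xf->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xf->aead.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
	xf->aead.key.data = key;
	xf->aead.key.length = sizeof(key);
	xf->aead.iv.offset = IV_OFFSET;        /* IV lives inside the crypto op */
	xf->aead.iv.length = sizeof(uint64_t); /* 8-byte ESP IV */
	xf->aead.digest_length = 16;           /* Poly1305 ICV */
	xf->aead.aad_length = 8;               /* SPI + 32-bit SQN; 12 with ESN */
}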

View File: doc/guides/prog_guide/ipsec_lib.rst

@@ -313,7 +313,8 @@ Supported features
* ESN and replay window.
-* algorithms: 3DES-CBC, AES-CBC, AES-CTR, AES-GCM, HMAC-SHA1, NULL.
+* algorithms: 3DES-CBC, AES-CBC, AES-CTR, AES-GCM, AES_CCM, CHACHA20_POLY1305,
+  AES_GMAC, HMAC-SHA1, NULL.
Limitations
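AES_GMAC in the list above is not an AEAD cipher but an authentication-only algorithm that, unusually, consumes an IV (RFC 4543). A minimal sketch of the corresponding single auth xform follows; it is not part of the commit, and the helper name and key are placeholders:

#include <string.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

static const uint8_t gmac_key[16]; /* 128-bit AES key (placeholder) */

/* hypothetical helper; iv_offset as in the AEAD sketch above */
static void
aes_gmac_xform_fill(struct rte_crypto_sym_xform *xf, uint16_t iv_offset)
{
	memset(xf, 0, sizeof(*xf));
	xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xf->next = NULL;                /* GMAC must be a single xform, see sa.c below */
	xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; /* VERIFY for inbound */
	xf->auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
	xf->auth.key.data = gmac_key;
	xf->auth.key.length = sizeof(gmac_key);
	xf->auth.iv.offset = iv_offset;        /* auth-only, but an IV is required */
	xf->auth.iv.length = sizeof(uint64_t); /* 8-byte ESP IV */
	xf->auth.digest_length = 16;           /* GMAC ICV */
}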

View File: doc/guides/rel_notes/release_21_11.rst

@@ -151,6 +151,11 @@ New Features
Added support for more comprehensive CRC options.
+* **Updated IPsec library.**
+* Added support for more AEAD algorithms: AES_CCM, CHACHA20_POLY1305
+  and AES_GMAC.
* **Added multi-process support for testpmd.**
Added command-line options to specify total number of processes and

View File: lib/ipsec/crypto.h

@@ -21,6 +21,37 @@ struct aesctr_cnt_blk {
uint32_t cnt;
} __rte_packed;
/*
* CHACHA20-POLY1305 devices have some specific requirements
* for IV and AAD formats.
* Ideally this would be done by the driver itself.
*/
struct aead_chacha20_poly1305_iv {
uint32_t salt;
uint64_t iv;
uint32_t cnt;
} __rte_packed;
struct aead_chacha20_poly1305_aad {
uint32_t spi;
/*
* RFC 7634, section 2.1:
* Two formats of the AAD are defined:
* one for 32-bit sequence numbers, and one for 64-bit ESN.
*/
union {
uint32_t u32[2];
uint64_t u64;
} sqn;
uint32_t align0; /* align to 16B boundary */
} __rte_packed;
struct chacha20_poly1305_esph_iv {
struct rte_esp_hdr esph;
uint64_t iv;
} __rte_packed;
/*
* AES-GCM devices have some specific requirements for IV and AAD formats.
* Ideally this would be done by the driver itself.
@@ -51,6 +82,47 @@ struct gcm_esph_iv {
uint64_t iv;
} __rte_packed;
/*
* AES-CCM devices have some specific requirements for IV and AAD formats.
* Ideally this would be done by the driver itself.
*/
union aead_ccm_salt {
uint32_t salt;
struct inner {
uint8_t salt8[3];
uint8_t ccm_flags;
} inner;
} __rte_packed;
struct aead_ccm_iv {
uint8_t ccm_flags;
uint8_t salt[3];
uint64_t iv;
uint32_t cnt;
} __rte_packed;
struct aead_ccm_aad {
uint8_t padding[18];
uint32_t spi;
/*
* RFC 4309, section 5:
* Two formats of the AAD are defined:
* one for 32-bit sequence numbers, and one for 64-bit ESN.
*/
union {
uint32_t u32[2];
uint64_t u64;
} sqn;
uint32_t align0; /* align to 16B boundary */
} __rte_packed;
struct ccm_esph_iv {
struct rte_esp_hdr esph;
uint64_t iv;
} __rte_packed;
static inline void
aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
{
@@ -59,6 +131,16 @@ aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
ctr->cnt = rte_cpu_to_be_32(1);
}
static inline void
aead_chacha20_poly1305_iv_fill(struct aead_chacha20_poly1305_iv
*chacha20_poly1305,
uint64_t iv, uint32_t salt)
{
chacha20_poly1305->salt = salt;
chacha20_poly1305->iv = iv;
chacha20_poly1305->cnt = rte_cpu_to_be_32(1);
}
static inline void
aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
{
@@ -67,6 +149,21 @@ aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
gcm->cnt = rte_cpu_to_be_32(1);
}
static inline void
aead_ccm_iv_fill(struct aead_ccm_iv *ccm, uint64_t iv, uint32_t salt)
{
union aead_ccm_salt tsalt;
tsalt.salt = salt;
ccm->ccm_flags = tsalt.inner.ccm_flags;
ccm->salt[0] = tsalt.inner.salt8[0];
ccm->salt[1] = tsalt.inner.salt8[1];
ccm->salt[2] = tsalt.inner.salt8[2];
ccm->iv = iv;
ccm->cnt = rte_cpu_to_be_32(1);
}
/*
* RFC 4106, section 5: AAD Construction
* spi and sqn should already be converted into network byte order.
@@ -86,6 +183,25 @@ aead_gcm_aad_fill(struct aead_gcm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
aad->align0 = 0;
}
/*
* RFC 4309, section 5: AAD Construction
* spi and sqn should already be converted into network byte order.
* Make sure that unused bytes are zeroed.
*/
static inline void
aead_ccm_aad_fill(struct aead_ccm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
int esn)
{
aad->spi = spi;
if (esn)
aad->sqn.u64 = sqn;
else {
aad->sqn.u32[0] = sqn_low32(sqn);
aad->sqn.u32[1] = 0;
}
aad->align0 = 0;
}
static inline void
gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
{
@@ -93,6 +209,27 @@ gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
iv[1] = 0;
}
/*
* RFC 7634, section 2.1: AAD Construction
* spi and sqn should already be converted into network byte order.
* Make sure that unused bytes are zeroed.
*/
static inline void
aead_chacha20_poly1305_aad_fill(struct aead_chacha20_poly1305_aad *aad,
rte_be32_t spi, rte_be64_t sqn,
int esn)
{
aad->spi = spi;
if (esn)
aad->sqn.u64 = sqn;
else {
aad->sqn.u32[0] = sqn_low32(sqn);
aad->sqn.u32[1] = 0;
}
aad->align0 = 0;
}
/*
* Helper routine to copy IV
* Right now we support only algorithms with IV length equal to 0/8/16 bytes.
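To see how the new helpers compose on the data path, here is a sketch from the library-internal point of view, assuming the definitions above are in scope; the wrapper function itself is hypothetical:

static inline void
chacha20_poly1305_prep_sketch(rte_be64_t sqn, uint32_t salt, rte_be32_t spi)
{
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	struct aead_chacha20_poly1305_iv civ;
	struct aead_chacha20_poly1305_aad caad;

	gen_iv(iv, sqn); /* per-packet IV: iv[0] = SQN, iv[1] = 0 */
	/* device-facing IV block: salt | 8-byte IV | BE32 counter = 1 */
	aead_chacha20_poly1305_iv_fill(&civ, iv[0], salt);
	/* AAD: SPI plus 32-bit SQN (64-bit when ESN is on), zero-padded */
	aead_chacha20_poly1305_aad_fill(&caad, spi, sqn, 0);
	(void)civ; (void)caad; /* real code writes these into the crypto op */
}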

View File: lib/ipsec/esp_inb.c

@@ -63,6 +63,8 @@ inb_cop_prepare(struct rte_crypto_op *cop,
{
struct rte_crypto_sym_op *sop;
struct aead_gcm_iv *gcm;
struct aead_ccm_iv *ccm;
struct aead_chacha20_poly1305_iv *chacha20_poly1305;
struct aesctr_cnt_blk *ctr;
uint64_t *ivc, *ivp;
uint32_t algo;
@@ -83,6 +85,24 @@
sa->iv_ofs);
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CCM:
sop_aead_prepare(sop, sa, icv, pofs, plen);
/* fill AAD IV (located inside crypto op) */
ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
sa->iv_ofs);
aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
break;
case ALGO_TYPE_CHACHA20_POLY1305:
sop_aead_prepare(sop, sa, icv, pofs, plen);
/* fill AAD IV (located inside crypto op) */
chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
struct aead_chacha20_poly1305_iv *,
sa->iv_ofs);
aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CBC:
case ALGO_TYPE_3DES_CBC:
sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
@@ -91,6 +111,14 @@
ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
copy_iv(ivc, ivp, sa->iv_len);
break;
case ALGO_TYPE_AES_GMAC:
sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
/* fill AAD IV (located inside crypto op) */
gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
sa->iv_ofs);
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CTR:
sop_ciph_auth_prepare(sop, sa, icv, pofs, plen);
@@ -110,6 +138,8 @@ inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
uint32_t *pofs, uint32_t plen, void *iv)
{
struct aead_gcm_iv *gcm;
struct aead_ccm_iv *ccm;
struct aead_chacha20_poly1305_iv *chacha20_poly1305;
struct aesctr_cnt_blk *ctr;
uint64_t *ivp;
uint32_t clen;
@@ -120,9 +150,19 @@ inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
switch (sa->algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_AES_GMAC:
gcm = (struct aead_gcm_iv *)iv;
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CCM:
ccm = (struct aead_ccm_iv *)iv;
aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
break;
case ALGO_TYPE_CHACHA20_POLY1305:
chacha20_poly1305 = (struct aead_chacha20_poly1305_iv *)iv;
aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CBC:
case ALGO_TYPE_3DES_CBC:
copy_iv(iv, ivp, sa->iv_len);
@@ -175,6 +215,8 @@ inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
const union sym_op_data *icv)
{
struct aead_gcm_aad *aad;
struct aead_ccm_aad *caad;
struct aead_chacha20_poly1305_aad *chacha_aad;
/* insert SQN.hi between ESP trailer and ICV */
if (sa->sqh_len != 0)
@@ -184,9 +226,27 @@ inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
* fill AAD fields, if any (aad fields are placed after icv),
* right now we support only AEAD algorithms: AES-GCM, AES-CCM and
* CHACHA20-POLY1305.
*/
-if (sa->aad_len != 0) {
-aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
-aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
-}
+switch (sa->algo_type) {
+case ALGO_TYPE_AES_GCM:
+if (sa->aad_len != 0) {
+aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+}
+break;
+case ALGO_TYPE_AES_CCM:
+if (sa->aad_len != 0) {
+caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
+aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
+}
+break;
+case ALGO_TYPE_CHACHA20_POLY1305:
+if (sa->aad_len != 0) {
+chacha_aad = (struct aead_chacha20_poly1305_aad *)
+(icv->va + sa->icv_len);
+aead_chacha20_poly1305_aad_fill(chacha_aad,
+sa->spi, sqc, IS_ESN(sa));
+}
+break;
+}
}

View File: lib/ipsec/esp_outb.c

@@ -63,6 +63,8 @@ outb_cop_prepare(struct rte_crypto_op *cop,
{
struct rte_crypto_sym_op *sop;
struct aead_gcm_iv *gcm;
struct aead_ccm_iv *ccm;
struct aead_chacha20_poly1305_iv *chacha20_poly1305;
struct aesctr_cnt_blk *ctr;
uint32_t algo;
@@ -80,6 +82,15 @@
/* NULL case */
sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
break;
case ALGO_TYPE_AES_GMAC:
/* GMAC case */
sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
/* fill AAD IV (located inside crypto op) */
gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
sa->iv_ofs);
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_GCM:
/* AEAD (AES_GCM) case */
sop_aead_prepare(sop, sa, icv, hlen, plen);
@@ -89,6 +100,26 @@
sa->iv_ofs);
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CCM:
/* AEAD (AES_CCM) case */
sop_aead_prepare(sop, sa, icv, hlen, plen);
/* fill AAD IV (located inside crypto op) */
ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
sa->iv_ofs);
aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
break;
case ALGO_TYPE_CHACHA20_POLY1305:
/* AEAD (CHACHA20_POLY1305) case */
sop_aead_prepare(sop, sa, icv, hlen, plen);
/* fill AAD IV (located inside crypto op) */
chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
struct aead_chacha20_poly1305_iv *,
sa->iv_ofs);
aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CTR:
/* Cipher-Auth (AES-CTR *) case */
sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
@@ -196,7 +227,9 @@ outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
const union sym_op_data *icv)
{
uint32_t *psqh;
-struct aead_gcm_aad *aad;
+struct aead_gcm_aad *gaad;
struct aead_ccm_aad *caad;
struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;
/* insert SQN.hi between ESP trailer and ICV */
if (sa->sqh_len != 0) {
@@ -208,9 +241,29 @@ outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
* fill IV and AAD fields, if any (aad fields are placed after icv),
* right now we support only AEAD algorithms: AES-GCM, AES-CCM and
* CHACHA20-POLY1305.
*/
-if (sa->aad_len != 0) {
-aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
-aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
-}
+switch (sa->algo_type) {
+case ALGO_TYPE_AES_GCM:
+if (sa->aad_len != 0) {
+gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
+}
+break;
+case ALGO_TYPE_AES_CCM:
+if (sa->aad_len != 0) {
+caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
+aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
+}
+break;
+case ALGO_TYPE_CHACHA20_POLY1305:
+if (sa->aad_len != 0) {
+chacha20_poly1305_aad = (struct aead_chacha20_poly1305_aad *)
+(icv->va + sa->icv_len);
+aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
+sa->spi, sqc, IS_ESN(sa));
+}
+break;
+default:
+break;
+}
}
@@ -418,6 +471,8 @@ outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
{
uint64_t *ivp = iv;
struct aead_gcm_iv *gcm;
struct aead_ccm_iv *ccm;
struct aead_chacha20_poly1305_iv *chacha20_poly1305;
struct aesctr_cnt_blk *ctr;
uint32_t clen;
@@ -426,6 +481,15 @@ outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
gcm = iv;
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CCM:
ccm = iv;
aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
break;
case ALGO_TYPE_CHACHA20_POLY1305:
chacha20_poly1305 = iv;
aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CTR:
ctr = iv;
aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
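At the application level none of the per-algorithm handling above is visible: the usual lookaside flow drives the new algorithms exactly like AES-GCM. A sketch follows (not part of the commit); session and device setup are assumed done elsewhere, and for brevity it assumes in-order completion, where real code should regroup mbufs with rte_ipsec_pkt_crypto_group():

#include <rte_cryptodev.h>
#include <rte_ipsec.h>

static uint16_t
ipsec_lookaside_burst(struct rte_ipsec_session *ss, uint8_t dev_id,
	uint16_t qid, struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
	uint16_t num)
{
	uint16_t k, n;

	/* fills per-algo IV/AAD via the prepare routines shown above */
	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
	k = rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, k);

	/* verify ICV status, strip/append ESP headers and trailers */
	return rte_ipsec_pkt_process(ss, mb, n);
}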

View File: lib/ipsec/sa.c

@@ -47,6 +47,15 @@ fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
if (xfn != NULL)
return -EINVAL;
xform->aead = &xf->aead;
/* GMAC has only auth */
} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xf->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
if (xfn != NULL)
return -EINVAL;
xform->auth = &xf->auth;
/* no cipher xform to record: xfn was verified to be NULL above */
/*
* CIPHER+AUTH xforms are expected in strict order,
* depending on SA direction:
@@ -247,12 +256,13 @@ esp_inb_init(struct rte_ipsec_sa *sa)
sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
/*
-* for AEAD and NULL algorithms we can assume that
+* for AEAD algorithms we can assume that
* auth and cipher offsets would be equal.
*/
switch (sa->algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_NULL:
case ALGO_TYPE_AES_CCM:
case ALGO_TYPE_CHACHA20_POLY1305:
sa->ctp.auth.raw = sa->ctp.cipher.raw;
break;
default:
@@ -294,6 +304,8 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
switch (algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_AES_CCM:
case ALGO_TYPE_CHACHA20_POLY1305:
case ALGO_TYPE_AES_CTR:
case ALGO_TYPE_NULL:
sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
@@ -305,15 +317,20 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
sa->ctp.cipher.length = sa->iv_len;
break;
case ALGO_TYPE_AES_GMAC:
sa->ctp.cipher.offset = 0;
sa->ctp.cipher.length = 0;
break;
}
/*
-* for AEAD and NULL algorithms we can assume that
+* for AEAD algorithms we can assume that
* auth and cipher offsets would be equal.
*/
switch (algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_NULL:
case ALGO_TYPE_AES_CCM:
case ALGO_TYPE_CHACHA20_POLY1305:
sa->ctp.auth.raw = sa->ctp.cipher.raw;
break;
default:
@@ -374,13 +391,39 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
sa->pad_align = IPSEC_PAD_AES_GCM;
sa->algo_type = ALGO_TYPE_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
/* RFC 4309 */
sa->aad_len = sizeof(struct aead_ccm_aad);
sa->icv_len = cxf->aead->digest_length;
sa->iv_ofs = cxf->aead->iv.offset;
sa->iv_len = sizeof(uint64_t);
sa->pad_align = IPSEC_PAD_AES_CCM;
sa->algo_type = ALGO_TYPE_AES_CCM;
break;
case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
/* RFC 7634 & 8439 */
sa->aad_len = sizeof(struct aead_chacha20_poly1305_aad);
sa->icv_len = cxf->aead->digest_length;
sa->iv_ofs = cxf->aead->iv.offset;
sa->iv_len = sizeof(uint64_t);
sa->pad_align = IPSEC_PAD_CHACHA20_POLY1305;
sa->algo_type = ALGO_TYPE_CHACHA20_POLY1305;
break;
default:
return -EINVAL;
}
} else if (cxf->auth->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
/* RFC 4543 */
/* AES-GMAC is a special case of auth that needs IV */
sa->pad_align = IPSEC_PAD_AES_GMAC;
sa->iv_len = sizeof(uint64_t);
sa->icv_len = cxf->auth->digest_length;
sa->iv_ofs = cxf->auth->iv.offset;
sa->algo_type = ALGO_TYPE_AES_GMAC;
} else {
sa->icv_len = cxf->auth->digest_length;
sa->iv_ofs = cxf->cipher->iv.offset;
-sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
switch (cxf->cipher->algo) {
case RTE_CRYPTO_CIPHER_NULL:
@@ -414,6 +457,7 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
}
}
+sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
sa->udata = prm->userdata;
sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
sa->salt = prm->ipsec_xform.salt;
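Putting the pieces together, here is a hedged sketch of SA creation with one of the new algorithms; it is not part of the commit, the prm fields are reduced to the ones esp_sa_init() consumes, and the values are placeholders:

#include <stdlib.h>
#include <string.h>
#include <rte_ipsec.h>

/* hypothetical helper; xf filled as in the CHACHA20_POLY1305 sketch */
static struct rte_ipsec_sa *
ipsec_sa_create_sketch(struct rte_crypto_sym_xform *xf)
{
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_sa *sa;
	int sz;

	memset(&prm, 0, sizeof(prm));
	prm.ipsec_xform.spi = 0x1000;      /* placeholder SPI */
	prm.ipsec_xform.salt = 0x12345678; /* 4-byte salt (RFC 7634) */
	prm.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	prm.ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm.ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
	prm.crypto_xform = xf;

	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0)
		return NULL;
	sa = calloc(1, sz);
	if (sa == NULL || rte_ipsec_sa_init(sa, &prm, sz) < 0) {
		free(sa);
		return NULL;
	}
	return sa; /* esp_sa_init() above selects iv/aad/pad per algorithm */
}

A crypto session built from the same xform still has to be attached through an rte_ipsec_session (rte_ipsec_session_prepare()) before the burst functions can drive the SA.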

View File: lib/ipsec/sa.h

@@ -19,7 +19,10 @@ enum {
IPSEC_PAD_AES_CBC = IPSEC_MAX_IV_SIZE,
IPSEC_PAD_AES_CTR = IPSEC_PAD_DEFAULT,
IPSEC_PAD_AES_GCM = IPSEC_PAD_DEFAULT,
IPSEC_PAD_AES_CCM = IPSEC_PAD_DEFAULT,
IPSEC_PAD_CHACHA20_POLY1305 = IPSEC_PAD_DEFAULT,
IPSEC_PAD_NULL = IPSEC_PAD_DEFAULT,
IPSEC_PAD_AES_GMAC = IPSEC_PAD_DEFAULT,
};
/* iv sizes for different algorithms */
@@ -67,6 +70,9 @@ enum sa_algo_type {
ALGO_TYPE_AES_CBC,
ALGO_TYPE_AES_CTR,
ALGO_TYPE_AES_GCM,
ALGO_TYPE_AES_CCM,
ALGO_TYPE_CHACHA20_POLY1305,
ALGO_TYPE_AES_GMAC,
ALGO_TYPE_MAX
};