ipsec: support CPU crypto mode

Update the library to handle the CPU crypto security mode, which utilizes
cryptodev's synchronous, CPU-accelerated crypto operations.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Signed-off-by: Marcin Smoczynski <marcinx.smoczynski@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Author: Marcin Smoczynski
Committed: 2020-02-04 14:12:55 +01:00 by Akhil Goyal
Parent: 5d6d7e443d
Commit: 957394f726
8 changed files with 475 additions and 58 deletions

File: doc/guides/prog_guide/ipsec_lib.rst

@ -1,5 +1,5 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2018 Intel Corporation.
Copyright(c) 2018-2020 Intel Corporation.
IPsec Packet Processing Library
===============================
@ -81,6 +81,14 @@ In that mode the library functions perform
- verify that crypto device operations (encryption, ICV generation)
were completed successfully
RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In that mode the library functions perform the same operations as in
``RTE_SECURITY_ACTION_TYPE_NONE``. The only difference is that the crypto
operations are performed using the CPU crypto synchronous API.
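A rough sketch of the resulting datapath (illustration only, not part of this
patch): ``ss`` is assumed to be an already initialized ``rte_ipsec_session`` of
type ``RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO``, ``mb[]`` an array of ``num``
mbufs; handling of dropped packets is elided.

/* prepare packets and run crypto synchronously on the calling lcore */
uint16_t k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);

/* finalize IPsec processing for the successfully processed packets */
k = rte_ipsec_pkt_process(ss, mb, k);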
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

File: lib/librte_ipsec/esp_inb.c

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
@ -105,6 +105,39 @@ inb_cop_prepare(struct rte_crypto_op *cop,
}
}
static inline uint32_t
inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
uint32_t *pofs, uint32_t plen, void *iv)
{
struct aead_gcm_iv *gcm;
struct aesctr_cnt_blk *ctr;
uint64_t *ivp;
uint32_t clen;
ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
*pofs + sizeof(struct rte_esp_hdr));
clen = 0;
switch (sa->algo_type) {
case ALGO_TYPE_AES_GCM:
gcm = (struct aead_gcm_iv *)iv;
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CBC:
case ALGO_TYPE_3DES_CBC:
copy_iv(iv, ivp, sa->iv_len);
break;
case ALGO_TYPE_AES_CTR:
ctr = (struct aesctr_cnt_blk *)iv;
aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
break;
}
*pofs += sa->ctp.auth.offset;
clen = plen - sa->ctp.auth.length;
return clen;
}
/*
* Helper function for prepare() to deal with situation when
* ICV is spread by two segments. Tries to move ICV completely into the
@ -157,17 +190,12 @@ inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
}
}
/*
* setup/update packet data and metadata for ESP inbound tunnel case.
*/
static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
static inline int
inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
{
int32_t rc;
uint64_t sqn;
uint32_t clen, icv_len, icv_ofs, plen;
struct rte_mbuf *ml;
struct rte_esp_hdr *esph;
esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
@ -179,12 +207,21 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
sqn = rte_be_to_cpu_32(esph->seq);
if (IS_ESN(sa))
sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
*sqc = rte_cpu_to_be_64(sqn);
/* check IPsec window */
rc = esn_inb_check_sqn(rsn, sa, sqn);
if (rc != 0)
return rc;
sqn = rte_cpu_to_be_64(sqn);
return rc;
}
/* prepare packet for upcoming processing */
static inline int32_t
inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
uint32_t hlen, union sym_op_data *icv)
{
uint32_t clen, icv_len, icv_ofs, plen;
struct rte_mbuf *ml;
/* start packet manipulation */
plen = mb->pkt_len;
@ -217,7 +254,8 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
icv_ofs += sa->sqh_len;
/* we have to allocate space for AAD somewhere,
/*
* we have to allocate space for AAD somewhere,
* right now - just use free trailing space at the last segment.
* Would probably be more convenient to reserve space for AAD
* inside rte_crypto_op itself
@ -238,10 +276,28 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
mb->pkt_len += sa->sqh_len;
ml->data_len += sa->sqh_len;
inb_pkt_xprepare(sa, sqn, icv);
return plen;
}
static inline int32_t
inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
{
int rc;
rte_be64_t sqn;
rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
if (rc != 0)
return rc;
rc = inb_prepare(sa, mb, hlen, icv);
if (rc < 0)
return rc;
inb_pkt_xprepare(sa, sqn, icv);
return rc;
}
/*
* setup/update packets and crypto ops for ESP inbound case.
*/
@ -270,17 +326,17 @@ esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
lksd_none_cop_prepare(cop[k], cs, mb[i]);
inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
k++;
} else
} else {
dr[i - k] = i;
rte_errno = -rc;
}
}
rsn_release(sa, rsn);
/* copy not prepared mbufs beyond good ones */
if (k != num && k != 0) {
if (k != num && k != 0)
move_bad_mbufs(mb, dr, num, num - k);
rte_errno = EBADMSG;
}
return k;
}
@ -512,7 +568,6 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
return k;
}
/*
* *process* function for tunnel packets
*/
@ -612,7 +667,7 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
if (k != num && k != 0)
move_bad_mbufs(mb, dr, num, num - k);
/* update SQN and replay winow */
/* update SQN and replay window */
n = esp_inb_rsn_update(sa, sqn, dr, k);
/* handle mbufs with wrong SQN */
@ -625,6 +680,67 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
return n;
}
/*
* Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
* (synchronous mode).
*/
uint16_t
cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
int32_t rc;
uint32_t i, k;
struct rte_ipsec_sa *sa;
struct replay_sqn *rsn;
union sym_op_data icv;
void *iv[num];
void *aad[num];
void *dgst[num];
uint32_t dr[num];
uint32_t l4ofs[num];
uint32_t clen[num];
uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
sa = ss->sa;
/* grab rsn lock */
rsn = rsn_acquire(sa);
/* do preparation for all packets */
for (i = 0, k = 0; i != num; i++) {
/* calculate ESP header offset */
l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;
/* prepare ESP packet for processing */
rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
if (rc >= 0) {
/* get encrypted data offset and length */
clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
l4ofs + k, rc, ivbuf[k]);
/* fill iv, digest and aad */
iv[k] = ivbuf[k];
aad[k] = icv.va + sa->icv_len;
dgst[k++] = icv.va;
} else {
dr[i - k] = i;
rte_errno = -rc;
}
}
/* release rsn lock */
rsn_release(sa, rsn);
/* copy not prepared mbufs beyond good ones */
if (k != num && k != 0)
move_bad_mbufs(mb, dr, num, num - k);
/* convert mbufs to iovecs and do actual crypto/auth processing */
cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst, l4ofs, clen, k);
return k;
}
/*
* process group of ESP inbound tunnel packets.
*/

File: lib/librte_ipsec/esp_outb.c

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
@ -15,6 +15,9 @@
#include "misc.h"
#include "pad.h"
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
union sym_op_data *icv, uint8_t sqh_len);
/*
* helper function to fill crypto_sym op for cipher+auth algorithms.
@ -177,6 +180,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
espt->pad_len = pdlen;
espt->next_proto = sa->proto;
/* set icv va/pa value(s) */
icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
@ -270,8 +274,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
uint32_t l2len, uint32_t l3len, union sym_op_data *icv,
uint8_t sqh_len)
union sym_op_data *icv, uint8_t sqh_len)
{
uint8_t np;
uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
@ -280,6 +283,10 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
struct rte_esp_tail *espt;
char *ph, *pt;
uint64_t *iv;
uint32_t l2len, l3len;
l2len = mb->l2_len;
l3len = mb->l3_len;
uhlen = l2len + l3len;
plen = mb->pkt_len - uhlen;
@ -340,6 +347,7 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
espt->pad_len = pdlen;
espt->next_proto = np;
/* set icv va/pa value(s) */
icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
@ -381,8 +389,8 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
gen_iv(iv, sqc);
/* try to update the packet itself */
rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv,
sa->sqh_len);
rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
sa->sqh_len);
/* success, setup crypto op */
if (rc >= 0) {
outb_pkt_xprepare(sa, sqc, &icv);
@ -403,6 +411,116 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
return k;
}
static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
uint32_t plen, void *iv)
{
uint64_t *ivp = iv;
struct aead_gcm_iv *gcm;
struct aesctr_cnt_blk *ctr;
uint32_t clen;
switch (sa->algo_type) {
case ALGO_TYPE_AES_GCM:
gcm = iv;
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
case ALGO_TYPE_AES_CTR:
ctr = iv;
aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
break;
}
*pofs += sa->ctp.auth.offset;
clen = plen + sa->ctp.auth.length;
return clen;
}
static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num,
esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
int32_t rc;
uint64_t sqn;
rte_be64_t sqc;
struct rte_ipsec_sa *sa;
uint32_t i, k, n;
uint32_t l2, l3;
union sym_op_data icv;
void *iv[num];
void *aad[num];
void *dgst[num];
uint32_t dr[num];
uint32_t l4ofs[num];
uint32_t clen[num];
uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
sa = ss->sa;
n = num;
sqn = esn_outb_update_sqn(sa, &n);
if (n != num)
rte_errno = EOVERFLOW;
for (i = 0, k = 0; i != n; i++) {
l2 = mb[i]->l2_len;
l3 = mb[i]->l3_len;
/* calculate ESP header offset */
l4ofs[k] = (l2 + l3) & cofs_mask;
sqc = rte_cpu_to_be_64(sqn + i);
gen_iv(ivbuf[k], sqc);
/* try to update the packet itself */
rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
/* success, proceed with preparations */
if (rc >= 0) {
outb_pkt_xprepare(sa, sqc, &icv);
/* get encrypted data offset and length */
clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
ivbuf[k]);
/* fill iv, digest and aad */
iv[k] = ivbuf[k];
aad[k] = icv.va + sa->icv_len;
dgst[k++] = icv.va;
} else {
dr[i - k] = i;
rte_errno = -rc;
}
}
/* copy not prepared mbufs beyond good ones */
if (k != n && k != 0)
move_bad_mbufs(mb, dr, n, n - k);
/* convert mbufs to iovecs and do actual crypto/auth processing */
cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst, l4ofs, clen, k);
return k;
}
uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}
uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
UINT32_MAX);
}
/*
* process outbound packets for SA with ESN support,
* for algorithms that require SQN.hibits to be implicitly included
@ -526,7 +644,7 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
int32_t rc;
uint32_t i, k, n, l2, l3;
uint32_t i, k, n;
uint64_t sqn;
rte_be64_t sqc;
struct rte_ipsec_sa *sa;
@ -544,15 +662,11 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
k = 0;
for (i = 0; i != n; i++) {
l2 = mb[i]->l2_len;
l3 = mb[i]->l3_len;
sqc = rte_cpu_to_be_64(sqn + i);
gen_iv(iv, sqc);
/* try to update the packet itself */
rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
l2, l3, &icv, 0);
rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
k += (rc >= 0);

File: lib/librte_ipsec/misc.h

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018-2020 Intel Corporation
*/
#ifndef _MISC_H_
@ -105,4 +105,75 @@ mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs,
mb->pkt_len -= len;
}
/*
* process packets using sync crypto engine
*/
static inline void
cpu_crypto_bulk(const struct rte_ipsec_session *ss,
union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
uint32_t clen[], uint32_t num)
{
uint32_t i, j, n;
int32_t vcnt, vofs;
int32_t st[num];
struct rte_crypto_sgl vecpkt[num];
struct rte_crypto_vec vec[UINT8_MAX];
struct rte_crypto_sym_vec symvec;
const uint32_t vnum = RTE_DIM(vec);
j = 0, n = 0;
vofs = 0;
for (i = 0; i != num; i++) {
vcnt = rte_crypto_mbuf_to_vec(mb[i], l4ofs[i], clen[i],
&vec[vofs], vnum - vofs);
/* not enough space in vec[] to hold all segments */
if (vcnt < 0) {
/* fill the request structure */
symvec.sgl = &vecpkt[j];
symvec.iv = &iv[j];
symvec.aad = &aad[j];
symvec.digest = &dgst[j];
symvec.status = &st[j];
symvec.num = i - j;
/* flush vec array and try again */
n += rte_cryptodev_sym_cpu_crypto_process(
ss->crypto.dev_id, ss->crypto.ses, ofs,
&symvec);
vofs = 0;
vcnt = rte_crypto_mbuf_to_vec(mb[i], l4ofs[i], clen[i],
vec, vnum);
RTE_ASSERT(vcnt > 0);
j = i;
}
vecpkt[i].vec = &vec[vofs];
vecpkt[i].num = vcnt;
vofs += vcnt;
}
/* fill the request structure */
symvec.sgl = &vecpkt[j];
symvec.iv = &iv[j];
symvec.aad = &aad[j];
symvec.digest = &dgst[j];
symvec.status = &st[j];
symvec.num = i - j;
n += rte_cryptodev_sym_cpu_crypto_process(ss->crypto.dev_id,
ss->crypto.ses, ofs, &symvec);
j = num - n;
for (i = 0; j != 0 && i != num; i++) {
if (st[i] != 0) {
mb[i]->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
j--;
}
}
}
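For reference, a minimal single-segment invocation of the underlying cryptodev
API used above might look as follows (a sketch only; buf, iova, len, iv_ptr,
aad_ptr and dgst_ptr are hypothetical placeholders, not part of this patch):

/* sketch: process one contiguous buffer via the CPU crypto API;
 * all data pointers below are hypothetical placeholders */
struct rte_crypto_vec v = { .base = buf, .iova = iova, .len = len };
struct rte_crypto_sgl sgl = { .vec = &v, .num = 1 };
int32_t status;
void *piv = iv_ptr, *paad = aad_ptr, *pdgst = dgst_ptr;
struct rte_crypto_sym_vec symvec = {
	.sgl = &sgl, .iv = &piv, .aad = &paad,
	.digest = &pdgst, .status = &status, .num = 1,
};

/* returns the number of successfully processed packets (0 or 1 here);
 * per-packet success/failure is reported through status */
uint32_t n = rte_cryptodev_sym_cpu_crypto_process(dev_id, ses, ofs, &symvec);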
#endif /* _MISC_H_ */

File: lib/librte_ipsec/rte_ipsec.h

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018-2020 Intel Corporation
*/
#ifndef _RTE_IPSEC_H_
@ -33,10 +33,15 @@ struct rte_ipsec_session;
* (see rte_ipsec_pkt_process for more details).
*/
struct rte_ipsec_sa_pkt_func {
uint16_t (*prepare)(const struct rte_ipsec_session *ss,
union {
uint16_t (*async)(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[],
struct rte_crypto_op *cop[],
uint16_t num);
uint16_t (*sync)(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[],
uint16_t num);
} prepare;
uint16_t (*process)(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[],
uint16_t num);
@ -62,6 +67,7 @@ struct rte_ipsec_session {
union {
struct {
struct rte_cryptodev_sym_session *ses;
uint8_t dev_id;
} crypto;
struct {
struct rte_security_session *ses;
@ -114,7 +120,15 @@ static inline uint16_t
rte_ipsec_pkt_crypto_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
return ss->pkt_func.prepare(ss, mb, cop, num);
return ss->pkt_func.prepare.async(ss, mb, cop, num);
}
__rte_experimental
static inline uint16_t
rte_ipsec_pkt_cpu_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
return ss->pkt_func.prepare.sync(ss, mb, num);
}
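With prepare now being a union, the caller must pick the member matching the
session type; a hypothetical dispatch helper (illustration only, not part of
this patch):

static inline uint16_t
pkt_prepare_dispatch(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	if (ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
		return rte_ipsec_pkt_cpu_prepare(ss, mb, num);
	return rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
}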
/**

File: lib/librte_ipsec/sa.c

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
@ -243,10 +243,26 @@ static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
/* these params may differ with new algorithms support */
sa->ctp.auth.offset = 0;
sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
/*
* for AEAD and NULL algorithms we can assume that
* auth and cipher offsets would be equal.
*/
switch (sa->algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_NULL:
sa->ctp.auth.raw = sa->ctp.cipher.raw;
break;
default:
sa->ctp.auth.offset = 0;
sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
sa->cofs.ofs.cipher.tail = sa->sqh_len;
break;
}
sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
}
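Concretely, the inbound cases above yield (worked out from the assignments,
shown only as an illustration):

/* illustration only - resulting cpu-crypto offsets for inbound:
 *
 * AES-GCM / NULL (auth.raw == cipher.raw):
 *   cofs.ofs.cipher.head = cipher.offset - auth.offset = 0
 *   cofs.ofs.cipher.tail = 0
 *
 * e.g. AES-CBC + HMAC (default branch):
 *   cofs.ofs.cipher.head = (sizeof(struct rte_esp_hdr) + iv_len) - 0
 *   cofs.ofs.cipher.tail = sqh_len
 */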
/*
@ -269,13 +285,13 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
sa->sqn.outb.raw = 1;
/* these params may differ with new algorithms support */
sa->ctp.auth.offset = hlen;
sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
sa->iv_len + sa->sqh_len;
algo_type = sa->algo_type;
/*
* Setup auth and cipher length and offset.
* these params may differ with new algorithms support
*/
switch (algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_AES_CTR:
@ -286,11 +302,30 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
break;
case ALGO_TYPE_AES_CBC:
case ALGO_TYPE_3DES_CBC:
sa->ctp.cipher.offset = sa->hdr_len +
sizeof(struct rte_esp_hdr);
sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
sa->ctp.cipher.length = sa->iv_len;
break;
}
/*
* for AEAD and NULL algorithms we can assume that
* auth and cipher offsets would be equal.
*/
switch (algo_type) {
case ALGO_TYPE_AES_GCM:
case ALGO_TYPE_NULL:
sa->ctp.auth.raw = sa->ctp.cipher.raw;
break;
default:
sa->ctp.auth.offset = hlen;
sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
sa->iv_len + sa->sqh_len;
break;
}
sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
(sa->ctp.cipher.offset + sa->ctp.cipher.length);
}
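And similarly for the outbound cases (illustration only, derived from the
assignments above):

/* illustration only - resulting cpu-crypto offsets for outbound:
 *
 * AES-GCM / NULL (auth.raw == cipher.raw):
 *   head = 0, tail = 0
 *
 * e.g. AES-CBC + HMAC (default branch):
 *   head = (hlen + sizeof(struct rte_esp_hdr)) - hlen
 *        = sizeof(struct rte_esp_hdr)
 *   tail = (hlen + esp_hdr + iv_len + sqh_len) - (hlen + esp_hdr + iv_len)
 *        = sqh_len
 */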
/*
@ -544,9 +579,9 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,
* - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
* - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
*/
static uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
uint16_t num)
uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
uint32_t i, k;
uint32_t dr[num];
@ -588,21 +623,59 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
switch (sa->type & msk) {
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
pf->prepare = esp_inb_pkt_prepare;
pf->prepare.async = esp_inb_pkt_prepare;
pf->process = esp_inb_tun_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
pf->prepare = esp_inb_pkt_prepare;
pf->prepare.async = esp_inb_pkt_prepare;
pf->process = esp_inb_trs_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
pf->prepare = esp_outb_tun_prepare;
pf->prepare.async = esp_outb_tun_prepare;
pf->process = (sa->sqh_len != 0) ?
esp_outb_sqh_process : pkt_flag_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
pf->prepare = esp_outb_trs_prepare;
pf->prepare.async = esp_outb_trs_prepare;
pf->process = (sa->sqh_len != 0) ?
esp_outb_sqh_process : pkt_flag_process;
break;
default:
rc = -ENOTSUP;
}
return rc;
}
static int
cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
struct rte_ipsec_sa_pkt_func *pf)
{
int32_t rc;
static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
RTE_IPSEC_SATP_MODE_MASK;
rc = 0;
switch (sa->type & msk) {
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
pf->prepare.sync = cpu_inb_pkt_prepare;
pf->process = esp_inb_tun_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
pf->prepare.sync = cpu_inb_pkt_prepare;
pf->process = esp_inb_trs_pkt_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
pf->prepare.sync = cpu_outb_tun_pkt_prepare;
pf->process = (sa->sqh_len != 0) ?
esp_outb_sqh_process : pkt_flag_process;
break;
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
pf->prepare.sync = cpu_outb_trs_pkt_prepare;
pf->process = (sa->sqh_len != 0) ?
esp_outb_sqh_process : pkt_flag_process;
break;
@ -660,7 +733,7 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
int32_t rc;
rc = 0;
pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };
switch (ss->type) {
case RTE_SECURITY_ACTION_TYPE_NONE:
@ -677,9 +750,12 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
pf->process = inline_proto_outb_pkt_process;
break;
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
pf->prepare = lksd_proto_prepare;
pf->prepare.async = lksd_proto_prepare;
pf->process = pkt_flag_process;
break;
case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
rc = cpu_crypto_pkt_func_select(sa, pf);
break;
default:
rc = -ENOTSUP;
}

File: lib/librte_ipsec/sa.h

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018-2020 Intel Corporation
*/
#ifndef _SA_H_
@ -88,6 +88,8 @@ struct rte_ipsec_sa {
union sym_op_ofslen cipher;
union sym_op_ofslen auth;
} ctp;
/* cpu-crypto offsets */
union rte_crypto_sym_ofs cofs;
/* tx_offload template for tunnel mbuf */
struct {
uint64_t msk;
@ -156,6 +158,10 @@ uint16_t
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
uint16_t
cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
/* outbound processing */
uint16_t
@ -170,6 +176,10 @@ uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
uint16_t num);
uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
@ -182,4 +192,11 @@ uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
#endif /* _SA_H_ */

File: lib/librte_ipsec/ses.c

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018-2020 Intel Corporation
*/
#include <rte_ipsec.h>
@ -11,7 +11,8 @@ session_check(struct rte_ipsec_session *ss)
if (ss == NULL || ss->sa == NULL)
return -EINVAL;
if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
if (ss->crypto.ses == NULL)
return -EINVAL;
} else {
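Tying the pieces together, a sketch of binding a session to the new mode
(illustration only; creation of sa, the cryptodev symmetric session sess and
dev_id is elided):

/* sketch: set up an rte_ipsec_session for CPU crypto mode;
 * sa, sess and dev_id are assumed to exist already */
struct rte_ipsec_session ss = { 0 };

ss.sa = sa;
ss.type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
ss.crypto.ses = sess;
ss.crypto.dev_id = dev_id;

/* selects the sync prepare/process function pointers shown earlier */
int rc = rte_ipsec_session_prepare(&ss);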