ipsec: implement SA data-path API

Provide implementation for rte_ipsec_pkt_crypto_prepare() and
rte_ipsec_pkt_process().
Current implementation:
 - supports ESP protocol tunnel mode.
 - supports ESP protocol transport mode.
 - supports ESN and replay window.
 - supports algorithms: AES-CBC, AES-GCM, HMAC-SHA1, NULL.
 - covers all currently defined security session types:
        - RTE_SECURITY_ACTION_TYPE_NONE
        - RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
        - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
        - RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL

For the first two types the SQN check/update is done in SW (inside the
library). For the last two types it is the HW/PMD's responsibility.
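
For the RTE_SECURITY_ACTION_TYPE_NONE (lookaside crypto) case the expected
call sequence looks roughly as below; a minimal sketch, assuming all crypto
ops complete successfully and packet order is preserved (device/queue ids,
function name and error handling are illustrative, not part of this commit):

#include <rte_ipsec.h>
#include <rte_cryptodev.h>

static uint16_t
ipsec_lookaside_burst(const struct rte_ipsec_session *ss, uint8_t dev_id,
	uint16_t qid, struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
	uint16_t num)
{
	uint16_t k, n;

	/* fill crypto ops for the input packets; for ACTION_TYPE_NONE
	 * sessions the SQN assignment/check happens here */
	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);

	/* hand the ops to the crypto PMD */
	n = rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);

	/* ... later, once the device has finished ... */
	n = rte_cryptodev_dequeue_burst(dev_id, qid, cop, n);

	/* finalize packets: header/trailer updates, ICV handling, etc. */
	return rte_ipsec_pkt_process(ss, mb, n);
}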

Signed-off-by: Mohammad Abdul Awal <mohammad.abdul.awal@intel.com>
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
commit 4d7ea3e145
parent 1e0ad1e36d
Author:    Konstantin Ananyev
Date:      2019-01-10 21:06:30 +0000
Committer: Pablo de Lara

 5 files changed, 1574 insertions(+), 7 deletions(-)

lib/librte_ipsec/crypto.h (new file, 123 lines)

@@ -0,0 +1,123 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _CRYPTO_H_
#define _CRYPTO_H_

/**
 * @file crypto.h
 * Contains crypto specific functions/structures/macros used internally
 * by the ipsec library.
 */

/*
 * AES-GCM devices have some specific requirements for IV and AAD formats.
 * Ideally that should be done by the driver itself.
 */

struct aead_gcm_iv {
	uint32_t salt;
	uint64_t iv;
	uint32_t cnt;
} __attribute__((packed));
struct aead_gcm_aad {
	uint32_t spi;
	/*
	 * RFC 4106, section 5:
	 * Two formats of the AAD are defined:
	 * one for 32-bit sequence numbers, and one for 64-bit ESN.
	 */
	union {
		uint32_t u32[2];
		uint64_t u64;
	} sqn;
	uint32_t align0; /* align to 16B boundary */
} __attribute__((packed));

struct gcm_esph_iv {
	struct esp_hdr esph;
	uint64_t iv;
} __attribute__((packed));

static inline void
aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
{
	gcm->salt = salt;
	gcm->iv = iv;
	gcm->cnt = rte_cpu_to_be_32(1);
}
/*
 * RFC 4106, section 5: AAD construction.
 * spi and sqn should already be converted into network byte order.
 * Make sure that unused bytes are zeroed.
 */
static inline void
aead_gcm_aad_fill(struct aead_gcm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
	int esn)
{
	aad->spi = spi;
	if (esn)
		aad->sqn.u64 = sqn;
	else {
		aad->sqn.u32[0] = sqn_low32(sqn);
		aad->sqn.u32[1] = 0;
	}
	aad->align0 = 0;
}

static inline void
gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
{
	iv[0] = sqn;
	iv[1] = 0;
}
/*
 * From RFC 4303, section 3.3.2.1.4:
 * If the ESN option is enabled for the SA, the high-order 32
 * bits of the sequence number are appended after the Next Header field
 * for purposes of this computation, but are not transmitted.
 */

/*
 * Helper function that moves the ICV 4B down (towards the packet tail)
 * and inserts SQN.hibits in the freed slot.
 * icv parameter points to the new start of ICV.
 */
static inline void
insert_sqh(uint32_t sqh, void *picv, uint32_t icv_len)
{
	uint32_t *icv;
	int32_t i;

	RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);

	icv = picv;
	icv_len = icv_len / sizeof(uint32_t);
	for (i = icv_len; i-- != 0; icv[i] = icv[i - 1])
		;

	/* the loop above leaves i == -1, so sqh lands in the 4B slot
	 * right before the new start of ICV */
	icv[i] = sqh;
}

/*
 * Helper function that moves the ICV 4B up (towards the packet head)
 * and removes SQN.hibits.
 * icv parameter points to the new start of ICV.
 */
static inline void
remove_sqh(void *picv, uint32_t icv_len)
{
	uint32_t i, *icv;

	RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);

	icv = picv;
	icv_len = icv_len / sizeof(uint32_t);
	for (i = 0; i != icv_len; i++)
		icv[i] = icv[i + 1];
}

#endif /* _CRYPTO_H_ */
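
A standalone round-trip illustration of the two ICV helpers above
(hypothetical test, not part of this diff; RTE_ASSERT() dropped so it
builds without DPDK):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* copies of insert_sqh()/remove_sqh() from the hunk above,
 * with RTE_ASSERT() omitted */
static void
insert_sqh(uint32_t sqh, void *picv, uint32_t icv_len)
{
	uint32_t *icv = picv;
	int32_t i;

	icv_len = icv_len / sizeof(uint32_t);
	for (i = icv_len; i-- != 0; icv[i] = icv[i - 1])
		;
	icv[i] = sqh;	/* i == -1 here: slot just before the new ICV */
}

static void
remove_sqh(void *picv, uint32_t icv_len)
{
	uint32_t i, *icv = picv;

	icv_len = icv_len / sizeof(uint32_t);
	for (i = 0; i != icv_len; i++)
		icv[i] = icv[i + 1];
}

int
main(void)
{
	/* 12B ICV initially in buf[0..2]; buf[3] is free space after it */
	uint32_t buf[4] = {0x11111111, 0x22222222, 0x33333333, 0};
	const uint32_t icv0[3] = {0x11111111, 0x22222222, 0x33333333};

	/* shift ICV to buf[1..3]; SQN.hi32 lands in buf[0] */
	insert_sqh(0xdeadbeef, &buf[1], 12);
	assert(buf[0] == 0xdeadbeef);
	assert(memcmp(&buf[1], icv0, sizeof(icv0)) == 0);

	/* shift it back, dropping SQN.hi32 */
	remove_sqh(&buf[0], 12);
	assert(memcmp(&buf[0], icv0, sizeof(icv0)) == 0);
	return 0;
}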

lib/librte_ipsec/iph.h (new file, 84 lines)

@@ -0,0 +1,84 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _IPH_H_
#define _IPH_H_

/**
 * @file iph.h
 * Contains functions/structures/macros to manipulate IPv4/IPv6 headers
 * used internally by the ipsec library.
 */

/*
 * Move preceding (L3) headers down to remove ESP header and IV.
 */
static inline void
remove_esph(char *np, char *op, uint32_t hlen)
{
	uint32_t i;

	for (i = hlen; i-- != 0; np[i] = op[i])
		;
}

/*
 * Move preceding (L3) headers up to free space for ESP header and IV.
 */
static inline void
insert_esph(char *np, char *op, uint32_t hlen)
{
	uint32_t i;

	for (i = 0; i != hlen; i++)
		np[i] = op[i];
}

/* update the original IP header fields for the transport case */
static inline int
update_trs_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,
	uint32_t l2len, uint32_t l3len, uint8_t proto)
{
	struct ipv4_hdr *v4h;
	struct ipv6_hdr *v6h;
	int32_t rc;

	if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
		v4h = p;
		rc = v4h->next_proto_id;
		v4h->next_proto_id = proto;
		v4h->total_length = rte_cpu_to_be_16(plen - l2len);
	} else if (l3len == sizeof(*v6h)) {
		v6h = p;
		rc = v6h->proto;
		v6h->proto = proto;
		v6h->payload_len = rte_cpu_to_be_16(plen - l2len -
			sizeof(*v6h));
	/* need to add support for IPv6 with options */
	} else
		rc = -ENOTSUP;

	return rc;
}

/* update the original and new IP header fields for the tunnel case */
static inline void
update_tun_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,
	uint32_t l2len, rte_be16_t pid)
{
	struct ipv4_hdr *v4h;
	struct ipv6_hdr *v6h;

	if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) {
		v4h = p;
		v4h->packet_id = pid;
		v4h->total_length = rte_cpu_to_be_16(plen - l2len);
	} else {
		v6h = p;
		v6h->payload_len = rte_cpu_to_be_16(plen - l2len -
			sizeof(*v6h));
	}
}

#endif /* _IPH_H_ */
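
A minimal sketch of how remove_esph() and update_trs_l3hdr() combine for
inbound ESP transport mode (function name, offsets and mbuf handling are
illustrative assumptions, not code from this commit):

#include <rte_mbuf.h>

/* drop the ESP header + IV of size <esp_len> sitting between the L2/L3
 * headers and the payload, then restore the IP header fields */
static int
inb_trs_fixup(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
	uint32_t l2len, uint32_t l3len, uint32_t esp_len, uint8_t next_proto)
{
	char *op, *np;

	/* move L2/L3 headers down by esp_len; remove_esph() copies
	 * high-to-low, so the overlapping regions are handled correctly */
	op = rte_pktmbuf_mtod(mb, char *);
	np = op + esp_len;
	remove_esph(np, op, l2len + l3len);

	/* shrink the mbuf from the front accordingly */
	rte_pktmbuf_adj(mb, esp_len);

	/* rewrite next-proto (taken from the ESP trailer) and payload
	 * length; returns the previous proto, or -ENOTSUP for IPv6
	 * with extension headers */
	return update_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,
		next_proto);
}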

lib/librte_ipsec/ipsec_sqn.h

@@ -15,6 +15,45 @@
#define IS_ESN(sa) ((sa)->sqn_mask == UINT64_MAX)

/*
 * gets SQN.hi32 bits; SQN is supposed to be in network byte order.
 */
static inline rte_be32_t
sqn_hi32(rte_be64_t sqn)
{
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	return (sqn >> 32);
#else
	return sqn;
#endif
}

/*
 * gets SQN.low32 bits; SQN is supposed to be in network byte order.
 */
static inline rte_be32_t
sqn_low32(rte_be64_t sqn)
{
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	return sqn;
#else
	return (sqn >> 32);
#endif
}

/*
 * gets SQN.low16 bits; SQN is supposed to be in network byte order.
 */
static inline rte_be16_t
sqn_low16(rte_be64_t sqn)
{
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	return sqn;
#else
	return (sqn >> 48);
#endif
}
/*
 * for a given size, calculate the required number of buckets.
 */

@@ -30,6 +69,153 @@ replay_num_bucket(uint32_t wsz)
	return nb;
}
/*
 * According to RFC 4303 A2.1, determine the high-order bits of
 * the sequence number.
 * Uses 32-bit arithmetic inside, returns uint64_t.
 */
static inline uint64_t
reconstruct_esn(uint64_t t, uint32_t sqn, uint32_t w)
{
	uint32_t th, tl, bl;

	tl = t;
	th = t >> 32;
	bl = tl - w + 1;

	/* case A: window is within one sequence number subspace */
	if (tl >= (w - 1))
		th += (sqn < bl);
	/* case B: window spans two sequence number subspaces */
	else if (th != 0)
		th -= (sqn >= bl);

	/* return constructed sequence with proper high-order bits */
	return (uint64_t)th << 32 | sqn;
}
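
A quick standalone check of the reconstruction logic above (hypothetical
test, not part of this diff):

#include <assert.h>
#include <stdint.h>

/* copy of reconstruct_esn() from the hunk above */
static uint64_t
reconstruct_esn(uint64_t t, uint32_t sqn, uint32_t w)
{
	uint32_t th, tl, bl;

	tl = t;
	th = t >> 32;
	bl = tl - w + 1;

	if (tl >= (w - 1))
		th += (sqn < bl);
	else if (th != 0)
		th -= (sqn >= bl);

	return (uint64_t)th << 32 | sqn;
}

int
main(void)
{
	/* top ESN t = 2^32 + 5, window size w = 64, so the window
	 * base bl = tl - w + 1 wraps around to 0xffffffc6 */
	uint64_t t = (UINT64_C(1) << 32) + 5;

	/* 0xfffffff0 >= bl: it sits in the part of the window that
	 * belongs to the previous 32-bit subspace -> hi32 becomes 0 */
	assert(reconstruct_esn(t, 0xfffffff0, 64) == UINT64_C(0xfffffff0));

	/* 10 < bl: current subspace -> hi32 stays 1 */
	assert(reconstruct_esn(t, 10, 64) == (UINT64_C(1) << 32) + 10);
	return 0;
}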
/**
 * Perform the replay checking.
 *
 * struct rte_ipsec_sa contains the window and window related parameters,
 * such as the window size, bitmask, and the last acknowledged sequence number.
 *
 * Based on RFC 6479.
 * Blocks are 64-bit unsigned integers.
 */
static inline int32_t
esn_inb_check_sqn(const struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
	uint64_t sqn)
{
	uint32_t bit, bucket;

	/* replay not enabled */
	if (sa->replay.win_sz == 0)
		return 0;

	/* seq is larger than lastseq */
	if (sqn > rsn->sqn)
		return 0;

	/* seq is outside window */
	if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
		return -EINVAL;

	/* seq is inside the window */
	bit = sqn & WINDOW_BIT_LOC_MASK;
	bucket = (sqn >> WINDOW_BUCKET_BITS) & sa->replay.bucket_index_mask;

	/* already seen packet */
	if (rsn->window[bucket] & ((uint64_t)1 << bit))
		return -EINVAL;

	return 0;
}
/**
 * For an outbound SA, perform the sequence number update.
 */
static inline uint64_t
esn_outb_update_sqn(struct rte_ipsec_sa *sa, uint32_t *num)
{
	uint64_t n, s, sqn;

	n = *num;
	sqn = sa->sqn.outb + n;
	sa->sqn.outb = sqn;

	/* overflow */
	if (sqn > sa->sqn_mask) {
		s = sqn - sa->sqn_mask;
		*num = (s < n) ? n - s : 0;
	}

	return sqn - n;
}
/**
 * For an inbound SA, perform the sequence number and replay window update.
 */
static inline int32_t
esn_inb_update_sqn(struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
	uint64_t sqn)
{
	uint32_t bit, bucket, last_bucket, new_bucket, diff, i;

	/* replay not enabled */
	if (sa->replay.win_sz == 0)
		return 0;

	/* handle ESN */
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);

	/* seq is outside window */
	if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
		return -EINVAL;

	/* update the bit */
	bucket = (sqn >> WINDOW_BUCKET_BITS);

	/* check if the seq is within the range */
	if (sqn > rsn->sqn) {
		last_bucket = rsn->sqn >> WINDOW_BUCKET_BITS;
		diff = bucket - last_bucket;
		/* seq is way after the range of WINDOW_SIZE */
		if (diff > sa->replay.nb_bucket)
			diff = sa->replay.nb_bucket;

		for (i = 0; i != diff; i++) {
			new_bucket = (i + last_bucket + 1) &
				sa->replay.bucket_index_mask;
			rsn->window[new_bucket] = 0;
		}
		rsn->sqn = sqn;
	}

	bucket &= sa->replay.bucket_index_mask;
	/* keep bit as a position; the mask itself must be 64-bit wide,
	 * so shift at the use sites */
	bit = sqn & WINDOW_BIT_LOC_MASK;

	/* already seen packet */
	if (rsn->window[bucket] & ((uint64_t)1 << bit))
		return -EINVAL;

	rsn->window[bucket] |= ((uint64_t)1 << bit);
	return 0;
}
/**
 * To achieve the ability to do multiple readers/single writer for
 * SA replay window information and sequence number (RSN),
 * a basic RCU schema is used:
 * The SA has 2 copies of the RSN (one for readers, another for the writer).
 * Each RSN contains a rwlock that has to be grabbed (for read/write)
 * to avoid races between readers and writer.
 * The writer is responsible for making a copy of the readers' RSN,
 * updating it and marking the newly updated RSN as the readers' one.
 * That approach is intended to minimize contention and cache sharing
 * between writer and readers.
 */
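
A minimal sketch of that two-copy scheme (all names, the struct layout and
the fixed window size are illustrative assumptions, not the code from this
commit):

#include <string.h>
#include <rte_rwlock.h>

struct rsn {
	rte_rwlock_t rwl;
	uint64_t sqn;
	uint64_t window[4];	/* fixed-size window, just for the sketch */
};

struct rsn_pair {
	struct rsn cpy[2];	/* two copies: readers' and writer's */
	uint32_t rdidx;		/* which copy readers currently use */
};

/* reader: grab the current readers' copy under its read lock */
static inline struct rsn *
rsn_acquire(struct rsn_pair *p)
{
	struct rsn *rsn = &p->cpy[p->rdidx];

	rte_rwlock_read_lock(&rsn->rwl);
	return rsn;
}

static inline void
rsn_release(struct rsn *rsn)
{
	rte_rwlock_read_unlock(&rsn->rwl);
}

/* writer: copy the readers' RSN into the spare one, update it there,
 * then flip rdidx so new readers see the updated copy */
static inline struct rsn *
rsn_update_start(struct rsn_pair *p)
{
	struct rsn *next = &p->cpy[p->rdidx ^ 1];

	rte_rwlock_write_lock(&next->rwl);
	next->sqn = p->cpy[p->rdidx].sqn;
	memcpy(next->window, p->cpy[p->rdidx].window, sizeof(next->window));
	return next;
}

static inline void
rsn_update_finish(struct rsn_pair *p, struct rsn *rsn)
{
	p->rdidx ^= 1;
	rte_rwlock_write_unlock(&rsn->rwl);
}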
/**
 * Based on the number of buckets, calculate the required size of the
 * structure that holds replay window and sequence number (RSN) information.
 */

lib/librte_ipsec/pad.h (new file, 45 lines)

@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _PAD_H_
#define _PAD_H_

#define IPSEC_MAX_PAD_SIZE	UINT8_MAX

static const uint8_t esp_pad_bytes[IPSEC_MAX_PAD_SIZE] = {
	1, 2, 3, 4, 5, 6, 7, 8,
	9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24,
	25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40,
	41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56,
	57, 58, 59, 60, 61, 62, 63, 64,
	65, 66, 67, 68, 69, 70, 71, 72,
	73, 74, 75, 76, 77, 78, 79, 80,
	81, 82, 83, 84, 85, 86, 87, 88,
	89, 90, 91, 92, 93, 94, 95, 96,
	97, 98, 99, 100, 101, 102, 103, 104,
	105, 106, 107, 108, 109, 110, 111, 112,
	113, 114, 115, 116, 117, 118, 119, 120,
	121, 122, 123, 124, 125, 126, 127, 128,
	129, 130, 131, 132, 133, 134, 135, 136,
	137, 138, 139, 140, 141, 142, 143, 144,
	145, 146, 147, 148, 149, 150, 151, 152,
	153, 154, 155, 156, 157, 158, 159, 160,
	161, 162, 163, 164, 165, 166, 167, 168,
	169, 170, 171, 172, 173, 174, 175, 176,
	177, 178, 179, 180, 181, 182, 183, 184,
	185, 186, 187, 188, 189, 190, 191, 192,
	193, 194, 195, 196, 197, 198, 199, 200,
	201, 202, 203, 204, 205, 206, 207, 208,
	209, 210, 211, 212, 213, 214, 215, 216,
	217, 218, 219, 220, 221, 222, 223, 224,
	225, 226, 227, 228, 229, 230, 231, 232,
	233, 234, 235, 236, 237, 238, 239, 240,
	241, 242, 243, 244, 245, 246, 247, 248,
	249, 250, 251, 252, 253, 254, 255,
};
#endif /* _PAD_H_ */
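
A minimal sketch of how this table gets used when building the ESP trailer
(the function name is hypothetical, and the esp_tail layout from rte_esp.h
is an assumption here, not part of this diff):

#include <string.h>
#include <rte_esp.h>

/* append RFC 4303 monotonic padding (1, 2, 3, ...) plus the 2B ESP
 * trailer; p points just past the payload, pdlen includes the trailer */
static inline void
fill_esp_trailer(uint8_t *p, uint32_t pdlen, uint8_t next_proto)
{
	struct esp_tail *espt;

	/* the pad bytes themselves, taken straight from the table */
	memcpy(p, esp_pad_bytes, pdlen - sizeof(*espt));

	espt = (struct esp_tail *)(p + pdlen - sizeof(*espt));
	espt->pad_len = pdlen - sizeof(*espt);
	espt->next_proto = next_proto;
}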

lib/librte_ipsec/sa.c: file diff suppressed because it is too large.