net: add rte prefix to ESP structure

Add 'rte_' prefix to structures:
- rename struct esp_hdr to struct rte_esp_hdr.
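
For illustration only (not part of this change), a minimal sketch of what
code parsing ESP packets looks like after the rename; the helper name
esp_spi_of and the no-IP-options assumption are hypothetical:

    #include <rte_esp.h>        /* struct rte_esp_hdr (renamed here) */
    #include <rte_ip.h>         /* struct ipv4_hdr (still unprefixed at this point) */
    #include <rte_mbuf.h>
    #include <rte_byteorder.h>

    /* Hypothetical helper: return the host-order SPI of an ESP-in-IPv4
     * packet, assuming an IPv4 header without options at offset 0. */
    static inline uint32_t
    esp_spi_of(const struct rte_mbuf *m)
    {
        const struct rte_esp_hdr *esp;

        esp = rte_pktmbuf_mtod_offset(m, const struct rte_esp_hdr *,
                sizeof(struct ipv4_hdr));
        return rte_be_to_cpu_32(esp->spi);
    }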

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Olivier Matz, 2019-05-21 18:13:06 +02:00; committed by Ferruh Yigit
parent 35b2d13fd6
commit 5ef2546767
9 changed files with 48 additions and 43 deletions


@@ -569,12 +569,12 @@ setup_test_string_tunneled(struct rte_mempool *mpool, const char *string,
     size_t len, uint32_t spi, uint32_t seq)
 {
     struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
-    uint32_t hdrlen = sizeof(struct ipv4_hdr) + sizeof(struct esp_hdr);
+    uint32_t hdrlen = sizeof(struct ipv4_hdr) + sizeof(struct rte_esp_hdr);
     uint32_t taillen = sizeof(struct esp_tail);
     uint32_t t_len = len + hdrlen + taillen;
     uint32_t padlen;

-    struct esp_hdr esph = {
+    struct rte_esp_hdr esph = {
         .spi = rte_cpu_to_be_32(spi),
         .seq = rte_cpu_to_be_32(seq)
     };


@@ -49,7 +49,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
     }

     payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
-        sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
+        sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;

     if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
         RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
@@ -61,13 +61,14 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
     sym_cop->m_src = m;

     if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
-        sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
-            sa->iv_len;
+        sym_cop->aead.data.offset =
+            ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
         sym_cop->aead.data.length = payload_len;

         struct cnt_blk *icb;
         uint8_t *aad;
-        uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+        uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
+                sizeof(struct rte_esp_hdr));

         icb = get_cnt_blk(m);
         icb->salt = sa->salt;
@@ -75,7 +76,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
         icb->cnt = rte_cpu_to_be_32(1);

         aad = get_aad(m);
-        memcpy(aad, iv - sizeof(struct esp_hdr), 8);
+        memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
         sym_cop->aead.aad.data = aad;
         sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
                 aad - rte_pktmbuf_mtod(m, uint8_t *));
@@ -85,12 +86,14 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
         sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                 rte_pktmbuf_pkt_len(m) - sa->digest_len);
     } else {
-        sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+        sym_cop->cipher.data.offset = ip_hdr_len +
+            sizeof(struct rte_esp_hdr) +
             sa->iv_len;
         sym_cop->cipher.data.length = payload_len;

         struct cnt_blk *icb;
-        uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+        uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
+                sizeof(struct rte_esp_hdr));
         uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
                 uint8_t *, IV_OFFSET);
@@ -118,7 +121,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
     case RTE_CRYPTO_AUTH_SHA1_HMAC:
     case RTE_CRYPTO_AUTH_SHA256_HMAC:
         sym_cop->auth.data.offset = ip_hdr_len;
-        sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+        sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
             sa->iv_len + payload_len;
         break;
     default:
@@ -192,7 +195,7 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
     if (unlikely(sa->flags == TRANSPORT)) {
         ip = rte_pktmbuf_mtod(m, struct ip *);
         ip4 = (struct ip *)rte_pktmbuf_adj(m,
-                sizeof(struct esp_hdr) + sa->iv_len);
+                sizeof(struct rte_esp_hdr) + sa->iv_len);
         if (likely(ip->ip_v == IPVERSION)) {
             memmove(ip4, ip, ip->ip_hl * 4);
             ip4->ip_p = *nexthdr;
@@ -206,7 +209,7 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
                 sizeof(struct ip6_hdr));
         }
     } else
-        ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+        ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);

     return 0;
 }
@@ -217,7 +220,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 {
     struct ip *ip4;
     struct ip6_hdr *ip6;
-    struct esp_hdr *esp = NULL;
+    struct rte_esp_hdr *esp = NULL;
     uint8_t *padding = NULL, *new_ip, nlp;
     struct rte_crypto_sym_op *sym_cop;
     int32_t i;
@@ -268,7 +271,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
     }

     /* Check maximum packet size */
-    if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+    if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
             pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
         RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
         return -EINVAL;
@@ -290,20 +293,20 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
     switch (sa->flags) {
     case IP4_TUNNEL:
-        ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+        ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
                 &sa->src, &sa->dst);
-        esp = (struct esp_hdr *)(ip4 + 1);
+        esp = (struct rte_esp_hdr *)(ip4 + 1);
         break;
     case IP6_TUNNEL:
-        ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+        ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
                 &sa->src, &sa->dst);
-        esp = (struct esp_hdr *)(ip6 + 1);
+        esp = (struct rte_esp_hdr *)(ip6 + 1);
         break;
     case TRANSPORT:
         new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
-                sizeof(struct esp_hdr) + sa->iv_len);
+                sizeof(struct rte_esp_hdr) + sa->iv_len);
         memmove(new_ip, ip4, ip_hdr_len);
-        esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+        esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
         ip4 = (struct ip *)new_ip;
         if (likely(ip4->ip_v == IPVERSION)) {
             ip4->ip_p = IPPROTO_ESP;
@@ -362,7 +365,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
         uint8_t *aad;

         sym_cop->aead.data.offset = ip_hdr_len +
-            sizeof(struct esp_hdr) + sa->iv_len;
+            sizeof(struct rte_esp_hdr) + sa->iv_len;
         sym_cop->aead.data.length = pad_payload_len;

         /* Fill pad_len using default sequential scheme */
@@ -392,12 +395,12 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
     case RTE_CRYPTO_CIPHER_3DES_CBC:
     case RTE_CRYPTO_CIPHER_AES_CBC:
         sym_cop->cipher.data.offset = ip_hdr_len +
-            sizeof(struct esp_hdr);
+            sizeof(struct rte_esp_hdr);
         sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
         break;
     case RTE_CRYPTO_CIPHER_AES_CTR:
         sym_cop->cipher.data.offset = ip_hdr_len +
-            sizeof(struct esp_hdr) + sa->iv_len;
+            sizeof(struct rte_esp_hdr) + sa->iv_len;
         sym_cop->cipher.data.length = pad_payload_len;
         break;
     default:
@@ -422,7 +425,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
     case RTE_CRYPTO_AUTH_SHA1_HMAC:
     case RTE_CRYPTO_AUTH_SHA256_HMAC:
         sym_cop->auth.data.offset = ip_hdr_len;
-        sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+        sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
             sa->iv_len + pad_payload_len;
         break;
     default:


@@ -1219,7 +1219,7 @@ static inline void
 single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
         struct ipsec_sa **sa_ret)
 {
-    struct esp_hdr *esp;
+    struct rte_esp_hdr *esp;
     struct ip *ip;
     uint32_t *src4_addr;
     uint8_t *src6_addr;
@@ -1229,9 +1229,9 @@ single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
     ip = rte_pktmbuf_mtod(pkt, struct ip *);

     if (ip->ip_v == IPVERSION)
-        esp = (struct esp_hdr *)(ip + 1);
+        esp = (struct rte_esp_hdr *)(ip + 1);
     else
-        esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);
+        esp = (struct rte_esp_hdr *)(((struct ip6_hdr *)ip) + 1);

     if (esp->spi == INVALID_SPI)
         return;


@@ -915,7 +915,7 @@ static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
  * Matches an ESP header.
  */
 struct rte_flow_item_esp {
-    struct esp_hdr hdr; /**< ESP header definition. */
+    struct rte_esp_hdr hdr; /**< ESP header definition. */
 };

 /** Default mask for RTE_FLOW_ITEM_TYPE_ESP. */
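
As a sketch of the user-visible impact of this hunk (not part of the diff):
an application matching ESP traffic by SPI now spells the field through
struct rte_esp_hdr. The function name fill_esp_spi_match and the SPI value
are hypothetical:

    #include <stdint.h>
    #include <rte_flow.h>
    #include <rte_byteorder.h>

    /* Hypothetical: build an RTE_FLOW_ITEM_TYPE_ESP spec/mask pair that
     * matches SPI 1000; rte_flow_item_esp.hdr is now a struct rte_esp_hdr. */
    static void
    fill_esp_spi_match(struct rte_flow_item *item,
            struct rte_flow_item_esp *spec,
            struct rte_flow_item_esp *mask)
    {
        spec->hdr.spi = rte_cpu_to_be_32(1000);
        mask->hdr.spi = RTE_BE32(UINT32_MAX);

        item->type = RTE_FLOW_ITEM_TYPE_ESP;
        item->spec = spec;
        item->last = NULL;
        item->mask = mask;
    }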


@@ -47,7 +47,7 @@ struct aead_gcm_aad {
 } __attribute__((packed));

 struct gcm_esph_iv {
-    struct esp_hdr esph;
+    struct rte_esp_hdr esph;
     uint64_t iv;
 } __attribute__((packed));


@@ -68,7 +68,7 @@ inb_cop_prepare(struct rte_crypto_op *cop,
     algo = sa->algo_type;

     ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
-        pofs + sizeof(struct esp_hdr));
+        pofs + sizeof(struct rte_esp_hdr));

     /* fill sym op fields */
     sop = cop->sym;
@@ -139,9 +139,9 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
     uint64_t sqn;
     uint32_t clen, icv_ofs, plen;
     struct rte_mbuf *ml;
-    struct esp_hdr *esph;
+    struct rte_esp_hdr *esph;

-    esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
+    esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);

     /*
      * retrieve and reconstruct SQN, then check it, then
@@ -295,10 +295,10 @@ static inline void *
 tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,
     uint32_t adj, uint32_t tlen, uint32_t *sqn)
 {
-    const struct esp_hdr *ph;
+    const struct rte_esp_hdr *ph;

     /* read SQN value */
-    ph = rte_pktmbuf_mtod_offset(mb, const struct esp_hdr *, hlen);
+    ph = rte_pktmbuf_mtod_offset(mb, const struct rte_esp_hdr *, hlen);
     sqn[0] = ph->seq;

     /* cut of ICV, ESP tail and padding bytes */


@@ -108,7 +108,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 {
     uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
     struct rte_mbuf *ml;
-    struct esp_hdr *esph;
+    struct rte_esp_hdr *esph;
     struct esp_tail *espt;
     char *ph, *pt;
     uint64_t *iv;
@@ -156,7 +156,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
         sqn_low16(sqc));

     /* update spi, seqn and iv */
-    esph = (struct esp_hdr *)(ph + sa->hdr_len);
+    esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
     iv = (uint64_t *)(esph + 1);
     copy_iv(iv, ivp, sa->iv_len);
@@ -275,7 +275,7 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
     uint8_t np;
     uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
     struct rte_mbuf *ml;
-    struct esp_hdr *esph;
+    struct rte_esp_hdr *esph;
     struct esp_tail *espt;
     char *ph, *pt;
     uint64_t *iv;
@@ -318,7 +318,7 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
         IPPROTO_ESP);

     /* update spi, seqn and iv */
-    esph = (struct esp_hdr *)(ph + uhlen);
+    esph = (struct rte_esp_hdr *)(ph + uhlen);
     iv = (uint64_t *)(esph + 1);
     copy_iv(iv, ivp, sa->iv_len);


@@ -233,7 +233,7 @@ esp_inb_init(struct rte_ipsec_sa *sa)
     /* these params may differ with new algorithms support */
     sa->ctp.auth.offset = 0;
     sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
-    sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
+    sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
     sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
 }
@@ -259,7 +259,8 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
     /* these params may differ with new algorithms support */
     sa->ctp.auth.offset = hlen;
-    sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
+    sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
+        sa->iv_len + sa->sqh_len;

     algo_type = sa->algo_type;
@@ -267,13 +268,14 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
     case ALGO_TYPE_AES_GCM:
     case ALGO_TYPE_AES_CTR:
     case ALGO_TYPE_NULL:
-        sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
+        sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
             sa->iv_len;
         sa->ctp.cipher.length = 0;
         break;
     case ALGO_TYPE_AES_CBC:
     case ALGO_TYPE_3DES_CBC:
-        sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
+        sa->ctp.cipher.offset = sa->hdr_len +
+            sizeof(struct rte_esp_hdr);
         sa->ctp.cipher.length = sa->iv_len;
         break;
     }


@@ -20,7 +20,7 @@ extern "C" {
 /**
  * ESP Header
  */
-struct esp_hdr {
+struct rte_esp_hdr {
     rte_be32_t spi; /**< Security Parameters Index */
     rte_be32_t seq; /**< packet sequence number */
 } __attribute__((__packed__));
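
A hedged compatibility note: since only the tag changes (the layout stays two
big-endian 32-bit fields), an application that must build against both old and
new DPDK could map one name onto the other. The version check below assumes
the rename first shipped in release 19.08; verify against the release notes:

    #include <rte_version.h>

    /* Hypothetical shim: on pre-rename DPDK, let 'struct rte_esp_hdr'
     * resolve to the old 'struct esp_hdr' tag. */
    #if RTE_VERSION < RTE_VERSION_NUM(19, 8, 0, 0)
    #define rte_esp_hdr esp_hdr
    #endif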